repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
Multi2WOZ | Multi2WOZ-main/downstream/utils/utils_domain.py |
import torch
import torch.utils.data as data
import random
import math
from .dataloader_dst import *
from .dataloader_nlg import *
from .dataloader_nlu import *
from .dataloader_dm import *
from .dataloader_usdl import *


def get_loader(args, mode, tokenizer, datasets, unified_meta, shuffle=False):
    task = args["task"]
    batch_size = args["batch_size"] if mode == "train" else args["eval_batch_size"]

    combined_ds = []
    for ds in datasets:
        combined_ds += datasets[ds][mode]

    # do not consider empty system responses
    if (args["task_name"] == "rs") or (args["task"] == "dm"):
        print("[Info] Remove turns with empty system response...")
        combined_ds = [d for d in combined_ds if d["turn_sys"] != ""]

    ## Ignore the first system utterance for the response selection task
    if args["task_name"] == "rs":
        print("[Info] Remove turn=0 system response...")
        combined_ds = [d for d in combined_ds if d["turn_id"] != 0]

    # control data ratio
    if (args["train_data_ratio"] != 1 or args["nb_shots"] != -1) and (mode == "train"):
        original_len = len(combined_ds)
        if "oos_intent" in args["dataset"]:
            nb_train_sample_per_class = int(100 * args["train_data_ratio"])
            class_count = {k: 0 for k in unified_meta["intent"]}
            random.Random(args["rand_seed"]).shuffle(combined_ds)
            pair_trn_new = []
            for d in combined_ds:
                if class_count[d["intent"]] < nb_train_sample_per_class:
                    pair_trn_new.append(d)
                    class_count[d["intent"]] += 1
            combined_ds = pair_trn_new
        else:
            if args["train_data_ratio"] != 1:
                random.Random(args["rand_seed"]).shuffle(combined_ds)
                combined_ds = combined_ds[:int(len(combined_ds) * args["train_data_ratio"])]
            else:
                random.Random(args["rand_seed"]).shuffle(combined_ds)
                combined_ds = combined_ds[:args["nb_shots"]]
        print("[INFO] Use Training Data: from {} to {}".format(original_len, len(combined_ds)))

    data_info = {k: [] for k in combined_ds[0].keys()}
    for d in combined_ds:
        for k in combined_ds[0].keys():
            data_info[k].append(d[k])

    dataset = globals()["Dataset_" + task](data_info, tokenizer, args, unified_meta, mode, args["max_seq_length"])
    bool_shuffle = (mode == "train" or shuffle)
    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=batch_size,
                                              shuffle=bool_shuffle,
                                              collate_fn=globals()["collate_fn_{}_{}".format(task, args["example_type"])])
    return data_loader


def get_unified_meta(datasets):
    unified_meta = {"others": None}
    for ds in datasets:
        for key, value in datasets[ds]["meta"].items():
            if key not in unified_meta.keys():
                unified_meta[key] = {}
            if type(value) == list:
                for v in value:
                    if v not in unified_meta[key].keys():
                        unified_meta[key][v] = len(unified_meta[key])
            else:
                unified_meta[key] = value
    return unified_meta
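# Usage sketch (illustrative; the toy `datasets` layout below is an assumption
# inferred from this file, not shipped with the repo):
# >>> toy = {"dsA": {"meta": {"intent": ["greet", "bye"]}},
# ...        "dsB": {"meta": {"intent": ["bye", "thank"]}}}
# >>> get_unified_meta(toy)
# {'others': None, 'intent': {'greet': 0, 'bye': 1, 'thank': 2}}
# get_loader() additionally expects `datasets[name][mode]` lists of turn dicts
# plus args keys such as "task", "task_name", "dataset", "batch_size",
# "eval_batch_size", "train_data_ratio", "nb_shots", "rand_seed",
# "max_seq_length" and "example_type" (all read directly above).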
| 3,348 | 39.349398 | 122 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/utils_camrest676.py |
import json
import ast
import collections
import os
from .utils_function import get_input_example


def read_langs_turn(args, file_name, max_line=None):
    print(("Reading from {} for read_langs_turn".format(file_name)))

    data = []

    with open(file_name) as f:
        dials = json.load(f)

        cnt_lin = 1
        for dial_dict in dials:
            dialog_history = [""]

            # Reading data
            for ti, turn in enumerate(dial_dict["dial"]):
                assert ti == turn["turn"]
                turn_usr = turn["usr"]["transcript"].lower().strip()
                turn_sys = turn["sys"]["sent"].lower().strip()

                data_detail = get_input_example("turn")
                data_detail["ID"] = "camrest676-" + str(cnt_lin)
                data_detail["turn_id"] = turn["turn"]
                data_detail["turn_usr"] = turn_usr
                data_detail["turn_sys"] = turn_sys
                data_detail["dialog_history"] = list(dialog_history)

                if not args["only_last_turn"]:
                    data.append(data_detail)

                dialog_history.append(turn_usr)
                dialog_history.append(turn_sys)

            if args["only_last_turn"]:
                data.append(data_detail)

            cnt_lin += 1
            if max_line and cnt_lin >= max_line:
                break

    return data


def read_langs_dial(file_name, ontology, dialog_act, max_line=None, domain_act_flag=False):
    print(("Reading from {} for read_langs_dial".format(file_name)))
    raise NotImplementedError


def prepare_data_camrest676(args):
    example_type = args["example_type"]
    max_line = args["max_line"]

    file_trn = os.path.join(args["data_path"], 'CamRest676/CamRest676.json')

    _example_type = "dial" if "dial" in example_type else example_type
    pair_trn = globals()["read_langs_{}".format(_example_type)](args, file_trn, max_line)
    pair_dev = []
    pair_tst = []

    print("Read %s pairs train from CamRest676" % len(pair_trn))

    meta_data = {"num_labels": 0}

    return pair_trn, pair_dev, pair_tst, meta_data
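# Usage sketch (illustrative; the path/values below are placeholders, only the
# key names are read from the code above):
# >>> args = {"data_path": "./dialog_datasets", "example_type": "turn",
# ...         "max_line": None, "only_last_turn": False}
# >>> pair_trn, pair_dev, pair_tst, meta = prepare_data_camrest676(args)
# CamRest676 ships a single file, so pair_dev and pair_tst stay empty.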
| 2,180 | 28.472973 | 93 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/utils_universal_act.py |
import json
import ast
import os
from .utils_function import get_input_example


def read_langs_turn(file_name, max_line=None):
    print(("Reading from {} for read_langs_turn".format(file_name)))

    data = []
    domain_counter = {}

    with open(file_name) as f:
        dials = json.load(f)
        print("len dials", len(dials))

        cnt_lin = 1
        for dial_list in dials:
            dialog_history = []
            sys_first_flag = 1 if (dial_list[0]["speaker"] == "[SYS]") else 0

            # Reading data
            for ti, turn in enumerate(dial_list):
                data_detail = get_input_example("turn")
                data_detail["ID"] = turn["conv_id"]
                data_detail["dialog_history"] = list(dialog_history)

                if sys_first_flag and ti % 2 == 1:
                    data_detail["turn_id"] = ti // 2
                    data_detail["turn_usr"] = turn["raw_text"].strip()
                    data_detail["turn_sys"] = dial_list[ti - 1]["raw_text"].strip()
                    data_detail["sys_act"] = dial_list[ti - 1]["label"]
                    data.append(data_detail)
                    dialog_history.append(data_detail["turn_sys"])
                    dialog_history.append(data_detail["turn_usr"])
                elif not sys_first_flag and ti % 2 == 0:
                    data_detail["turn_id"] = (ti + 1) // 2
                    data_detail["turn_usr"] = turn["raw_text"].strip()
                    data_detail["turn_sys"] = dial_list[ti - 1]["raw_text"].strip() if ti > 0 else ""
                    data_detail["sys_act"] = dial_list[ti - 1]["label"] if ti > 0 else []
                    data.append(data_detail)
                    dialog_history.append(data_detail["turn_sys"])
                    dialog_history.append(data_detail["turn_usr"])

            cnt_lin += 1
            if max_line and cnt_lin >= max_line:
                break

    return data


def read_langs_dial(file_name, label_dict, max_line=None):
    raise NotImplementedError


def prepare_data_universal_act_dstc2(args):
    example_type = args["example_type"]
    max_line = args["max_line"]

    file_trn = os.path.join(args["data_path"], 'universal_dialog_act/dstc2/train.json')
    file_dev = os.path.join(args["data_path"], 'universal_dialog_act/dstc2/valid.json')
    file_tst = os.path.join(args["data_path"], 'universal_dialog_act/dstc2/test.json')
    file_label = os.path.join(args["data_path"], 'universal_dialog_act/dstc2/labels.txt')
    #file_label = '/export/home/dialog_datasets/universal_dialog_act/acts.txt'

    label_dict = {line.replace("\n", ""): i for i, line in enumerate(open(file_label, "r").readlines())}

    _example_type = "dial" if "dial" in example_type else example_type
    pair_trn = globals()["read_langs_{}".format(_example_type)](file_trn, max_line)
    pair_dev = globals()["read_langs_{}".format(_example_type)](file_dev, max_line)
    pair_tst = globals()["read_langs_{}".format(_example_type)](file_tst, max_line)

    print("Read {} pairs train from {}".format(len(pair_trn), file_trn))
    print("Read {} pairs valid from {}".format(len(pair_dev), file_dev))
    print("Read {} pairs test from {}".format(len(pair_tst), file_tst))

    meta_data = {"sysact": label_dict, "num_labels": len(label_dict)}
    print("meta_data", meta_data)

    return pair_trn, pair_dev, pair_tst, meta_data


def prepare_data_universal_act_sim_joint(args):
    example_type = args["example_type"]
    max_line = args["max_line"]

    file_trn = os.path.join(args["data_path"], 'universal_dialog_act/sim_joint/train.json')
    file_dev = os.path.join(args["data_path"], 'universal_dialog_act/sim_joint/valid.json')
    file_tst = os.path.join(args["data_path"], 'universal_dialog_act/sim_joint/test.json')
    file_label = os.path.join(args["data_path"], 'universal_dialog_act/sim_joint/labels.txt')
    #file_label = '/export/home/dialog_datasets/universal_dialog_act/acts.txt'

    label_dict = {line.replace("\n", ""): i for i, line in enumerate(open(file_label, "r").readlines())}

    _example_type = "dial" if "dial" in example_type else example_type
    pair_trn = globals()["read_langs_{}".format(_example_type)](file_trn, max_line)
    pair_dev = globals()["read_langs_{}".format(_example_type)](file_dev, max_line)
    pair_tst = globals()["read_langs_{}".format(_example_type)](file_tst, max_line)

    print("Read {} pairs train from {}".format(len(pair_trn), file_trn))
    print("Read {} pairs valid from {}".format(len(pair_dev), file_dev))
    print("Read {} pairs test from {}".format(len(pair_tst), file_tst))

    meta_data = {"sysact": label_dict, "num_labels": len(label_dict)}
    print("meta_data", meta_data)

    return pair_trn, pair_dev, pair_tst, meta_data
| 4,855 | 43.550459 | 103 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/multiwoz/nlp.py |
import math
import re
from collections import Counter
from nltk.util import ngrams
timepat = re.compile("\d{1,2}[:]\d{1,2}")
pricepat = re.compile("\d{1,3}[.]\d{1,2}")
fin = open('utils/multiwoz/mapping.pair', 'r')
replacements = []
for line in fin.readlines():
tok_from, tok_to = line.replace('\n', '').split('\t')
replacements.append((' ' + tok_from + ' ', ' ' + tok_to + ' '))
def insertSpace(token, text):
sidx = 0
while True:
sidx = text.find(token, sidx)
if sidx == -1:
break
if sidx + 1 < len(text) and re.match('[0-9]', text[sidx - 1]) and \
re.match('[0-9]', text[sidx + 1]):
sidx += 1
continue
if text[sidx - 1] != ' ':
text = text[:sidx] + ' ' + text[sidx:]
sidx += 1
if sidx + len(token) < len(text) and text[sidx + len(token)] != ' ':
text = text[:sidx + 1] + ' ' + text[sidx + 1:]
sidx += 1
return text
def normalize(text, clean_value=True):
# lower case every word
text = text.lower()
# replace white spaces in front and end
text = re.sub(r'^\s*|\s*$', '', text)
# hotel domain pfb30
text = re.sub(r"b&b", "bed and breakfast", text)
text = re.sub(r"b and b", "bed and breakfast", text)
if clean_value:
# normalize phone number
ms = re.findall('\(?(\d{3})\)?[-.\s]?(\d{3})[-.\s]?(\d{4,5})', text)
if ms:
sidx = 0
for m in ms:
sidx = text.find(m[0], sidx)
if text[sidx - 1] == '(':
sidx -= 1
eidx = text.find(m[-1], sidx) + len(m[-1])
text = text.replace(text[sidx:eidx], ''.join(m))
# normalize postcode
ms = re.findall('([a-z]{1}[\. ]?[a-z]{1}[\. ]?\d{1,2}[, ]+\d{1}[\. ]?[a-z]{1}[\. ]?[a-z]{1}|[a-z]{2}\d{2}[a-z]{2})',
text)
if ms:
sidx = 0
for m in ms:
sidx = text.find(m, sidx)
eidx = sidx + len(m)
text = text[:sidx] + re.sub('[,\. ]', '', m) + text[eidx:]
# weird unicode bug
text = re.sub(u"(\u2018|\u2019)", "'", text)
if clean_value:
# replace time and price
text = re.sub(timepat, ' [value_time] ', text)
text = re.sub(pricepat, ' [value_price] ', text)
#text = re.sub(pricepat2, '[value_price]', text)
# replace st.
text = text.replace(';', ',')
text = re.sub('$\/', '', text)
text = text.replace('/', ' and ')
# replace other special characters
text = text.replace('-', ' ')
text = re.sub('[\"\<>@\(\)]', '', text) # remove
# insert white space before and after tokens:
for token in ['?', '.', ',', '!']:
text = insertSpace(token, text)
# insert white space for 's
text = insertSpace('\'s', text)
# replace it's, does't, you'd ... etc
text = re.sub('^\'', '', text)
text = re.sub('\'$', '', text)
text = re.sub('\'\s', ' ', text)
text = re.sub('\s\'', ' ', text)
for fromx, tox in replacements:
text = ' ' + text + ' '
text = text.replace(fromx, tox)[1:-1]
# remove multiple spaces
text = re.sub(' +', ' ', text)
# concatenate numbers
tmp = text
tokens = text.split()
i = 1
while i < len(tokens):
if re.match(u'^\d+$', tokens[i]) and \
re.match(u'\d+$', tokens[i - 1]):
tokens[i - 1] += tokens[i]
del tokens[i]
else:
i += 1
text = ' '.join(tokens)
return text
class BLEUScorer(object):
## BLEU score calculator via GentScorer interface
## it calculates corpus-level BLEU-4 by taking in the entire corpus and
## scoring multiple candidates against multiple references
def __init__(self):
pass
def score(self, hypothesis, corpus, n=1):
# containers
count = [0, 0, 0, 0]
clip_count = [0, 0, 0, 0]
r = 0
c = 0
weights = [0.25, 0.25, 0.25, 0.25]
# accumulate ngram statistics
for hyps, refs in zip(hypothesis, corpus):
if type(hyps[0]) is list:
hyps = [hyp.split() for hyp in hyps[0]]
else:
hyps = [hyp.split() for hyp in hyps]
refs = [ref.split() for ref in refs]
# Shawn's evaluation
refs[0] = [u'GO_'] + refs[0] + [u'EOS_']
hyps[0] = [u'GO_'] + hyps[0] + [u'EOS_']
for idx, hyp in enumerate(hyps):
for i in range(4):
# accumulate ngram counts
hypcnts = Counter(ngrams(hyp, i + 1))
cnt = sum(hypcnts.values())
count[i] += cnt
# compute clipped counts
max_counts = {}
for ref in refs:
refcnts = Counter(ngrams(ref, i + 1))
for ng in hypcnts:
max_counts[ng] = max(max_counts.get(ng, 0), refcnts[ng])
clipcnt = dict((ng, min(count, max_counts[ng])) \
for ng, count in hypcnts.items())
clip_count[i] += sum(clipcnt.values())
# accumulate r & c
bestmatch = [1000, 1000]
for ref in refs:
if bestmatch[0] == 0: break
diff = abs(len(ref) - len(hyp))
if diff < bestmatch[0]:
bestmatch[0] = diff
bestmatch[1] = len(ref)
r += bestmatch[1]
c += len(hyp)
if n == 1:
break
# computing bleu score
p0 = 1e-7
bp = 1 if c > r else math.exp(1 - float(r) / float(c))
p_ns = [float(clip_count[i]) / float(count[i] + p0) + p0 \
for i in range(4)]
s = math.fsum(w * math.log(p_n) \
for w, p_n in zip(weights, p_ns) if p_n)
bleu = bp * math.exp(s)
return bleu
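# Usage sketch (illustrative, not from the original file): `score` zips a list
# of candidate lists against a list of reference lists; an identical corpus
# gives a BLEU of roughly 1.0.
# >>> bs = BLEUScorer()
# >>> bs.score([["the cat sat on the mat"]], [["the cat sat on the mat"]])
# ~1.0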
class GentScorer(object):
def __init__(self, detectfile):
self.bleuscorer = BLEUScorer()
def scoreBLEU(self, parallel_corpus):
return self.bleuscorer.score(parallel_corpus)
def sentence_bleu_4(hyp, refs, weights=[0.25, 0.25, 0.25, 0.25]):
# input : single sentence, multiple references
count = [0, 0, 0, 0]
clip_count = [0, 0, 0, 0]
r = 0
c = 0
for i in range(4):
hypcnts = Counter(ngrams(hyp, i + 1))
cnt = sum(hypcnts.values())
count[i] += cnt
# compute clipped counts
max_counts = {}
for ref in refs:
refcnts = Counter(ngrams(ref, i + 1))
for ng in hypcnts:
max_counts[ng] = max(max_counts.get(ng, 0), refcnts[ng])
clipcnt = dict((ng, min(count, max_counts[ng])) \
for ng, count in hypcnts.items())
clip_count[i] += sum(clipcnt.values())
bestmatch = [1000, 1000]
for ref in refs:
if bestmatch[0] == 0:
break
diff = abs(len(ref) - len(hyp))
if diff < bestmatch[0]:
bestmatch[0] = diff
bestmatch[1] = len(ref)
r = bestmatch[1]
c = len(hyp)
p0 = 1e-7
bp = math.exp(-abs(1.0 - float(r) / float(c + p0)))
p_ns = [float(clip_count[i]) / float(count[i] + p0) + p0 for i in range(4)]
s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns) if p_n)
bleu_hyp = bp * math.exp(s)
return bleu_hyp
if __name__ == '__main__':
text = "restaurant's CB39AL one seven"
text = "I'm I'd restaurant's CB39AL 099939399 one seven"
text = "ndd 19.30 nndd"
m = re.findall("(\d+\.\d+)", text)
| 7,781 | 30.763265 | 124 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/multiwoz/delexicalize.py |
import re
import simplejson as json
from .nlp import normalize
digitpat = re.compile('\d+')
timepat = re.compile("\d{1,2}[:]\d{1,2}")
pricepat2 = re.compile("\d{1,3}[.]\d{1,2}")
# FORMAT
# domain_value
# restaurant_postcode
# restaurant_address
# taxi_car8
# taxi_number
# train_id etc..
def prepareSlotValuesIndependent():
domains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi', 'hospital', 'police']
requestables = ['phone', 'address', 'postcode', 'reference', 'id']
dic = []
dic_area = []
dic_food = []
dic_price = []
# read databases
for domain in domains:
try:
fin = open('data/multi-woz/db/' + domain + '_db.json', 'r')
db_json = json.load(fin)
fin.close()
for ent in db_json:
for key, val in ent.items():
if val == '?' or val == 'free':
pass
elif key == 'address':
dic.append((normalize(val), '[' + domain + '_' + 'address' + ']'))
if "road" in val:
val = val.replace("road", "rd")
dic.append((normalize(val), '[' + domain + '_' + 'address' + ']'))
elif "rd" in val:
val = val.replace("rd", "road")
dic.append((normalize(val), '[' + domain + '_' + 'address' + ']'))
elif "st" in val:
val = val.replace("st", "street")
dic.append((normalize(val), '[' + domain + '_' + 'address' + ']'))
elif "street" in val:
val = val.replace("street", "st")
dic.append((normalize(val), '[' + domain + '_' + 'address' + ']'))
elif key == 'name':
dic.append((normalize(val), '[' + domain + '_' + 'name' + ']'))
if "b & b" in val:
val = val.replace("b & b", "bed and breakfast")
dic.append((normalize(val), '[' + domain + '_' + 'name' + ']'))
elif "bed and breakfast" in val:
val = val.replace("bed and breakfast", "b & b")
dic.append((normalize(val), '[' + domain + '_' + 'name' + ']'))
elif "hotel" in val and 'gonville' not in val:
val = val.replace("hotel", "")
dic.append((normalize(val), '[' + domain + '_' + 'name' + ']'))
elif "restaurant" in val:
val = val.replace("restaurant", "")
dic.append((normalize(val), '[' + domain + '_' + 'name' + ']'))
elif key == 'postcode':
dic.append((normalize(val), '[' + domain + '_' + 'postcode' + ']'))
elif key == 'phone':
dic.append((val, '[' + domain + '_' + 'phone' + ']'))
elif key == 'trainID':
dic.append((normalize(val), '[' + domain + '_' + 'id' + ']'))
elif key == 'department':
dic.append((normalize(val), '[' + domain + '_' + 'department' + ']'))
# NORMAL DELEX
elif key == 'area':
dic_area.append((normalize(val), '[' + 'value' + '_' + 'area' + ']'))
elif key == 'food':
dic_food.append((normalize(val), '[' + 'value' + '_' + 'food' + ']'))
elif key == 'pricerange':
dic_price.append((normalize(val), '[' + 'value' + '_' + 'pricerange' + ']'))
else:
pass
# TODO car type?
except:
pass
if domain == 'hospital':
dic.append((normalize('Hills Rd'), '[' + domain + '_' + 'address' + ']'))
dic.append((normalize('Hills Road'), '[' + domain + '_' + 'address' + ']'))
dic.append((normalize('CB20QQ'), '[' + domain + '_' + 'postcode' + ']'))
dic.append(('01223245151', '[' + domain + '_' + 'phone' + ']'))
dic.append(('1223245151', '[' + domain + '_' + 'phone' + ']'))
dic.append(('0122324515', '[' + domain + '_' + 'phone' + ']'))
dic.append((normalize('Addenbrookes Hospital'), '[' + domain + '_' + 'name' + ']'))
elif domain == 'police':
dic.append((normalize('Parkside'), '[' + domain + '_' + 'address' + ']'))
dic.append((normalize('CB11JG'), '[' + domain + '_' + 'postcode' + ']'))
dic.append(('01223358966', '[' + domain + '_' + 'phone' + ']'))
dic.append(('1223358966', '[' + domain + '_' + 'phone' + ']'))
dic.append((normalize('Parkside Police Station'), '[' + domain + '_' + 'name' + ']'))
# add at the end places from trains
fin = open('data/multi-woz/db/' + 'train' + '_db.json', 'r')
db_json = json.load(fin)
fin.close()
for ent in db_json:
for key, val in ent.items():
if key == 'departure' or key == 'destination':
dic.append((normalize(val), '[' + 'value' + '_' + 'place' + ']'))
# add specific values:
for key in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']:
dic.append((normalize(key), '[' + 'value' + '_' + 'day' + ']'))
# more general values add at the end
dic.extend(dic_area)
dic.extend(dic_food)
dic.extend(dic_price)
return dic
def delexicalise(utt, dictionary):
for key, val in dictionary:
utt = (' ' + utt + ' ').replace(' ' + key + ' ', ' ' + val + ' ')
utt = utt[1:-1] # why this?
return utt
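# Usage sketch (illustrative, toy dictionary): entries are (surface form,
# placeholder) pairs and are matched on whole space-delimited spans.
# >>> delexicalise('i am looking for pizza hut fen ditton please',
# ...              [('pizza hut fen ditton', '[restaurant_name]')])
# 'i am looking for [restaurant_name] please'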
def delexicaliseDomain(utt, dictionary, domain):
for key, val in dictionary:
if key == domain or key == 'value':
utt = (' ' + utt + ' ').replace(' ' + key + ' ', ' ' + val + ' ')
utt = utt[1:-1] # why this?
# go through rest of domain in case we are missing something out?
for key, val in dictionary:
utt = (' ' + utt + ' ').replace(' ' + key + ' ', ' ' + val + ' ')
utt = utt[1:-1] # why this?
return utt
if __name__ == '__main__':
prepareSlotValuesIndependent()
| 6,421 | 42.391892 | 100 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/multiwoz/dbPointer.py |
import sqlite3
import numpy as np
from .nlp import normalize
# loading databases
domains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi', 'hospital']#, 'police']
dbs = {}
for domain in domains:
db = 'data/multi-woz/db/{}-dbase.db'.format(domain)
conn = sqlite3.connect(db)
c = conn.cursor()
dbs[domain] = c
def oneHotVector(num, domain, vector):
"""Return number of available entities for particular domain."""
number_of_options = 6
if domain != 'train':
idx = domains.index(domain)
if num == 0:
vector[idx * 6: idx * 6 + 6] = np.array([1, 0, 0, 0, 0,0])
elif num == 1:
vector[idx * 6: idx * 6 + 6] = np.array([0, 1, 0, 0, 0, 0])
elif num == 2:
vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 1, 0, 0, 0])
elif num == 3:
vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 0, 1, 0, 0])
elif num == 4:
vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 0, 0, 1, 0])
elif num >= 5:
vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 0, 0, 0, 1])
else:
idx = domains.index(domain)
if num == 0:
vector[idx * 6: idx * 6 + 6] = np.array([1, 0, 0, 0, 0, 0])
elif num <= 2:
vector[idx * 6: idx * 6 + 6] = np.array([0, 1, 0, 0, 0, 0])
elif num <= 5:
vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 1, 0, 0, 0])
elif num <= 10:
vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 0, 1, 0, 0])
elif num <= 40:
vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 0, 0, 1, 0])
elif num > 40:
vector[idx * 6: idx * 6 + 6] = np.array([0, 0, 0, 0, 0, 1])
return vector
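# Usage sketch (illustrative): 'restaurant' is index 0 in `domains`, so a DB
# count of 3 fills that domain's 6-bin slice as follows.
# >>> vec = oneHotVector(3, 'restaurant', np.zeros(6 * len(domains)))
# >>> vec[:6]
# array([0., 0., 0., 1., 0., 0.])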
def queryResult(domain, turn):
"""Returns the list of entities for a given domain
based on the annotation of the belief state"""
# query the db
sql_query = "select * from {}".format(domain)
flag = True
#print turn['metadata'][domain]['semi']
for key, val in turn['metadata'][domain]['semi'].items():
if val == "" or val == "dont care" or val == 'not mentioned' or val == "don't care" or val == "dontcare" or val == "do n't care":
pass
else:
if flag:
sql_query += " where "
val2 = val.replace("'", "''")
#val2 = normalize(val2)
# change query for trains
if key == 'leaveAt':
sql_query += r" " + key + " > " + r"'" + val2 + r"'"
elif key == 'arriveBy':
sql_query += r" " + key + " < " + r"'" + val2 + r"'"
else:
sql_query += r" " + key + "=" + r"'" + val2 + r"'"
flag = False
else:
val2 = val.replace("'", "''")
#val2 = normalize(val2)
if key == 'leaveAt':
sql_query += r" and " + key + " > " + r"'" + val2 + r"'"
elif key == 'arriveBy':
sql_query += r" and " + key + " < " + r"'" + val2 + r"'"
else:
sql_query += r" and " + key + "=" + r"'" + val2 + r"'"
#try: # "select * from attraction where name = 'queens college'"
#print sql_query
#print domain
num_entities = len(dbs[domain].execute(sql_query).fetchall())
return num_entities
def queryResultVenues(domain, turn, real_belief=False):
# query the db
sql_query = "select * from {}".format(domain)
if real_belief == True:
items = turn.items()
elif real_belief=='tracking':
for slot in turn[domain]:
key = slot[0].split("-")[1]
val = slot[0].split("-")[2]
if key == "price range":
key = "pricerange"
elif key == "leave at":
key = "leaveAt"
elif key == "arrive by":
key = "arriveBy"
if val == "do n't care":
pass
else:
if flag:
sql_query += " where "
val2 = val.replace("'", "''")
val2 = normalize(val2)
if key == 'leaveAt':
sql_query += key + " > " + r"'" + val2 + r"'"
elif key == 'arriveBy':
sql_query += key + " < " + r"'" + val2 + r"'"
else:
sql_query += r" " + key + "=" + r"'" + val2 + r"'"
flag = False
else:
val2 = val.replace("'", "''")
val2 = normalize(val2)
if key == 'leaveAt':
sql_query += r" and " + key + " > " + r"'" + val2 + r"'"
elif key == 'arriveBy':
sql_query += r" and " + key + " < " + r"'" + val2 + r"'"
else:
sql_query += r" and " + key + "=" + r"'" + val2 + r"'"
try: # "select * from attraction where name = 'queens college'"
return dbs[domain].execute(sql_query).fetchall()
except:
return [] # TODO test it
pass
else:
items = turn['metadata'][domain]['semi'].items()
flag = True
for key, val in items:
if val == "" or val == "dontcare" or val == 'not mentioned' or val == "don't care" or val == "dont care" or val == "do n't care":
pass
else:
if flag:
sql_query += " where "
val2 = val.replace("'", "''")
val2 = normalize(val2)
if key == 'leaveAt':
sql_query += r" " + key + " > " + r"'" + val2 + r"'"
elif key == 'arriveBy':
sql_query += r" " +key + " < " + r"'" + val2 + r"'"
else:
sql_query += r" " + key + "=" + r"'" + val2 + r"'"
flag = False
else:
val2 = val.replace("'", "''")
val2 = normalize(val2)
if key == 'leaveAt':
sql_query += r" and " + key + " > " + r"'" + val2 + r"'"
elif key == 'arriveBy':
sql_query += r" and " + key + " < " + r"'" + val2 + r"'"
else:
sql_query += r" and " + key + "=" + r"'" + val2 + r"'"
try: # "select * from attraction where name = 'queens college'"
return dbs[domain].execute(sql_query).fetchall()
except:
return [] # TODO test it
| 6,600 | 37.377907 | 137 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/multiwoz/fix_label.py |
def fix_general_label_error(labels, type, slots, ontology_version=""):
label_dict = dict([ (l[0], l[1]) for l in labels]) if type else dict([ (l["slots"][0][0], l["slots"][0][1]) for l in labels])
GENERAL_TYPO = {
# type
"guesthouse":"guest house","guesthouses":"guest house","guest":"guest house","mutiple sports":"multiple sports",
"mutliple sports":"multiple sports","sports":"multiple sports","swimmingpool":"swimming pool",
"concerthall":"concert hall", "concert":"concert hall", "pool":"swimming pool", "night club":"nightclub", "mus":"museum",
"colleges":"college", "coll":"college","architectural":"architecture", "musuem":"museum", "churches":"church",
# area
"center":"centre", "center of town":"centre", "near city center":"centre", "in the north":"north",
"cen":"centre", "east side":"east","east area":"east", "west part of town":"west", "ce":"centre",
"town center":"centre", "centre of cambridge":"centre",
"city center":"centre", "the south":"south", "scentre":"centre", "town centre":"centre", "in town":"centre",
"north part of town":"north", "centre of town":"centre", "cb30aq": "none",
# price
"mode":"moderate", "moderate -ly": "moderate", "mo":"moderate",
# day
"monda": "monday",
# parking
"free parking":"free",
# internet
"free internet":"yes",
# star
"4 star":"4", "4 stars":"4", "0 star rarting":"none",
# others
"y":"yes", "any":"do n't care", "does not care":"do n't care", "not men":"none", "not":"none",
"not mentioned":"none", '':"none", "not mendtioned":"none", "3 .":"3", "does not":"no", "fun":"none",
}
for slot in slots:
if slot in label_dict.keys():
# general typos
if label_dict[slot] in GENERAL_TYPO.keys():
label_dict[slot] = label_dict[slot].replace(label_dict[slot], GENERAL_TYPO[label_dict[slot]])
# do not care
if label_dict[slot] in ["doesn't care", "don't care", "dont care", "does not care", "do not care", "dontcare"]:
label_dict[slot] = "do n't care"
# miss match slot and value
if slot == "hotel-type" and label_dict[slot] in ["nigh", "moderate -ly priced", "bed and breakfast", "centre", "venetian", "intern", "a cheap -er hotel"]:
label_dict[slot] = "none"
if slot == "hotel-internet" and label_dict[slot] == "4":
label_dict[slot] = "none"
if slot == "hotel-internet" and label_dict[slot] == "4":
label_dict[slot] = "none"
if slot == "hotel-pricerange" and label_dict[slot] == "2":
label_dict[slot] = "none"
if "area" in slot and label_dict[slot] in ["moderate"]:
label_dict[slot] = "none"
if "day" in slot and label_dict[slot] == "t":
label_dict[slot] = "none"
if slot == "hotel-type" and label_dict[slot] in ["hotel with free parking and free wifi", "4", "3 star hotel"]:
label_dict[slot] = "hotel"
if slot == "hotel-star" and label_dict[slot] == "3 star hotel":
label_dict[slot] = "3"
if "area" in slot:
if label_dict[slot] == "no":
label_dict[slot] = "north"
elif label_dict[slot] == "we":
label_dict[slot] = "west"
elif label_dict[slot] == "cent":
label_dict[slot] = "centre"
if "day" in slot:
if label_dict[slot] == "we":
label_dict[slot] = "wednesday"
elif label_dict[slot] == "no":
label_dict[slot] = "none"
if "price" in slot and label_dict[slot] == "ch":
label_dict[slot] = "cheap"
if "internet" in slot and label_dict[slot] == "free":
label_dict[slot] = "yes"
# Add on May, 2020
if ontology_version in ["1.0"]:
label_dict[slot] = label_dict[slot].replace("theater", "theatre").replace("guesthouse", "guest house")
# Typo or naming
if label_dict[slot] == "cafe uno":
label_dict[slot] = "caffe uno"
if label_dict[slot] == "alpha milton guest house":
label_dict[slot] = "alpha-milton guest house"
if label_dict[slot] in ["churchills college", "churchhill college", "churchill", "the churchill college"]:
label_dict[slot] = "churchill college"
if label_dict[slot] == "portugese":
label_dict[slot] = "portuguese"
if label_dict[slot] == "pizza hut fenditton":
label_dict[slot] = "pizza hut fen ditton"
if label_dict[slot] == "restaurant 17":
label_dict[slot] = "restaurant one seven"
if label_dict[slot] == "restaurant 2 two":
label_dict[slot] = "restaurant two two"
if label_dict[slot] == "gallery at 12 a high street":
label_dict[slot] = "gallery at twelve a high street"
if label_dict[slot] == "museum of archaelogy":
label_dict[slot] = "museum of archaelogy and anthropology"
if label_dict[slot] in ["huntingdon marriot hotel", "marriot hotel"]:
label_dict[slot] = "huntingdon marriott hotel"
if label_dict[slot] in ["sheeps green and lammas land park fen causeway", "sheeps green and lammas land park"]:
label_dict[slot] = "sheep's green and lammas land park fen causeway"
if label_dict[slot] in ["cambridge and country folk museum", "county folk museum"]:
label_dict[slot] = "cambridge and county folk museum"
if label_dict[slot] == "ambridge":
label_dict[slot] = "cambridge"
if label_dict[slot] == "cambridge contemporary art museum":
label_dict[slot] = "cambridge contemporary art"
if label_dict[slot] == "molecular gastonomy":
label_dict[slot] = "molecular gastronomy"
if label_dict[slot] == "2 two and cote":
label_dict[slot] = "two two and cote"
if label_dict[slot] == "caribbeanindian":
label_dict[slot] = "caribbean|indian"
if label_dict[slot] == "whipple museum":
label_dict[slot] = "whipple museum of the history of science"
if label_dict[slot] == "ian hong":
label_dict[slot] = "ian hong house"
if label_dict[slot] == "sundaymonday":
label_dict[slot] = "sunday|monday"
if label_dict[slot] == "mondaythursday":
label_dict[slot] = "monday|thursday"
if label_dict[slot] == "fridaytuesday":
label_dict[slot] = "friday|tuesday"
if label_dict[slot] == "cheapmoderate":
label_dict[slot] = "cheap|moderate"
if label_dict[slot] == "golden house golden house":
label_dict[slot] = "the golden house"
if label_dict[slot] == "golden house":
label_dict[slot] = "the golden house"
if label_dict[slot] == "sleeperz":
label_dict[slot] = "sleeperz hotel"
if label_dict[slot] == "jamaicanchinese":
label_dict[slot] = "jamaican|chinese"
if label_dict[slot] == "shiraz":
label_dict[slot] = "shiraz restaurant"
if label_dict[slot] == "museum of archaelogy and anthropogy":
label_dict[slot] = "museum of archaelogy and anthropology"
if label_dict[slot] == "yipee noodle bar":
label_dict[slot] = "yippee noodle bar"
if label_dict[slot] == "abc theatre":
label_dict[slot] = "adc theatre"
if label_dict[slot] == "wankworth house":
label_dict[slot] = "warkworth house"
if label_dict[slot] in ["cherry hinton water play park", "cherry hinton water park"]:
label_dict[slot] = "cherry hinton water play"
if label_dict[slot] == "the gallery at 12":
label_dict[slot] = "the gallery at twelve"
if label_dict[slot] == "barbequemodern european":
label_dict[slot] = "barbeque|modern european"
if label_dict[slot] == "north americanindian":
label_dict[slot] = "north american|indian"
if label_dict[slot] == "chiquito":
label_dict[slot] = "chiquito restaurant bar"
# Abbreviation
if label_dict[slot] == "city centre north bed and breakfast":
label_dict[slot] = "city centre north b and b"
if label_dict[slot] == "north bed and breakfast":
label_dict[slot] = "north b and b"
# Article and 's
if label_dict[slot] == "christ college":
label_dict[slot] = "christ's college"
if label_dict[slot] == "kings college":
label_dict[slot] = "king's college"
if label_dict[slot] == "saint johns college":
label_dict[slot] = "saint john's college"
if label_dict[slot] == "kettles yard":
label_dict[slot] = "kettle's yard"
if label_dict[slot] == "rosas bed and breakfast":
label_dict[slot] = "rosa's bed and breakfast"
if label_dict[slot] == "saint catharines college":
label_dict[slot] = "saint catharine's college"
if label_dict[slot] == "little saint marys church":
label_dict[slot] = "little saint mary's church"
if label_dict[slot] == "great saint marys church":
label_dict[slot] = "great saint mary's church"
if label_dict[slot] in ["queens college", "queens' college"]:
label_dict[slot] = "queen's college"
if label_dict[slot] == "peoples portraits exhibition at girton college":
label_dict[slot] = "people's portraits exhibition at girton college"
if label_dict[slot] == "st johns college":
label_dict[slot] = "saint john's college"
if label_dict[slot] == "whale of time":
label_dict[slot] = "whale of a time"
if label_dict[slot] in ["st catharines college", "saint catharines college"]:
label_dict[slot] = "saint catharine's college"
# Time
if label_dict[slot] == "16,15":
label_dict[slot] = "16:15"
if label_dict[slot] == "1330":
label_dict[slot] = "13:30"
if label_dict[slot] == "1430":
label_dict[slot] = "14:30"
if label_dict[slot] == "1532":
label_dict[slot] = "15:32"
if label_dict[slot] == "845":
label_dict[slot] = "08:45"
if label_dict[slot] == "1145":
label_dict[slot] = "11:45"
if label_dict[slot] == "1545":
label_dict[slot] = "15:45"
if label_dict[slot] == "1329":
label_dict[slot] = "13:29"
if label_dict[slot] == "1345":
label_dict[slot] = "13:45"
if label_dict[slot] == "1715":
label_dict[slot] = "17:15"
if label_dict[slot] == "929":
label_dict[slot] = "09:29"
# restaurant
if slot == "restaurant-name" and "meze bar" in label_dict[slot]:
label_dict[slot] = "meze bar restaurant"
if slot == "restaurant-name" and label_dict[slot] == "alimentum":
label_dict[slot] = "restaurant alimentum"
if slot == "restaurant-name" and label_dict[slot] == "good luck":
label_dict[slot] = "the good luck chinese food takeaway"
if slot == "restaurant-name" and label_dict[slot] == "grafton hotel":
label_dict[slot] = "grafton hotel restaurant"
if slot == "restaurant-name" and label_dict[slot] == "2 two":
label_dict[slot] = "restaurant two two"
if slot == "restaurant-name" and label_dict[slot] == "hotpot":
label_dict[slot] = "the hotpot"
if slot == "restaurant-name" and label_dict[slot] == "hobsons house":
label_dict[slot] = "hobson house"
if slot == "restaurant-name" and label_dict[slot] == "shanghai":
label_dict[slot] = "shanghai family restaurant"
if slot == "restaurant-name" and label_dict[slot] == "17":
label_dict[slot] = "restaurant one seven"
if slot == "restaurant-name" and label_dict[slot] in ["22", "restaurant 22"]:
label_dict[slot] = "restaurant two two"
if slot == "restaurant-name" and label_dict[slot] == "the maharajah tandoor":
label_dict[slot] = "maharajah tandoori restaurant"
if slot == "restaurant-name" and label_dict[slot] == "the grafton hotel":
label_dict[slot] = "grafton hotel restaurant"
if slot == "restaurant-name" and label_dict[slot] == "gardenia":
label_dict[slot] = "the gardenia"
if slot == "restaurant-name" and label_dict[slot] == "el shaddia guest house":
label_dict[slot] = "el shaddai"
if slot == "restaurant-name" and label_dict[slot] == "the bedouin":
label_dict[slot] = "bedouin"
if slot == "restaurant-name" and label_dict[slot] == "the kohinoor":
label_dict[slot] = "kohinoor"
if slot == "restaurant-name" and label_dict[slot] == "the peking":
label_dict[slot] = "peking restaurant"
if slot == "restaurant-book time" and label_dict[slot] == "7pm":
label_dict[slot] = "19:00"
if slot == "restaurant-book time" and label_dict[slot] == "4pm":
label_dict[slot] = "16:00"
if slot == "restaurant-book time" and label_dict[slot] == "8pm":
label_dict[slot] = "20:00"
if slot == "restaurant-name" and label_dict[slot] == "sitar":
label_dict[slot] = "sitar tandoori"
if slot == "restaurant-name" and label_dict[slot] == "binh":
label_dict[slot] = "thanh binh"
if slot == "restaurant-name" and label_dict[slot] == "mahal":
label_dict[slot] = "mahal of cambridge"
# attraction
if slot == "attraction-name" and label_dict[slot] == "scudamore":
label_dict[slot] = "scudamores punting co"
if slot == "attraction-name" and label_dict[slot] == "salsa":
label_dict[slot] = "club salsa"
if slot == "attraction-name" and label_dict[slot] in ["abbey pool", "abbey pool and astroturf"]:
label_dict[slot] = "abbey pool and astroturf pitch"
if slot == "attraction-name" and label_dict[slot] == "cherry hinton hall":
label_dict[slot] = "cherry hinton hall and grounds"
if slot == "attraction-name" and label_dict[slot] == "trinity street college":
label_dict[slot] = "trinity college"
if slot == "attraction-name" and label_dict[slot] == "the wandlebury":
label_dict[slot] = "wandlebury country park"
if slot == "attraction-name" and label_dict[slot] == "king hedges learner pool":
label_dict[slot] = "kings hedges learner pool"
if slot == "attraction-name" and label_dict[slot] in ["botanic gardens", "cambridge botanic gardens"]:
label_dict[slot] = "cambridge university botanic gardens"
if slot == "attraction-name" and label_dict[slot] == "soultree":
label_dict[slot] = "soul tree nightclub"
if slot == "attraction-name" and label_dict[slot] == "queens":
label_dict[slot] = "queen's college"
if slot == "attraction-name" and label_dict[slot] == "sheeps green":
label_dict[slot] = "sheep's green and lammas land park fen causeway"
if slot == "attraction-name" and label_dict[slot] == "jesus green":
label_dict[slot] = "jesus green outdoor pool"
if slot == "attraction-name" and label_dict[slot] == "adc":
label_dict[slot] = "adc theatre"
if slot == "attraction-name" and label_dict[slot] == "hobsons house":
label_dict[slot] = "hobson house"
if slot == "attraction-name" and label_dict[slot] == "cafe jello museum":
label_dict[slot] = "cafe jello gallery"
if slot == "attraction-name" and label_dict[slot] == "whippple museum":
label_dict[slot] = "whipple museum of the history of science"
if slot == "attraction-type" and label_dict[slot] == "boating":
label_dict[slot] = "boat"
if slot == "attraction-name" and label_dict[slot] == "peoples portraits exhibition":
label_dict[slot] = "people's portraits exhibition at girton college"
if slot == "attraction-name" and label_dict[slot] == "lammas land park":
label_dict[slot] = "sheep's green and lammas land park fen causeway"
# taxi
if slot in ["taxi-destination", "taxi-departure"] and label_dict[slot] == "meze bar":
label_dict[slot] = "meze bar restaurant"
if slot in ["taxi-destination", "taxi-departure"] and label_dict[slot] == "el shaddia guest house":
label_dict[slot] = "el shaddai"
if slot == "taxi-departure" and label_dict[slot] == "centre of town at my hotel":
label_dict[slot] = "hotel"
# train
if slot == "train-departure" and label_dict[slot] in ["liverpool", "london liverpool"]:
label_dict[slot] = "london liverpool street"
if slot == "train-destination" and label_dict[slot] == "liverpool street":
label_dict[slot] = "london liverpool street"
if slot == "train-departure" and label_dict[slot] == "alpha milton":
label_dict[slot] = "alpha-milton"
# hotel
if slot == "hotel-name" and label_dict[slot] == "el shaddia guest house":
label_dict[slot] = "el shaddai"
if slot == "hotel-name" and label_dict[slot] == "alesbray lodge guest house":
label_dict[slot] = "aylesbray lodge guest house"
if slot == "hotel-name" and label_dict[slot] == "the gonvile hotel":
label_dict[slot] = "the gonville hotel"
if slot == "hotel-name" and label_dict[slot] == "no":
label_dict[slot] = "none"
if slot == "hotel-name" and label_dict[slot] in ["holiday inn", "holiday inn cambridge"]:
label_dict[slot] = "express by holiday inn cambridge"
if slot == "hotel-name" and label_dict[slot] == "wartworth":
label_dict[slot] = "warkworth house"
# Supposed to be wrong annotations
if slot == "restaurant-name" and label_dict[slot] == "south":
label_dict[slot] = "none"
if slot == "attraction-type" and label_dict[slot] == "churchill college":
label_dict[slot] = "none"
if slot == "attraction-name" and label_dict[slot] == "boat":
label_dict[slot] = "none"
if slot == "attraction-type" and label_dict[slot] == "museum kettles yard":
label_dict[slot] = "none"
if slot == "attraction-type" and label_dict[slot] == "hotel":
label_dict[slot] = "none"
if slot == "attraction-type" and label_dict[slot] == "camboats":
label_dict[slot] = "boat"
# TODO: Need to check with dialogue data to deal with strange labels before
# if slot == "restaurant-name" and label_dict[slot] == "eraina and michaelhouse cafe":
# label_dict[slot] = "eraina|michaelhouse cafe"
# if slot == "attraction-name" and label_dict[slot] == "gonville hotel":
# label_dict[slot] = "none"
# if label_dict[slot] == "good luck":
# label_dict[slot] = "the good luck chinese food takeaway"
# if slot == "restaurant-book time" and label_dict[slot] == "9":
# label_dict[slot] = "21:00"
# if slot == "taxi-departure" and label_dict[slot] == "girton college":
# label_dict[slot] = "people's portraits exhibition at girton college"
# if slot == "restaurant-name" and label_dict[slot] == "molecular gastronomy":
# label_dict[slot] = "none"
# [Info] Adding Slot: restaurant-name with value: primavera
# [Info] Adding Slot: train-departure with value: huntingdon
# [Info] Adding Slot: attraction-name with value: aylesbray lodge guest house
# [Info] Adding Slot: attraction-name with value: gallery
# [Info] Adding Slot: hotel-name with value: eraina
# [Info] Adding Slot: restaurant-name with value: india west
# [Info] Adding Slot: restaurant-name with value: autumn house
# [Info] Adding Slot: train-destination with value: norway
# [Info] Adding Slot: attraction-name with value: cinema cinema
# [Info] Adding Slot: hotel-name with value: lan hon
# [Info] Adding Slot: restaurant-food with value: sushi
# [Info] Adding Slot: attraction-name with value: university arms hotel
# [Info] Adding Slot: train-departure with value: stratford
# [Info] Adding Slot: attraction-name with value: history of science museum
# [Info] Adding Slot: restaurant-name with value: nil
# [Info] Adding Slot: train-leaveat with value: 9
# [Info] Adding Slot: restaurant-name with value: ashley hotel
# [Info] Adding Slot: taxi-destination with value: the cambridge shop
# [Info] Adding Slot: hotel-name with value: acorn place
# [Info] Adding Slot: restaurant-name with value: de luca cucina and bar riverside brasserie
# [Info] Adding Slot: hotel-name with value: super 5
# [Info] Adding Slot: attraction-name with value: archway house
# [Info] Adding Slot: train-arriveby with value: 8
# [Info] Adding Slot: train-leaveat with value: 10
# [Info] Adding Slot: restaurant-book time with value: 9
# [Info] Adding Slot: hotel-name with value: nothamilton lodge
# [Info] Adding Slot: attraction-name with value: st christs college
return label_dict
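if __name__ == "__main__":
    # Tiny illustrative check (not part of the original file). With `type`
    # truthy, labels are plain (slot, value) pairs.
    toy_labels = [("hotel-area", "center"), ("hotel-internet", "free")]
    toy_slots = ["hotel-area", "hotel-internet"]
    print(fix_general_label_error(toy_labels, True, toy_slots))
    # expected: {'hotel-area': 'centre', 'hotel-internet': 'yes'}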
| 24,598 | 59.439803 | 167 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/metrics/measures.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy
import os
import re
import subprocess
import tempfile
import numpy as np
from six.moves import urllib
def word_error_rate(r, h):
"""
This function calculates the word error rate (WER) used in ASR evaluation.
You can use it like this: word_error_rate("what is it".split(), "what is".split())
"""
#build the matrix
d = numpy.zeros((len(r)+1)*(len(h)+1), dtype=numpy.uint8).reshape((len(r)+1, len(h)+1))
for i in range(len(r)+1):
for j in range(len(h)+1):
if i == 0: d[0][j] = j
elif j == 0: d[i][0] = i
for i in range(1,len(r)+1):
for j in range(1, len(h)+1):
if r[i-1] == h[j-1]:
d[i][j] = d[i-1][j-1]
else:
substitute = d[i-1][j-1] + 1
insert = d[i][j-1] + 1
delete = d[i-1][j] + 1
d[i][j] = min(substitute, insert, delete)
result = float(d[len(r)][len(h)]) / len(r) * 100
# result = str("%.2f" % result) + "%"
return result
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BLEU metric implementation.
"""
def moses_multi_bleu(hypotheses, references, lowercase=False):
"""Calculate the bleu score for hypotheses and references
using the MOSES multi-bleu.perl script.
Args:
hypotheses: A numpy array of strings where each string is a single example.
references: A numpy array of strings where each string is a single example.
lowercase: If true, pass the "-lc" flag to the multi-bleu script
Returns:
The BLEU score as a float32 value.
"""
if np.size(hypotheses) == 0:
return np.float32(0.0)
# Get MOSES multi-bleu script
try:
multi_bleu_path, _ = urllib.request.urlretrieve(
"https://raw.githubusercontent.com/moses-smt/mosesdecoder/"
"master/scripts/generic/multi-bleu.perl")
os.chmod(multi_bleu_path, 0o755)
except: #pylint: disable=W0702
print("Unable to fetch multi-bleu.perl script, using local.")
metrics_dir = os.path.dirname(os.path.realpath(__file__))
bin_dir = os.path.abspath(os.path.join(metrics_dir, "..", "..", "bin"))
multi_bleu_path = os.path.join(bin_dir, "tools/multi-bleu.perl")
# Dump hypotheses and references to tempfiles
hypothesis_file = tempfile.NamedTemporaryFile()
hypothesis_file.write("\n".join(hypotheses).encode("utf-8"))
hypothesis_file.write(b"\n")
hypothesis_file.flush()
reference_file = tempfile.NamedTemporaryFile()
reference_file.write("\n".join(references).encode("utf-8"))
reference_file.write(b"\n")
reference_file.flush()
# Calculate BLEU using multi-bleu script
with open(hypothesis_file.name, "r") as read_pred:
bleu_cmd = [multi_bleu_path]
if lowercase:
bleu_cmd += ["-lc"]
bleu_cmd += [reference_file.name]
try:
bleu_out = subprocess.check_output(bleu_cmd, stdin=read_pred, stderr=subprocess.STDOUT)
bleu_out = bleu_out.decode("utf-8")
bleu_score = re.search(r"BLEU = (.+?),", bleu_out).group(1)
bleu_score = float(bleu_score)
except subprocess.CalledProcessError as error:
if error.output is not None:
print("multi-bleu.perl script returned non-zero exit code")
print(error.output)
bleu_score = np.float32(0.0)
# Close temp files
hypothesis_file.close()
reference_file.close()
return bleu_score
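if __name__ == "__main__":
    # Illustrative smoke test (not part of the original file). The WER call is
    # self-contained: one deletion over a 3-word reference gives ~33.33.
    # The BLEU call needs perl plus either network access (to fetch
    # multi-bleu.perl) or a local copy under the bin/tools fallback path above,
    # so treat it as optional.
    print(word_error_rate("what is it".split(), "what is".split()))
    print(moses_multi_bleu(["the cat sat on the mat"],
                           ["the cat sat on the mat"], lowercase=True))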
| 4,190 | 35.12931 | 99 | py |
Multi2WOZ | Multi2WOZ-main/downstream/utils/loss_function/masked_cross_entropy.py |
import torch
from torch.nn import functional
from torch.autograd import Variable
from utils.config import *
import torch.nn as nn
import numpy as np
def sequence_mask(sequence_length, max_len=None):
if max_len is None:
max_len = sequence_length.data.max()
batch_size = sequence_length.size(0)
seq_range = torch.arange(0, max_len).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
seq_range_expand = Variable(seq_range_expand)
if sequence_length.is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = (sequence_length.unsqueeze(1)
.expand_as(seq_range_expand))
return seq_range_expand < seq_length_expand
def cross_entropy(logits, target):
batch_size = logits.size(0)
log_probs_flat = functional.log_softmax(logits)
losses_flat = -torch.gather(log_probs_flat, dim=1, index=target)
loss = losses_flat.sum() / batch_size
return loss
def masked_cross_entropy(logits, target, length):
"""
Args:
logits: A Variable containing a FloatTensor of size
(batch, max_len, num_classes) which contains the
unnormalized probability for each class.
target: A Variable containing a LongTensor of size
(batch, max_len) which contains the index of the true
class for each corresponding step.
length: A Variable containing a LongTensor of size (batch,)
which contains the length of each data in a batch.
Returns:
loss: An average loss value masked by the length.
"""
if USE_CUDA:
length = Variable(torch.LongTensor(length)).cuda()
else:
length = Variable(torch.LongTensor(length))
# logits_flat: (batch * max_len, num_classes)
logits_flat = logits.view(-1, logits.size(-1)) ## -1 means inferred from the other dimensions
# log_probs_flat: (batch * max_len, num_classes)
log_probs_flat = functional.log_softmax(logits_flat, dim=1)
# target_flat: (batch * max_len, 1)
target_flat = target.view(-1, 1)
# losses_flat: (batch * max_len, 1)
losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
# losses: (batch, max_len)
losses = losses_flat.view(*target.size())
# mask: (batch, max_len)
mask = sequence_mask(sequence_length=length, max_len=target.size(1))
losses = losses * mask.float()
loss = losses.sum() / length.float().sum()
return loss
def masked_binary_cross_entropy(logits, target, length):
'''
logits: (batch, max_len, num_class)
target: (batch, max_len, num_class)
'''
if USE_CUDA:
length = Variable(torch.LongTensor(length)).cuda()
else:
length = Variable(torch.LongTensor(length))
bce_criterion = nn.BCEWithLogitsLoss()
loss = 0
for bi in range(logits.size(0)):
for i in range(logits.size(1)):
if i < length[bi]:
loss += bce_criterion(logits[bi][i], target[bi][i])
loss = loss / length.float().sum()
return loss
def masked_cross_entropy_(logits, target, length, take_log=False):
if USE_CUDA:
length = Variable(torch.LongTensor(length)).cuda()
else:
length = Variable(torch.LongTensor(length))
# logits_flat: (batch * max_len, num_classes)
logits_flat = logits.view(-1, logits.size(-1)) ## -1 means inferred from the other dimensions
if take_log:
logits_flat = torch.log(logits_flat)
# target_flat: (batch * max_len, 1)
target_flat = target.view(-1, 1)
# losses_flat: (batch * max_len, 1)
losses_flat = -torch.gather(logits_flat, dim=1, index=target_flat)
# losses: (batch, max_len)
losses = losses_flat.view(*target.size())
# mask: (batch, max_len)
mask = sequence_mask(sequence_length=length, max_len=target.size(1))
losses = losses * mask.float()
loss = losses.sum() / length.float().sum()
return loss
def masked_coverage_loss(coverage, attention, length):
if USE_CUDA:
length = Variable(torch.LongTensor(length)).cuda()
else:
length = Variable(torch.LongTensor(length))
mask = sequence_mask(sequence_length=length)
min_ = torch.min(coverage, attention)
mask = mask.unsqueeze(2).expand_as(min_)
min_ = min_ * mask.float()
loss = min_.sum() / (len(length)*1.0)
return loss
def masked_cross_entropy_for_slot(logits, target, mask, use_softmax=True):
# print("logits", logits)
# print("target", target)
logits_flat = logits.view(-1, logits.size(-1)) ## -1 means inferred from the other dimensions
# print(logits_flat.size())
if use_softmax:
log_probs_flat = functional.log_softmax(logits_flat, dim=1)
else:
log_probs_flat = logits_flat #torch.log(logits_flat)
# print("log_probs_flat", log_probs_flat)
target_flat = target.view(-1, 1)
# print("target_flat", target_flat)
losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
losses = losses_flat.view(*target.size()) # b * |s|
losses = losses * mask.float()
loss = losses.sum() / (losses.size(0)*losses.size(1))
# print("loss inside", loss)
return loss
def masked_cross_entropy_for_value(logits, target, mask):
# logits: b * |s| * m * |v|
# target: b * |s| * m
# mask: b * |s|
logits_flat = logits.view(-1, logits.size(-1)) ## -1 means inferred from the other dimensions
# print(logits_flat.size())
log_probs_flat = torch.log(logits_flat)
# print("log_probs_flat", log_probs_flat)
target_flat = target.view(-1, 1)
# print("target_flat", target_flat)
losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
losses = losses_flat.view(*target.size()) # b * |s| * m
loss = masking(losses, mask)
return loss
def masking(losses, mask):
mask_ = []
batch_size = mask.size(0)
max_len = losses.size(2)
for si in range(mask.size(1)):
seq_range = torch.arange(0, max_len).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
if mask[:,si].is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = mask[:, si].unsqueeze(1).expand_as(seq_range_expand)
mask_.append( (seq_range_expand < seq_length_expand) )
mask_ = torch.stack(mask_)
mask_ = mask_.transpose(0, 1)
if losses.is_cuda:
mask_ = mask_.cuda()
losses = losses * mask_.float()
loss = losses.sum() / (mask_.sum().float())
return loss
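if __name__ == "__main__":
    # Illustrative shape check only (not part of the original file); running it
    # requires `utils.config` (which defines USE_CUDA) to be importable, e.g.
    # from the downstream/ directory.
    lengths = [2, 3]
    logits = torch.randn(2, 3, 4)         # (batch, max_len, num_classes)
    target = torch.randint(0, 4, (2, 3))  # (batch, max_len)
    print(sequence_mask(torch.LongTensor(lengths), max_len=3))
    print(masked_cross_entropy(logits, target, lengths))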
| 6,501 | 36.802326 | 92 | py |
custom-diffusion | custom-diffusion-main/sample.py |
# This code is built from the Stable Diffusion repository: https://github.com/CompVis/stable-diffusion.
# Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors.
# CreativeML Open RAIL-M
#
# ==========================================================================================
#
# Adobe’s modifications are Copyright 2022 Adobe Research. All rights reserved.
# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
# LICENSE.md.
#
# ==========================================================================================
#
# CreativeML Open RAIL-M License
#
# Section I: PREAMBLE
# Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation.
# Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.
# In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation.
# Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI.
# This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.
# NOW THEREFORE, You and Licensor agree as follows:
# 1. Definitions
# - "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
# - "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
# - "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
# - "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
# - "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
# - "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
# - "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
# - "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
# - "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
# - "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
# - "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
# - "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.
# Section II: INTELLECTUAL PROPERTY RIGHTS
# Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
# 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
# 3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.
# Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
# 4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
# Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
# You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
# You must cause any modified files to carry prominent notices stating that You changed the files;
# You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
# You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
# 5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
# 6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
# Section IV: OTHER PROVISIONS
# 7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model.
# 8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
# 9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
# 10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
# 11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
# 12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
# END OF TERMS AND CONDITIONS
# Attachment A
# Use Restrictions
# You agree not to use the Model or Derivatives of the Model:
# - In any way that violates any applicable national, federal, state, local or international law or regulation;
# - For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
# - To generate or disseminate verifiably false information and/or content with the purpose of harming others;
# - To generate or disseminate personal identifiable information that can be used to harm an individual;
# - To defame, disparage or otherwise harass others;
# - For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
# - For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
# - To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
# - For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories;
# - To provide medical advice and medical results interpretation;
# - To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).
import argparse, os, sys, glob
sys.path.append('stable-diffusion')
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from einops import rearrange
from torchvision.utils import make_grid
from pytorch_lightning import seed_everything
from torch import autocast
from contextlib import contextmanager, nullcontext
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
import wandb
def load_model_from_config(config, ckpt, verbose=False):
print(f"Loading model from {ckpt}")
pl_sd = torch.load(ckpt, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
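    # Note: the model's token-embedding table may be larger than the checkpoint's (e.g. when
    # modifier tokens were added), so the embedding weight is handled separately: drop it from
    # the state dict, load everything else non-strictly, then copy the checkpoint rows into the
    # first rows of the (possibly larger) embedding so newly added rows keep their initialization.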
token_weights = sd["cond_stage_model.transformer.text_model.embeddings.token_embedding.weight"]
del sd["cond_stage_model.transformer.text_model.embeddings.token_embedding.weight"]
m, u = model.load_state_dict(sd, strict=False)
model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data[:token_weights.shape[0]] = token_weights
if len(m) > 0 and verbose:
print("missing keys:")
print(m)
if len(u) > 0 and verbose:
print("unexpected keys:")
print(u)
model.cuda()
model.eval()
return model
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--prompt",
type=str,
nargs="?",
default="a painting of a virus monster playing guitar",
help="the prompt to render"
)
parser.add_argument(
"--outdir",
type=str,
nargs="?",
help="dir to write results to",
default="outputs/txt2img-samples"
)
parser.add_argument(
"--skip_grid",
action='store_true',
help="do not save a grid, only individual samples. Helpful when evaluating lots of samples",
)
parser.add_argument(
"--skip_save",
action='store_true',
help="do not save individual samples. For speed measurements.",
)
parser.add_argument(
"--ddim_steps",
type=int,
default=200,
help="number of ddim sampling steps",
)
parser.add_argument(
"--plms",
action='store_true',
help="use plms sampling",
)
parser.add_argument(
"--laion400m",
action='store_true',
help="uses the LAION400M model",
)
parser.add_argument(
"--fixed_code",
action='store_true',
help="if enabled, uses the same starting code across samples ",
)
parser.add_argument(
"--ddim_eta",
type=float,
default=1.0,
help="ddim eta (eta=0.0 corresponds to deterministic sampling",
)
parser.add_argument(
"--n_iter",
type=int,
default=1,
help="sample this often",
)
parser.add_argument(
"--H",
type=int,
default=512,
help="image height, in pixel space",
)
parser.add_argument(
"--W",
type=int,
default=512,
help="image width, in pixel space",
)
parser.add_argument(
"--C",
type=int,
default=4,
help="latent channels",
)
parser.add_argument(
"--f",
type=int,
default=8,
help="downsampling factor",
)
parser.add_argument(
"--n_samples",
type=int,
default=6,
help="how many samples to produce for each given prompt. A.k.a. batch size",
)
parser.add_argument(
"--n_rows",
type=int,
default=6,
help="rows in the grid (default: n_samples)",
)
parser.add_argument(
"--scale",
type=float,
default=6.,
help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))",
)
parser.add_argument(
"--from-file",
type=str,
help="if specified, load prompts from this file",
)
parser.add_argument(
"--config",
type=str,
default="configs/custom-diffusion/finetune.yaml",
help="path to config which constructs model",
)
parser.add_argument(
"--ckpt",
type=str,
required=True,
help="path to checkpoint of the pre-trained model",
)
parser.add_argument(
"--delta_ckpt",
type=str,
default=None,
help="path to delta checkpoint of fine-tuned custom diffusion block",
)
parser.add_argument(
"--seed",
type=int,
default=42,
help="the seed (for reproducible sampling)",
)
parser.add_argument(
"--precision",
type=str,
help="evaluate at this precision",
choices=["full", "autocast"],
default="autocast"
)
parser.add_argument(
"--wandb_log",
action='store_true',
help="save grid images to wandb.",
)
parser.add_argument(
"--compress",
action='store_true',
help="delta path provided is a compressed checkpoint.",
)
parser.add_argument(
"--modifier_token",
type=str,
default=None,
help="A token to use as a modifier for the concept.",
)
opt = parser.parse_args()
if opt.wandb_log:
if opt.delta_ckpt is not None:
name = opt.delta_ckpt.split('/')[-3]
elif 'checkpoints' in opt.ckpt:
name = opt.ckpt.split('/')[-3]
else:
name = opt.ckpt.split('/')[-1]
wandb.init(project="custom-diffusion", entity="cmu-gil", name=name )
if opt.delta_ckpt is not None:
if len(glob.glob(os.path.join(opt.delta_ckpt.split('checkpoints')[0], "configs/*.yaml"))) > 0:
opt.config = sorted(glob.glob(os.path.join(opt.delta_ckpt.split('checkpoints')[0], "configs/*.yaml")))[-1]
else:
if len(glob.glob(os.path.join(opt.ckpt.split('checkpoints')[0], "configs/*.yaml"))) > 0:
opt.config = sorted(glob.glob(os.path.join(opt.ckpt.split('checkpoints')[0], "configs/*.yaml")))[-1]
seed_everything(opt.seed)
config = OmegaConf.load(f"{opt.config}")
if opt.modifier_token is not None:
config.model.params.cond_stage_config.target = 'src.custom_modules.FrozenCLIPEmbedderWrapper'
config.model.params.cond_stage_config.params = {}
config.model.params.cond_stage_config.params.modifier_token = opt.modifier_token
model = load_model_from_config(config, f"{opt.ckpt}")
if opt.delta_ckpt is not None:
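        # a delta checkpoint stores only the fine-tuned parameters (typically the cross-attention
        # key/value projections and, optionally, embeddings for newly added modifier tokens),
        # which are applied on top of the base model loaded above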
delta_st = torch.load(opt.delta_ckpt)
embed = None
if 'embed' in delta_st['state_dict']:
embed = delta_st['state_dict']['embed'].reshape(-1,768)
del delta_st['state_dict']['embed']
print(embed.shape)
delta_st = delta_st['state_dict']
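        # compressed delta checkpoints store each fine-tuned weight as low-rank factors 'u' and 'v';
        # recompose the full weight as base_weight + u @ v before loading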
if opt.compress:
for name in delta_st.keys():
if 'to_k' in name or 'to_v' in name:
delta_st[name] = model.state_dict()[name] + delta_st[name]['u']@delta_st[name]['v']
model.load_state_dict(delta_st, strict=False)
else:
model.load_state_dict(delta_st, strict=False)
if embed is not None:
print("loading new embedding")
print(model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data.shape)
model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data[-embed.shape[0]:] = embed
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = model.to(device)
if opt.plms:
sampler = PLMSSampler(model)
else:
sampler = DDIMSampler(model)
if opt.delta_ckpt is not None:
outpath = os.path.dirname(os.path.dirname(opt.delta_ckpt))
else:
os.makedirs(opt.outdir, exist_ok=True)
outpath = opt.outdir
batch_size = opt.n_samples
n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
if not opt.from_file:
prompt = opt.prompt
assert prompt is not None
data = [batch_size * [prompt]]
else:
print(f"reading prompts from {opt.from_file}")
with open(opt.from_file, "r") as f:
data = f.read().splitlines()
data = [batch_size * [prompt] for prompt in data]
sample_path = os.path.join(outpath, "samples")
os.makedirs(sample_path, exist_ok=True)
base_count = len(os.listdir(sample_path))
grid_count = len(os.listdir(outpath)) - 1
start_code = None
if opt.fixed_code:
start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device)
precision_scope = autocast if opt.precision == "autocast" else nullcontext
with torch.no_grad():
with precision_scope("cuda"):
with model.ema_scope():
for prompts in tqdm(data, desc="data"):
all_samples = list()
for n in trange(opt.n_iter, desc="Sampling"):
print(prompts[0])
uc = None
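                        # classifier-free guidance: when scale != 1.0, also encode the empty prompt
                        # and let the sampler extrapolate as described in the --scale help above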
if opt.scale != 1.0:
uc = model.get_learned_conditioning(batch_size * [""])
if isinstance(prompts, tuple):
prompts = list(prompts)
c = model.get_learned_conditioning(prompts)
shape = [opt.C, opt.H // opt.f, opt.W // opt.f]
samples_ddim, _ = sampler.sample(S=opt.ddim_steps,
conditioning=c,
batch_size=opt.n_samples,
shape=shape,
verbose=False,
unconditional_guidance_scale=opt.scale,
unconditional_conditioning=uc,
eta=opt.ddim_eta,
x_T=start_code)
# print(samples_ddim.size())
x_samples_ddim = model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
x_samples_ddim = x_samples_ddim.cpu()
if not opt.skip_save:
for x_sample in x_samples_ddim:
x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
img = Image.fromarray(x_sample.astype(np.uint8))
img.save(os.path.join(sample_path, f"{base_count:05}.png"))
base_count += 1
if not opt.skip_grid:
all_samples.append(x_samples_ddim)
if not opt.skip_grid:
# additionally, save as grid
grid = torch.stack(all_samples, 0)
grid = rearrange(grid, 'n b c h w -> (n b) c h w')
grid = make_grid(grid, nrow=n_rows)
# to image
grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
img = Image.fromarray(grid.astype(np.uint8))
sampling_method = 'plms' if opt.plms else 'ddim'
img.save(os.path.join(outpath, f'{prompts[0].replace(" ", "-")}_{opt.scale}_{sampling_method}_{opt.ddim_steps}_{opt.ddim_eta}.png'))
if opt.wandb_log:
wandb.log({ f'{prompts[0].replace(" ", "-")}_{opt.scale}_{sampling_method}_{opt.ddim_steps}_{opt.ddim_eta}.png' : [wandb.Image(img)]})
grid_count += 1
print(f"Your samples are ready and waiting for you here: \n{outpath} \n"
f" \nEnjoy.")
if __name__ == "__main__":
main()
| 27,272 | 62.131944 | 1,097 |
py
|
custom-diffusion
|
custom-diffusion-main/train.py
|
# This code is built from the Stable Diffusion repository: https://github.com/CompVis/stable-diffusion.
# Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors.
# CreativeML Open RAIL-M
#
# ==========================================================================================
#
# Adobe’s modifications are Copyright 2022 Adobe Research. All rights reserved.
# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
# LICENSE.md.
#
# ==========================================================================================
#
# CreativeML Open RAIL-M License
#
import argparse, os, sys, datetime, glob
sys.path.append('stable-diffusion')
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, Dataset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
def get_parser(**parser_kwargs):
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
parser = argparse.ArgumentParser(**parser_kwargs)
parser.add_argument(
"-n",
"--name",
type=str,
const=True,
default="",
nargs="?",
help="postfix for logdir",
)
parser.add_argument(
"-r",
"--resume",
type=str,
const=True,
default="",
nargs="?",
help="resume from logdir or checkpoint in logdir",
)
parser.add_argument(
"-rc",
"--resume-from-checkpoint-custom",
type=str,
const=True,
default="",
nargs="?",
help="resume from logdir or checkpoint in logdir",
)
parser.add_argument(
"--delta-ckpt",
type=str,
const=True,
default=None,
nargs="?",
help="resume from logdir or checkpoint in logdir",
)
parser.add_argument(
"-b",
"--base",
nargs="*",
metavar="base_config.yaml",
help="paths to base configs. Loaded from left-to-right. "
"Parameters can be overwritten or added with command-line options of the form `--key value`.",
default=list(),
)
parser.add_argument(
"-t",
"--train",
type=str2bool,
const=True,
default=False,
nargs="?",
help="train",
)
parser.add_argument(
"--no-test",
type=str2bool,
const=True,
default=False,
nargs="?",
help="disable test",
)
parser.add_argument(
"-p",
"--project",
help="name of new or path to existing project"
)
parser.add_argument(
"-d",
"--debug",
type=str2bool,
nargs="?",
const=True,
default=False,
help="enable post-mortem debugging",
)
parser.add_argument(
"-s",
"--seed",
type=int,
default=23,
help="seed for seed_everything",
)
parser.add_argument(
"-f",
"--postfix",
type=str,
default="",
help="post-postfix for default name",
)
parser.add_argument(
"-l",
"--logdir",
type=str,
default="logs",
help="directory for logging dat shit",
)
parser.add_argument(
"--scale_lr",
type=str2bool,
nargs="?",
const=True,
default=True,
help="scale base-lr by ngpu * batch_size * n_accumulate",
)
parser.add_argument(
"--datapath",
type=str,
default="",
help="path to target images",
)
parser.add_argument(
"--reg_datapath",
type=str,
default=None,
help="path to regularization images",
)
parser.add_argument(
"--caption",
type=str,
default="",
help="path to target images",
)
parser.add_argument(
"--reg_caption",
type=str,
default="",
help="path to target images",
)
parser.add_argument(
"--datapath2",
type=str,
default="",
help="path to target images",
)
parser.add_argument(
"--reg_datapath2",
type=str,
default=None,
help="path to regularization images",
)
parser.add_argument(
"--caption2",
type=str,
default="",
help="path to target images",
)
parser.add_argument(
"--reg_caption2",
type=str,
default="",
help="path to regularization images' caption",
)
parser.add_argument(
"--modifier_token",
type=str,
default=None,
help="token added before cateogry word for personalization use case",
)
parser.add_argument(
"--freeze_model",
type=str,
default=None,
help="crossattn to enable fine-tuning of all key, value, query matrices",
)
parser.add_argument(
"--repeat",
type=int,
default=0,
help="repeat the target dataset by how many times. Used when training without regularization",
)
parser.add_argument(
"--batch_size",
type=int,
default=None,
help="overwrite batch size",
)
return parser
def nondefault_trainer_args(opt):
parser = argparse.ArgumentParser()
parser = Trainer.add_argparse_args(parser)
args = parser.parse_args([])
return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k))
class WrappedDataset(Dataset):
"""Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset"""
def __init__(self, dataset):
self.data = dataset
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
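# give each dataloader worker a distinct numpy seed and, for iterable datasets, its own contiguous shard of sample ids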
def worker_init_fn(_):
worker_info = torch.utils.data.get_worker_info()
dataset = worker_info.dataset
worker_id = worker_info.id
if isinstance(dataset, Txt2ImgIterableBaseDataset):
split_size = dataset.num_records // worker_info.num_workers
# reset num_records to the true number to retain reliable length information
dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size]
current_id = np.random.choice(len(np.random.get_state()[1]), 1)
return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
else:
return np.random.seed(np.random.get_state()[1][0] + worker_id)
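# pairs items from two datasets (e.g. target concept and second concept) so one batch index yields one item from each; its length is the shorter of the two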
class ConcatDataset(Dataset):
def __init__(self, *datasets):
self.datasets = datasets
def __getitem__(self, idx):
return tuple(d[idx] for d in self.datasets)
def __len__(self):
return min(len(d) for d in self.datasets)
class DataModuleFromConfig(pl.LightningDataModule):
def __init__(self, batch_size, train=None, train2=None, validation=None, test=None, predict=None,
wrap=False, num_workers=None, shuffle_test_loader=False, use_worker_init_fn=False,
shuffle_val_dataloader=False):
super().__init__()
self.batch_size = batch_size
self.dataset_configs = dict()
self.num_workers = num_workers if num_workers is not None else batch_size * 2
self.use_worker_init_fn = use_worker_init_fn
if train2 is not None and train2['params']['caption'] != '':
self.dataset_configs["train2"] = train2
if train is not None:
self.dataset_configs["train"] = train
self.train_dataloader = self._train_dataloader
if validation is not None:
self.dataset_configs["validation"] = validation
self.val_dataloader = partial(self._val_dataloader, shuffle=shuffle_val_dataloader)
if test is not None:
self.dataset_configs["test"] = test
self.test_dataloader = partial(self._test_dataloader, shuffle=shuffle_test_loader)
if predict is not None:
self.dataset_configs["predict"] = predict
self.predict_dataloader = self._predict_dataloader
self.wrap = wrap
def prepare_data(self):
for data_cfg in self.dataset_configs.values():
instantiate_from_config(data_cfg)
def setup(self, stage=None):
self.datasets = dict(
(k, instantiate_from_config(self.dataset_configs[k]))
for k in self.dataset_configs)
if self.wrap:
for k in self.datasets:
self.datasets[k] = WrappedDataset(self.datasets[k])
def _train_dataloader(self):
is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
if is_iterable_dataset or self.use_worker_init_fn:
init_fn = worker_init_fn
else:
init_fn = None
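        # when a second concept ("train2") is configured, pair the two datasets and halve the
        # batch size so that each half of the effective batch comes from one concept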
if "train2" in self.dataset_configs and self.dataset_configs["train2"]['params']["caption"] != '':
train_set = self.datasets["train"]
train2_set = self.datasets["train2"]
concat_dataset = ConcatDataset(train_set, train2_set)
return DataLoader(concat_dataset, batch_size=self.batch_size // 2,
num_workers=self.num_workers, shuffle=False if is_iterable_dataset else True,
worker_init_fn=init_fn)
else:
return DataLoader(self.datasets["train"], batch_size=self.batch_size,
num_workers=self.num_workers, shuffle=False if is_iterable_dataset else True,
worker_init_fn=init_fn)
def _val_dataloader(self, shuffle=False):
if isinstance(self.datasets['validation'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn:
init_fn = worker_init_fn
else:
init_fn = None
return DataLoader(self.datasets["validation"],
batch_size=self.batch_size,
num_workers=self.num_workers,
worker_init_fn=init_fn,
shuffle=shuffle)
def _test_dataloader(self, shuffle=False):
is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
if is_iterable_dataset or self.use_worker_init_fn:
init_fn = worker_init_fn
else:
init_fn = None
# do not shuffle dataloader for iterable dataset
shuffle = shuffle and (not is_iterable_dataset)
return DataLoader(self.datasets["test"], batch_size=self.batch_size,
num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=shuffle)
def _predict_dataloader(self, shuffle=False):
if isinstance(self.datasets['predict'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn:
init_fn = worker_init_fn
else:
init_fn = None
return DataLoader(self.datasets["predict"], batch_size=self.batch_size,
num_workers=self.num_workers, worker_init_fn=init_fn)
class SetupCallback(Callback):
def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
super().__init__()
self.resume = resume
self.now = now
self.logdir = logdir
self.ckptdir = ckptdir
self.cfgdir = cfgdir
self.config = config
self.lightning_config = lightning_config
def on_keyboard_interrupt(self, trainer, pl_module):
if trainer.global_rank == 0:
print("Summoning checkpoint.")
ckpt_path = os.path.join(self.ckptdir, "last.ckpt")
trainer.save_checkpoint(ckpt_path)
def on_pretrain_routine_start(self, trainer, pl_module):
if trainer.global_rank == 0:
# Create logdirs and save configs
os.makedirs(self.logdir, exist_ok=True)
os.makedirs(self.ckptdir, exist_ok=True)
os.makedirs(self.cfgdir, exist_ok=True)
if "callbacks" in self.lightning_config:
if 'metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']:
os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True)
print("Project config")
print(OmegaConf.to_yaml(self.config))
OmegaConf.save(self.config,
os.path.join(self.cfgdir, "{}-project.yaml".format(self.now)))
print("Lightning config")
print(OmegaConf.to_yaml(self.lightning_config))
OmegaConf.save(OmegaConf.create({"lightning": self.lightning_config}),
os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now)))
else:
# ModelCheckpoint callback created log directory --- remove it
if not self.resume and os.path.exists(self.logdir):
dst, name = os.path.split(self.logdir)
dst = os.path.join(dst, "child_runs", name)
os.makedirs(os.path.split(dst)[0], exist_ok=True)
try:
os.rename(self.logdir, dst)
except FileNotFoundError:
pass
class ImageLogger(Callback):
def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True,
rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False,
log_images_kwargs=None):
super().__init__()
self.rescale = rescale
self.batch_freq = batch_frequency
self.max_images = max_images
self.save_freq = 250
self.logger_log_images = {
pl.loggers.TestTubeLogger: self._testtube,
}
self.log_steps = [2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)]
if not increase_log_steps:
self.log_steps = [self.batch_freq]
self.clamp = clamp
self.disabled = disabled
self.log_on_batch_idx = log_on_batch_idx
self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}
self.log_first_step = log_first_step
@rank_zero_only
def _testtube(self, pl_module, images, batch_idx, split):
for k in images:
grid = torchvision.utils.make_grid(images[k])
grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w
tag = f"{split}/{k}"
pl_module.logger.experiment.add_image(
tag, grid,
global_step=pl_module.global_step)
@rank_zero_only
def log_local(self, save_dir, split, images,
global_step, current_epoch, batch_idx):
root = os.path.join(save_dir, "images", split)
for k in images:
grid = torchvision.utils.make_grid(images[k], nrow=4)
if self.rescale:
grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w
grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
grid = grid.numpy()
grid = (grid * 255).astype(np.uint8)
filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
k,
global_step,
current_epoch,
batch_idx)
path = os.path.join(root, filename)
os.makedirs(os.path.split(path)[0], exist_ok=True)
Image.fromarray(grid).save(path)
def log_img(self, pl_module, batch, batch_idx, split="train"):
check_idx = batch_idx if self.log_on_batch_idx else pl_module.global_step
if (self.check_frequency(check_idx) and # batch_idx % self.batch_freq == 0
hasattr(pl_module, "log_images") and
callable(pl_module.log_images) and
self.max_images > 0):
logger = type(pl_module.logger)
is_train = pl_module.training
if is_train:
pl_module.eval()
with torch.no_grad():
images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)
for k in images:
N = min(images[k].shape[0], self.max_images)
images[k] = images[k][:N]
if isinstance(images[k], torch.Tensor):
images[k] = images[k].detach().cpu()
if self.clamp:
images[k] = torch.clamp(images[k], -1., 1.)
self.log_local(pl_module.logger.save_dir, split, images,
pl_module.global_step, pl_module.current_epoch, batch_idx)
logger_log_images = self.logger_log_images.get(logger, lambda *args, **kwargs: None)
logger_log_images(pl_module, images, pl_module.global_step, split)
if is_train:
pl_module.train()
def check_frequency(self, check_idx):
if ((check_idx % self.batch_freq) == 0 or (check_idx in self.log_steps)) and (
check_idx > 0 or self.log_first_step):
try:
self.log_steps.pop(0)
except IndexError as e:
print(e)
pass
return True
return False
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
if not self.disabled and (pl_module.global_step > 0 or self.log_first_step):
self.log_img(pl_module, batch, batch_idx, split="train")
# if self.save_freq is not None:
# epoch = trainer.current_epoch
# global_step = trainer.global_step
# if global_step % self.save_freq == 0:
# filename = f'{epoch}_{global_step}.ckpt'
# ckpt_path = os.path.join(trainer.checkpoint_callback.dirpath, filename)
# trainer.save_checkpoint(ckpt_path)
class CUDACallback(Callback):
# see https://github.com/SeanNaren/minGPT/blob/master/mingpt/callback.py
def on_train_epoch_start(self, trainer, pl_module):
# Reset the memory use counter
torch.cuda.reset_peak_memory_stats(trainer.root_gpu)
torch.cuda.synchronize(trainer.root_gpu)
self.start_time = time.time()
def on_train_epoch_end(self, trainer, pl_module, outputs):
torch.cuda.synchronize(trainer.root_gpu)
max_memory = torch.cuda.max_memory_allocated(trainer.root_gpu) / 2 ** 20
epoch_time = time.time() - self.start_time
try:
max_memory = trainer.training_type_plugin.reduce(max_memory)
epoch_time = trainer.training_type_plugin.reduce(epoch_time)
rank_zero_info(f"Average Epoch time: {epoch_time:.2f} seconds")
rank_zero_info(f"Average Peak memory {max_memory:.2f}MiB")
except AttributeError:
pass
if __name__ == "__main__":
# custom parser to specify config files, train, test and debug mode,
# postfix, resume.
# `--key value` arguments are interpreted as arguments to the trainer.
# `nested.key=value` arguments are interpreted as config parameters.
# configs are merged from left-to-right followed by command line parameters.
# model:
# base_learning_rate: float
# target: path to lightning module
# params:
# key: value
# data:
# target: main.DataModuleFromConfig
# params:
# batch_size: int
# wrap: bool
# train:
# target: path to train dataset
# params:
# key: value
# validation:
# target: path to validation dataset
# params:
# key: value
# test:
# target: path to test dataset
# params:
# key: value
# lightning: (optional, has sane defaults and can be specified on cmdline)
# trainer:
# additional arguments to trainer
# logger:
# logger to instantiate
# modelcheckpoint:
# modelcheckpoint to instantiate
# callbacks:
# callback1:
# target: importpath
# params:
# key: value
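    # A minimal sketch (the config path below is just the repo's default, used for illustration)
    # of how the merging behaves:
    #   python train.py -t --base configs/custom-diffusion/finetune.yaml model.base_learning_rate=1e-6 --gpus 0,
    # is roughly equivalent to
    #   cli = OmegaConf.from_dotlist(["model.base_learning_rate=1e-6"])
    #   config = OmegaConf.merge(OmegaConf.load("configs/custom-diffusion/finetune.yaml"), cli)
    # while flags such as `--gpus 0,` are consumed by the Lightning Trainer's argparse arguments.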
now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
# add cwd for convenience and to make classes in this file available when
# running as `python main.py`
# (in particular `main.DataModuleFromConfig`)
sys.path.append(os.getcwd())
parser = get_parser()
parser = Trainer.add_argparse_args(parser)
opt, unknown = parser.parse_known_args()
if opt.name and opt.resume:
raise ValueError(
"-n/--name and -r/--resume cannot be specified both."
"If you want to resume training in a new log folder, "
"use -n/--name in combination with --resume_from_checkpoint"
)
if opt.resume:
if not os.path.exists(opt.resume):
raise ValueError("Cannot find {}".format(opt.resume))
if os.path.isfile(opt.resume):
paths = opt.resume.split("/")
# idx = len(paths)-paths[::-1].index("logs")+1
# logdir = "/".join(paths[:idx])
logdir = "/".join(paths[:-2])
ckpt = opt.resume
else:
assert os.path.isdir(opt.resume), opt.resume
logdir = opt.resume.rstrip("/")
ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
opt.resume_from_checkpoint = ckpt
base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml")))
opt.base = base_configs + opt.base
_tmp = logdir.split("/")
nowname = _tmp[-1]
else:
if opt.name:
name = "_" + opt.name
elif opt.base:
cfg_fname = os.path.split(opt.base[0])[-1]
cfg_name = os.path.splitext(cfg_fname)[0]
name = "_" + cfg_name
else:
name = ""
nowname = now + name + opt.postfix
logdir = os.path.join(opt.logdir, nowname)
ckptdir = os.path.join(logdir, "checkpoints")
cfgdir = os.path.join(logdir, "configs")
seed_everything(opt.seed)
try:
# init and save configs
configs = [OmegaConf.load(cfg) for cfg in opt.base]
cli = OmegaConf.from_dotlist(unknown)
config = OmegaConf.merge(*configs, cli)
lightning_config = config.pop("lightning", OmegaConf.create())
# merge trainer cli with config
trainer_config = lightning_config.get("trainer", OmegaConf.create())
# default to ddp
trainer_config["accelerator"] = "ddp"
for k in nondefault_trainer_args(opt):
trainer_config[k] = getattr(opt, k)
if not ("gpus" in trainer_config):
del trainer_config["accelerator"]
cpu = True
else:
gpuinfo = trainer_config["gpus"]
print(f"Running on GPUs {gpuinfo}")
cpu = False
trainer_opt = argparse.Namespace(**trainer_config)
lightning_config.trainer = trainer_config
# model
config.data.params.train.params.caption = opt.caption
config.data.params.train.params.reg_caption = opt.reg_caption
config.data.params.train.params.datapath = opt.datapath
config.data.params.train.params.reg_datapath = opt.reg_datapath
if opt.caption2 is not None:
config.data.params.train2.params.caption = opt.caption2
config.data.params.train2.params.reg_caption = opt.reg_caption2
config.data.params.train2.params.datapath = opt.datapath2
config.data.params.train2.params.reg_datapath = opt.reg_datapath2
config.data.params.validation = config.data.params.train
if opt.batch_size is not None:
config.data.params.batch_size = opt.batch_size
if opt.modifier_token is not None:
config.model.params.cond_stage_config.params.modifier_token = opt.modifier_token
if opt.repeat > 0:
config.data.params.train.params.repeat = opt.repeat
if opt.resume_from_checkpoint_custom:
config.model.params.ckpt_path = None
if opt.freeze_model is not None:
config.model.params.freeze_model = opt.freeze_model
model = instantiate_from_config(config.model)
if opt.resume_from_checkpoint_custom:
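            # same token-embedding handling as in sample.py: load all weights except the embedding
            # table, then copy its rows back so newly added modifier-token rows keep their initialization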
st = torch.load(opt.resume_from_checkpoint_custom, map_location='cpu')["state_dict"]
token_weights = st["cond_stage_model.transformer.text_model.embeddings.token_embedding.weight"]
del st["cond_stage_model.transformer.text_model.embeddings.token_embedding.weight"]
model.load_state_dict(st, strict=False)
model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data[:token_weights.shape[0]] = token_weights
if opt.delta_ckpt is not None:
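            # optionally continue from an existing delta checkpoint; note that the embedding restore
            # below reuses token_weights, so this path assumes -rc/--resume-from-checkpoint-custom was also given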
st = torch.load(opt.delta_ckpt)
embed = None
if 'embed' in st:
embed = st['embed'].reshape(-1, 768)
if 'state_dict' in st:
st = st['state_dict']
print("restroting from delta model from previous version")
st1 = model.state_dict()
for each in st1.keys():
if each in st.keys():
print("found common", each)
model.load_state_dict(st, strict=False)
if embed is not None:
print("restoring embedding")
model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data[token_weights.shape[0]: token_weights.shape[0] + embed.shape[0]] = embed
# trainer and callbacks
trainer_kwargs = dict()
# default logger configs
default_logger_cfgs = {
"wandb": {
"target": "pytorch_lightning.loggers.WandbLogger",
"params": {
"name": nowname,
"save_dir": logdir,
"offline": opt.debug,
"id": nowname,
}
},
"testtube": {
"target": "pytorch_lightning.loggers.TestTubeLogger",
"params": {
"name": "testtube",
"save_dir": logdir,
}
},
}
default_logger_cfg = default_logger_cfgs["testtube"]
if "logger" in lightning_config:
logger_cfg = lightning_config.logger
else:
logger_cfg = OmegaConf.create()
logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg)
trainer_kwargs["logger"] = instantiate_from_config(logger_cfg)
# modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to
# specify which metric is used to determine best models
default_modelckpt_cfg = {
"target": "pytorch_lightning.callbacks.ModelCheckpoint",
"params": {
"dirpath": ckptdir,
"filename": "{epoch:06}",
"verbose": True,
"save_last": True,
}
}
if hasattr(model, "monitor"):
print(f"Monitoring {model.monitor} as checkpoint metric.")
default_modelckpt_cfg["params"]["monitor"] = model.monitor
default_modelckpt_cfg["params"]["save_top_k"] = -1
default_modelckpt_cfg["params"]["every_n_epochs"] = 1
if "modelcheckpoint" in lightning_config:
modelckpt_cfg = lightning_config.modelcheckpoint
else:
modelckpt_cfg = OmegaConf.create()
modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg)
print(f"Merged modelckpt-cfg: \n{modelckpt_cfg}")
if version.parse(pl.__version__) < version.parse('1.4.0'):
trainer_kwargs["checkpoint_callback"] = instantiate_from_config(modelckpt_cfg)
# add callback which sets up log directory
default_callbacks_cfg = {
"setup_callback": {
"target": "train.SetupCallback",
"params": {
"resume": opt.resume,
"now": now,
"logdir": logdir,
"ckptdir": ckptdir,
"cfgdir": cfgdir,
"config": config,
"lightning_config": lightning_config,
}
},
"image_logger": {
"target": "train.ImageLogger",
"params": {
"batch_frequency": 750,
"max_images": 4,
"clamp": True
}
},
"learning_rate_logger": {
"target": "train.LearningRateMonitor",
"params": {
"logging_interval": "step",
# "log_momentum": True
}
},
"cuda_callback": {
"target": "train.CUDACallback"
},
}
if version.parse(pl.__version__) >= version.parse('1.4.0'):
default_callbacks_cfg.update({'checkpoint_callback': modelckpt_cfg})
if "callbacks" in lightning_config:
callbacks_cfg = lightning_config.callbacks
else:
callbacks_cfg = OmegaConf.create()
if 'metrics_over_trainsteps_checkpoint' in callbacks_cfg:
print(
'Caution: Saving checkpoints every n train steps without deleting. This might require some free space.')
default_metrics_over_trainsteps_ckpt_dict = {
'metrics_over_trainsteps_checkpoint':
{"target": 'pytorch_lightning.callbacks.ModelCheckpoint',
'params': {
"dirpath": os.path.join(ckptdir, 'trainstep_checkpoints'),
"filename": "{epoch:06}-{step:09}",
"verbose": True,
'save_top_k': -1,
'every_n_train_steps': 50,
'save_weights_only': True
}
}
}
default_callbacks_cfg.update(default_metrics_over_trainsteps_ckpt_dict)
callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)
if 'ignore_keys_callback' in callbacks_cfg and hasattr(trainer_opt, 'resume_from_checkpoint'):
callbacks_cfg.ignore_keys_callback.params['ckpt_path'] = trainer_opt.resume_from_checkpoint
elif 'ignore_keys_callback' in callbacks_cfg:
del callbacks_cfg['ignore_keys_callback']
trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg]
trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs)
trainer.logdir = logdir
# data
data = instantiate_from_config(config.data)
# NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html
# calling these ourselves should not be necessary but it is.
# lightning still takes care of proper multiprocessing though
data.prepare_data()
data.setup()
print("#### Data #####")
for k in data.datasets:
print(f"{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}")
# configure learning rate
bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate
if not cpu:
ngpu = len(lightning_config.trainer.gpus.strip(",").split(','))
else:
ngpu = 1
if 'accumulate_grad_batches' in lightning_config.trainer:
accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches
else:
accumulate_grad_batches = 1
print(f"accumulate_grad_batches = {accumulate_grad_batches}")
lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches
if opt.scale_lr:
model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr
print(
"Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)".format(
model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr))
else:
model.learning_rate = base_lr
print("++++ NOT USING LR SCALING ++++")
print(f"Setting learning rate to {model.learning_rate:.2e}")
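    # Worked example for the scale_lr branch (assumed values): accumulate_grad_batches=1, 2 GPUs,
    # batch_size=4 and base_lr=1.0e-06 would give 1 * 2 * 4 * 1.0e-06 = 8.0e-06.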
    # allow checkpointing via USR1 and interactive debugging via USR2
def melk(*args, **kwargs):
# run all checkpoint hooks
if trainer.global_rank == 0:
print("Summoning checkpoint.")
ckpt_path = os.path.join(ckptdir, "last.ckpt")
trainer.save_checkpoint(ckpt_path)
def divein(*args, **kwargs):
if trainer.global_rank == 0:
import pudb
pudb.set_trace()
import signal
signal.signal(signal.SIGUSR1, melk)
signal.signal(signal.SIGUSR2, divein)
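    # With these handlers registered, `kill -USR1 <pid>` makes rank 0 write ckptdir/last.ckpt
    # mid-run, and `kill -USR2 <pid>` drops rank 0 into an interactive pudb breakpoint.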
# run
if opt.train:
try:
trainer.fit(model, data)
except Exception:
melk()
raise
if not opt.no_test and not trainer.interrupted:
trainer.test(model, data)
except Exception:
if opt.debug and trainer.global_rank == 0:
try:
import pudb as debugger
except ImportError:
import pdb as debugger
debugger.post_mortem()
raise
finally:
# move newly created debug project to debug_runs
if opt.debug and not opt.resume and trainer.global_rank == 0:
dst, name = os.path.split(logdir)
dst = os.path.join(dst, "debug_runs", name)
os.makedirs(os.path.split(dst)[0], exist_ok=True)
os.rename(logdir, dst)
if trainer.global_rank == 0:
print(trainer.profiler.summary())
| 48,845 | 48.389282 | 1,097 | py |

| custom-diffusion | custom-diffusion-main/src/diffusers_model_pipeline.py |
# This code is built from the Huggingface repository: https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py, and
# https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py
# Copyright 2022- The Hugging Face team. All rights reserved.
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# ==========================================================================================
#
# modifications are MIT License. To view a copy of the license, visit MIT_LICENSE.md.
#
# ==========================================================================================
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
# 1. Definitions.
# "License" shall mean the terms and conditions for use, reproduction,
# and distribution as defined by Sections 1 through 9 of this document.
# "Licensor" shall mean the copyright owner or entity authorized by
# the copyright owner that is granting the License.
# "Legal Entity" shall mean the union of the acting entity and all
# other entities that control, are controlled by, or are under common
# control with that entity. For the purposes of this definition,
# "control" means (i) the power, direct or indirect, to cause the
# direction or management of such entity, whether by contract or
# otherwise, or (ii) ownership of fifty percent (50%) or more of the
# outstanding shares, or (iii) beneficial ownership of such entity.
# "You" (or "Your") shall mean an individual or Legal Entity
# exercising permissions granted by this License.
# "Source" form shall mean the preferred form for making modifications,
# including but not limited to software source code, documentation
# source, and configuration files.
# "Object" form shall mean any form resulting from mechanical
# transformation or translation of a Source form, including but
# not limited to compiled object code, generated documentation,
# and conversions to other media types.
# "Work" shall mean the work of authorship, whether in Source or
# Object form, made available under the License, as indicated by a
# copyright notice that is included in or attached to the work
# (an example is provided in the Appendix below).
# "Derivative Works" shall mean any work, whether in Source or Object
# form, that is based on (or derived from) the Work and for which the
# editorial revisions, annotations, elaborations, or other modifications
# represent, as a whole, an original work of authorship. For the purposes
# of this License, Derivative Works shall not include works that remain
# separable from, or merely link (or bind by name) to the interfaces of,
# the Work and Derivative Works thereof.
# "Contribution" shall mean any work of authorship, including
# the original version of the Work and any modifications or additions
# to that Work or Derivative Works thereof, that is intentionally
# submitted to Licensor for inclusion in the Work by the copyright owner
# or by an individual or Legal Entity authorized to submit on behalf of
# the copyright owner. For the purposes of this definition, "submitted"
# means any form of electronic, verbal, or written communication sent
# to the Licensor or its representatives, including but not limited to
# communication on electronic mailing lists, source code control systems,
# and issue tracking systems that are managed by, or on behalf of, the
# Licensor for the purpose of discussing and improving the Work, but
# excluding communication that is conspicuously marked or otherwise
# designated in writing by the copyright owner as "Not a Contribution."
# "Contributor" shall mean Licensor and any individual or Legal Entity
# on behalf of whom a Contribution has been received by Licensor and
# subsequently incorporated within the Work.
# 2. Grant of Copyright License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# copyright license to reproduce, prepare Derivative Works of,
# publicly display, publicly perform, sublicense, and distribute the
# Work and such Derivative Works in Source or Object form.
# 3. Grant of Patent License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# (except as stated in this section) patent license to make, have made,
# use, offer to sell, sell, import, and otherwise transfer the Work,
# where such license applies only to those patent claims licensable
# by such Contributor that are necessarily infringed by their
# Contribution(s) alone or by combination of their Contribution(s)
# with the Work to which such Contribution(s) was submitted. If You
# institute patent litigation against any entity (including a
# cross-claim or counterclaim in a lawsuit) alleging that the Work
# or a Contribution incorporated within the Work constitutes direct
# or contributory patent infringement, then any patent licenses
# granted to You under this License for that Work shall terminate
# as of the date such litigation is filed.
# 4. Redistribution. You may reproduce and distribute copies of the
# Work or Derivative Works thereof in any medium, with or without
# modifications, and in Source or Object form, provided that You
# meet the following conditions:
# (a) You must give any other recipients of the Work or
# Derivative Works a copy of this License; and
# (b) You must cause any modified files to carry prominent notices
# stating that You changed the files; and
# (c) You must retain, in the Source form of any Derivative Works
# that You distribute, all copyright, patent, trademark, and
# attribution notices from the Source form of the Work,
# excluding those notices that do not pertain to any part of
# the Derivative Works; and
# (d) If the Work includes a "NOTICE" text file as part of its
# distribution, then any Derivative Works that You distribute must
# include a readable copy of the attribution notices contained
# within such NOTICE file, excluding those notices that do not
# pertain to any part of the Derivative Works, in at least one
# of the following places: within a NOTICE text file distributed
# as part of the Derivative Works; within the Source form or
# documentation, if provided along with the Derivative Works; or,
# within a display generated by the Derivative Works, if and
# wherever such third-party notices normally appear. The contents
# of the NOTICE file are for informational purposes only and
# do not modify the License. You may add Your own attribution
# notices within Derivative Works that You distribute, alongside
# or as an addendum to the NOTICE text from the Work, provided
# that such additional attribution notices cannot be construed
# as modifying the License.
# You may add Your own copyright statement to Your modifications and
# may provide additional or different license terms and conditions
# for use, reproduction, or distribution of Your modifications, or
# for any such Derivative Works as a whole, provided Your use,
# reproduction, and distribution of the Work otherwise complies with
# the conditions stated in this License.
# 5. Submission of Contributions. Unless You explicitly state otherwise,
# any Contribution intentionally submitted for inclusion in the Work
# by You to the Licensor shall be under the terms and conditions of
# this License, without any additional terms or conditions.
# Notwithstanding the above, nothing herein shall supersede or modify
# the terms of any separate license agreement you may have executed
# with Licensor regarding such Contributions.
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor,
# except as required for reasonable and customary use in describing the
# origin of the Work and reproducing the content of the NOTICE file.
# 7. Disclaimer of Warranty. Unless required by applicable law or
# agreed to in writing, Licensor provides the Work (and each
# Contributor provides its Contributions) on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied, including, without limitation, any warranties or conditions
# of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
# PARTICULAR PURPOSE. You are solely responsible for determining the
# appropriateness of using or redistributing the Work and assume any
# risks associated with Your exercise of permissions under this License.
# 8. Limitation of Liability. In no event and under no legal theory,
# whether in tort (including negligence), contract, or otherwise,
# unless required by applicable law (such as deliberate and grossly
# negligent acts) or agreed to in writing, shall any Contributor be
# liable to You for damages, including any direct, indirect, special,
# incidental, or consequential damages of any character arising as a
# result of this License or out of the use or inability to use the
# Work (including but not limited to damages for loss of goodwill,
# work stoppage, computer failure or malfunction, or any and all
# other commercial damages or losses), even if such Contributor
# has been advised of the possibility of such damages.
# 9. Accepting Warranty or Additional Liability. While redistributing
# the Work or Derivative Works thereof, You may choose to offer,
# and charge a fee for, acceptance of support, warranty, indemnity,
# or other liability obligations and/or rights consistent with this
# License. However, in accepting such obligations, You may act only
# on Your own behalf and on Your sole responsibility, not on behalf
# of any other Contributor, and only if You agree to indemnify,
# defend, and hold each Contributor harmless for any liability
# incurred by, or claims asserted against, such Contributor by reason
# of your accepting any such warranty or additional liability.
# END OF TERMS AND CONDITIONS
# APPENDIX: How to apply the Apache License to your work.
# To apply the Apache License to your work, attach the following
# boilerplate notice, with the fields enclosed by brackets "[]"
# replaced with your own identifying information. (Don't include
# the brackets!) The text should be enclosed in the appropriate
# comment syntax for the file format. We also recommend that a
# file or class name and description of purpose be included on the
# same "printed page" as the copyright notice for easier
# identification within third-party archives.
# Copyright [yyyy] [name of copyright owner]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
import torch
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from accelerate.logging import get_logger
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.models.cross_attention import CrossAttention
from diffusers.utils.import_utils import is_xformers_available
if is_xformers_available():
import xformers
import xformers.ops
else:
xformers = None
logger = get_logger(__name__)
def set_use_memory_efficient_attention_xformers(
self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None
):
if use_memory_efficient_attention_xformers:
if self.added_kv_proj_dim is not None:
# TODO(Anton, Patrick, Suraj, William) - currently xformers doesn't work for UnCLIP
# which uses this type of cross attention ONLY because the attention mask of format
# [0, ..., -10.000, ..., 0, ...,] is not supported
raise NotImplementedError(
"Memory efficient attention with `xformers` is currently not supported when"
" `self.added_kv_proj_dim` is defined."
)
elif not is_xformers_available():
raise ModuleNotFoundError(
(
"Refer to https://github.com/facebookresearch/xformers for more information on how to install"
" xformers"
),
name="xformers",
)
elif not torch.cuda.is_available():
raise ValueError(
"torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is"
" only available for GPU "
)
else:
try:
# Make sure we can run the memory efficient attention
_ = xformers.ops.memory_efficient_attention(
torch.randn((1, 2, 40), device="cuda"),
torch.randn((1, 2, 40), device="cuda"),
torch.randn((1, 2, 40), device="cuda"),
)
except Exception as e:
raise e
processor = CustomDiffusionXFormersAttnProcessor(attention_op=attention_op)
else:
processor = CustomDiffusionAttnProcessor()
self.set_processor(processor)
class CustomDiffusionAttnProcessor:
def __call__(
self,
attn: CrossAttention,
hidden_states,
encoder_hidden_states=None,
attention_mask=None,
):
batch_size, sequence_length, _ = hidden_states.shape
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
query = attn.to_q(hidden_states)
crossattn = False
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states
else:
crossattn = True
if attn.cross_attention_norm:
encoder_hidden_states = attn.norm_cross(encoder_hidden_states)
key = attn.to_k(encoder_hidden_states)
value = attn.to_v(encoder_hidden_states)
if crossattn:
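            # Build a mask that is 0 at the first (start-of-text) token and 1 elsewhere, then blend
            # so that only the first token's key/value are detached from the autograd graph while
            # the remaining tokens keep their gradients.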
detach = torch.ones_like(key)
detach[:, :1, :] = detach[:, :1, :]*0.
key = detach*key + (1-detach)*key.detach()
value = detach*value + (1-detach)*value.detach()
query = attn.head_to_batch_dim(query)
key = attn.head_to_batch_dim(key)
value = attn.head_to_batch_dim(value)
attention_probs = attn.get_attention_scores(query, key, attention_mask)
hidden_states = torch.bmm(attention_probs, value)
hidden_states = attn.batch_to_head_dim(hidden_states)
# linear proj
hidden_states = attn.to_out[0](hidden_states)
# dropout
hidden_states = attn.to_out[1](hidden_states)
return hidden_states
class CustomDiffusionXFormersAttnProcessor:
def __init__(self, attention_op: Optional[Callable] = None):
self.attention_op = attention_op
def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None):
batch_size, sequence_length, _ = hidden_states.shape
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
query = attn.to_q(hidden_states)
crossattn = False
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states
else:
crossattn = True
if attn.cross_attention_norm:
encoder_hidden_states = attn.norm_cross(encoder_hidden_states)
key = attn.to_k(encoder_hidden_states)
value = attn.to_v(encoder_hidden_states)
if crossattn:
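            # Same first-token detach trick as in CustomDiffusionAttnProcessor above.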
detach = torch.ones_like(key)
detach[:, :1, :] = detach[:, :1, :]*0.
key = detach*key + (1-detach)*key.detach()
value = detach*value + (1-detach)*value.detach()
query = attn.head_to_batch_dim(query).contiguous()
key = attn.head_to_batch_dim(key).contiguous()
value = attn.head_to_batch_dim(value).contiguous()
hidden_states = xformers.ops.memory_efficient_attention(
query, key, value, attn_bias=attention_mask, op=self.attention_op
)
hidden_states = hidden_states.to(query.dtype)
hidden_states = attn.batch_to_head_dim(hidden_states)
# linear proj
hidden_states = attn.to_out[0](hidden_states)
# dropout
hidden_states = attn.to_out[1](hidden_states)
return hidden_states
class CustomDiffusionPipeline(StableDiffusionPipeline):
r"""
Pipeline for custom diffusion model.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.).
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents.
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
modifier_token: list of new modifier tokens added or to be added to text_encoder
modifier_token_id: list of id of new modifier tokens added or to be added to text_encoder
"""
_optional_components = ["safety_checker", "feature_extractor", "modifier_token"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: SchedulerMixin,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPFeatureExtractor,
requires_safety_checker: bool = True,
modifier_token: list = [],
modifier_token_id: list = [],
):
super().__init__(vae,
text_encoder,
tokenizer,
unet,
scheduler,
safety_checker,
feature_extractor,
requires_safety_checker)
# change attn class
self.modifier_token = modifier_token
self.modifier_token_id = modifier_token_id
def add_token(self, initializer_token):
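        """Register the pipeline's modifier tokens with the tokenizer and initialize their
        embeddings from the corresponding (single-token) initializer tokens."""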
initializer_token_id = []
for modifier_token_, initializer_token_ in zip(self.modifier_token, initializer_token):
# Add the placeholder token in tokenizer
num_added_tokens = self.tokenizer.add_tokens(modifier_token_)
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {modifier_token_}. Please pass a different"
" `modifier_token` that is not already in the tokenizer."
)
# Convert the initializer_token, placeholder_token to ids
token_ids = self.tokenizer.encode([initializer_token_], add_special_tokens=False)
# Check if initializer_token is a single token or a sequence of tokens
if len(token_ids) > 1:
raise ValueError("The initializer token must be a single token.")
self.modifier_token_id.append(self.tokenizer.convert_tokens_to_ids(modifier_token_))
initializer_token_id.append(token_ids[0])
# Resize the token embeddings as we are adding new special tokens to the tokenizer
self.text_encoder.resize_token_embeddings(len(self.tokenizer))
# Initialise the newly added placeholder token with the embeddings of the initializer token
token_embeds = self.text_encoder.get_input_embeddings().weight.data
for (x, y) in zip(self.modifier_token_id, initializer_token_id):
token_embeds[x] = token_embeds[y]
def save_pretrained(self, save_path, freeze_model="crossattn_kv", save_text_encoder=False, all=False):
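        """Save either the full pipeline (all=True) or a compact delta dict holding only the
        fine-tuned cross-attention weights ('unet'), the learned modifier-token embeddings
        ('modifier_token') and, optionally, the full text encoder state ('text_encoder')."""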
if all:
super().save_pretrained(save_path)
else:
delta_dict = {'unet': {}, 'modifier_token': {}}
if self.modifier_token is not None:
for i in range(len(self.modifier_token_id)):
learned_embeds = self.text_encoder.get_input_embeddings().weight[self.modifier_token_id[i]]
delta_dict['modifier_token'][self.modifier_token[i]] = learned_embeds.detach().cpu()
if save_text_encoder:
delta_dict['text_encoder'] = self.text_encoder.state_dict()
for name, params in self.unet.named_parameters():
if freeze_model == "crossattn":
if 'attn2' in name:
delta_dict['unet'][name] = params.cpu().clone()
elif freeze_model == "crossattn_kv":
if 'attn2.to_k' in name or 'attn2.to_v' in name:
delta_dict['unet'][name] = params.cpu().clone()
else:
raise ValueError(
"freeze_model argument only supports crossattn_kv or crossattn"
)
torch.save(delta_dict, save_path)
def load_model(self, save_path, compress=False):
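        """Load a delta checkpoint saved by `save_pretrained`: restores the modifier-token
        embeddings, optionally the text encoder, and the fine-tuned cross-attention weights
        (or adds their low-rank u @ v update when compress=True)."""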
st = torch.load(save_path)
if 'text_encoder' in st:
self.text_encoder.load_state_dict(st['text_encoder'])
if 'modifier_token' in st:
modifier_tokens = list(st['modifier_token'].keys())
modifier_token_id = []
for modifier_token in modifier_tokens:
num_added_tokens = self.tokenizer.add_tokens(modifier_token)
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {modifier_token}. Please pass a different"
" `modifier_token` that is not already in the tokenizer."
)
modifier_token_id.append(self.tokenizer.convert_tokens_to_ids(modifier_token))
# Resize the token embeddings as we are adding new special tokens to the tokenizer
self.text_encoder.resize_token_embeddings(len(self.tokenizer))
token_embeds = self.text_encoder.get_input_embeddings().weight.data
for i, id_ in enumerate(modifier_token_id):
token_embeds[id_] = st['modifier_token'][modifier_tokens[i]]
for name, params in self.unet.named_parameters():
if 'attn2' in name:
if compress and ('to_k' in name or 'to_v' in name):
params.data += st['unet'][name]['u']@st['unet'][name]['v']
elif name in st['unet']:
params.data.copy_(st['unet'][f'{name}'])
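# Minimal usage sketch (not part of the original file; the model id, delta path, modifier token
# and prompt below are assumed/hypothetical): load a base Stable Diffusion checkpoint as a
# CustomDiffusionPipeline, apply a saved delta, and sample an image.
#
#   import torch
#   from src.diffusers_model_pipeline import CustomDiffusionPipeline
#
#   pipe = CustomDiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
#   ).to("cuda")
#   pipe.load_model("logs/cat/delta.bin")  # delta saved via save_pretrained(..., all=False)
#   image = pipe("<new1> cat swimming in a pool", num_inference_steps=100).images[0]
#   image.save("sample.png")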
| 25,547 | 50.198397 | 149 | py |

| custom-diffusion | custom-diffusion-main/src/diffusers_training.py |
# This code is built from the Huggingface repository: https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py, and
# https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py
# Copyright 2022- The Hugging Face team. All rights reserved.
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# ==========================================================================================
#
# modifications are MIT License. To view a copy of the license, visit MIT_LICENSE.md.
#
# ==========================================================================================
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
# 1. Definitions.
# "License" shall mean the terms and conditions for use, reproduction,
# and distribution as defined by Sections 1 through 9 of this document.
# "Licensor" shall mean the copyright owner or entity authorized by
# the copyright owner that is granting the License.
# "Legal Entity" shall mean the union of the acting entity and all
# other entities that control, are controlled by, or are under common
# control with that entity. For the purposes of this definition,
# "control" means (i) the power, direct or indirect, to cause the
# direction or management of such entity, whether by contract or
# otherwise, or (ii) ownership of fifty percent (50%) or more of the
# outstanding shares, or (iii) beneficial ownership of such entity.
# "You" (or "Your") shall mean an individual or Legal Entity
# exercising permissions granted by this License.
# "Source" form shall mean the preferred form for making modifications,
# including but not limited to software source code, documentation
# source, and configuration files.
# "Object" form shall mean any form resulting from mechanical
# transformation or translation of a Source form, including but
# not limited to compiled object code, generated documentation,
# and conversions to other media types.
# "Work" shall mean the work of authorship, whether in Source or
# Object form, made available under the License, as indicated by a
# copyright notice that is included in or attached to the work
# (an example is provided in the Appendix below).
# "Derivative Works" shall mean any work, whether in Source or Object
# form, that is based on (or derived from) the Work and for which the
# editorial revisions, annotations, elaborations, or other modifications
# represent, as a whole, an original work of authorship. For the purposes
# of this License, Derivative Works shall not include works that remain
# separable from, or merely link (or bind by name) to the interfaces of,
# the Work and Derivative Works thereof.
# "Contribution" shall mean any work of authorship, including
# the original version of the Work and any modifications or additions
# to that Work or Derivative Works thereof, that is intentionally
# submitted to Licensor for inclusion in the Work by the copyright owner
# or by an individual or Legal Entity authorized to submit on behalf of
# the copyright owner. For the purposes of this definition, "submitted"
# means any form of electronic, verbal, or written communication sent
# to the Licensor or its representatives, including but not limited to
# communication on electronic mailing lists, source code control systems,
# and issue tracking systems that are managed by, or on behalf of, the
# Licensor for the purpose of discussing and improving the Work, but
# excluding communication that is conspicuously marked or otherwise
# designated in writing by the copyright owner as "Not a Contribution."
# "Contributor" shall mean Licensor and any individual or Legal Entity
# on behalf of whom a Contribution has been received by Licensor and
# subsequently incorporated within the Work.
# 2. Grant of Copyright License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# copyright license to reproduce, prepare Derivative Works of,
# publicly display, publicly perform, sublicense, and distribute the
# Work and such Derivative Works in Source or Object form.
# 3. Grant of Patent License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# (except as stated in this section) patent license to make, have made,
# use, offer to sell, sell, import, and otherwise transfer the Work,
# where such license applies only to those patent claims licensable
# by such Contributor that are necessarily infringed by their
# Contribution(s) alone or by combination of their Contribution(s)
# with the Work to which such Contribution(s) was submitted. If You
# institute patent litigation against any entity (including a
# cross-claim or counterclaim in a lawsuit) alleging that the Work
# or a Contribution incorporated within the Work constitutes direct
# or contributory patent infringement, then any patent licenses
# granted to You under this License for that Work shall terminate
# as of the date such litigation is filed.
# 4. Redistribution. You may reproduce and distribute copies of the
# Work or Derivative Works thereof in any medium, with or without
# modifications, and in Source or Object form, provided that You
# meet the following conditions:
# (a) You must give any other recipients of the Work or
# Derivative Works a copy of this License; and
# (b) You must cause any modified files to carry prominent notices
# stating that You changed the files; and
# (c) You must retain, in the Source form of any Derivative Works
# that You distribute, all copyright, patent, trademark, and
# attribution notices from the Source form of the Work,
# excluding those notices that do not pertain to any part of
# the Derivative Works; and
# (d) If the Work includes a "NOTICE" text file as part of its
# distribution, then any Derivative Works that You distribute must
# include a readable copy of the attribution notices contained
# within such NOTICE file, excluding those notices that do not
# pertain to any part of the Derivative Works, in at least one
# of the following places: within a NOTICE text file distributed
# as part of the Derivative Works; within the Source form or
# documentation, if provided along with the Derivative Works; or,
# within a display generated by the Derivative Works, if and
# wherever such third-party notices normally appear. The contents
# of the NOTICE file are for informational purposes only and
# do not modify the License. You may add Your own attribution
# notices within Derivative Works that You distribute, alongside
# or as an addendum to the NOTICE text from the Work, provided
# that such additional attribution notices cannot be construed
# as modifying the License.
# You may add Your own copyright statement to Your modifications and
# may provide additional or different license terms and conditions
# for use, reproduction, or distribution of Your modifications, or
# for any such Derivative Works as a whole, provided Your use,
# reproduction, and distribution of the Work otherwise complies with
# the conditions stated in this License.
# 5. Submission of Contributions. Unless You explicitly state otherwise,
# any Contribution intentionally submitted for inclusion in the Work
# by You to the Licensor shall be under the terms and conditions of
# this License, without any additional terms or conditions.
# Notwithstanding the above, nothing herein shall supersede or modify
# the terms of any separate license agreement you may have executed
# with Licensor regarding such Contributions.
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor,
# except as required for reasonable and customary use in describing the
# origin of the Work and reproducing the content of the NOTICE file.
# 7. Disclaimer of Warranty. Unless required by applicable law or
# agreed to in writing, Licensor provides the Work (and each
# Contributor provides its Contributions) on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied, including, without limitation, any warranties or conditions
# of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
# PARTICULAR PURPOSE. You are solely responsible for determining the
# appropriateness of using or redistributing the Work and assume any
# risks associated with Your exercise of permissions under this License.
# 8. Limitation of Liability. In no event and under no legal theory,
# whether in tort (including negligence), contract, or otherwise,
# unless required by applicable law (such as deliberate and grossly
# negligent acts) or agreed to in writing, shall any Contributor be
# liable to You for damages, including any direct, indirect, special,
# incidental, or consequential damages of any character arising as a
# result of this License or out of the use or inability to use the
# Work (including but not limited to damages for loss of goodwill,
# work stoppage, computer failure or malfunction, or any and all
# other commercial damages or losses), even if such Contributor
# has been advised of the possibility of such damages.
# 9. Accepting Warranty or Additional Liability. While redistributing
# the Work or Derivative Works thereof, You may choose to offer,
# and charge a fee for, acceptance of support, warranty, indemnity,
# or other liability obligations and/or rights consistent with this
# License. However, in accepting such obligations, You may act only
# on Your own behalf and on Your sole responsibility, not on behalf
# of any other Contributor, and only if You agree to indemnify,
# defend, and hold each Contributor harmless for any liability
# incurred by, or claims asserted against, such Contributor by reason
# of your accepting any such warranty or additional liability.
# END OF TERMS AND CONDITIONS
# APPENDIX: How to apply the Apache License to your work.
# To apply the Apache License to your work, attach the following
# boilerplate notice, with the fields enclosed by brackets "[]"
# replaced with your own identifying information. (Don't include
# the brackets!) The text should be enclosed in the appropriate
# comment syntax for the file format. We also recommend that a
# file or class name and description of purpose be included on the
# same "printed page" as the copyright notice for easier
# identification within third-party archives.
# Copyright [yyyy] [name of copyright owner]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import hashlib
import itertools
import logging
import math
import os
from pathlib import Path
from typing import Optional
import torch
import json
import numpy as np
import torch.nn.functional as F
import torch.utils.checkpoint
from packaging import version
import transformers
import diffusers
from accelerate.logging import get_logger
from accelerate import Accelerator
from accelerate.utils import set_seed
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel, DPMSolverMultistepScheduler
from diffusers.optimization import get_scheduler
from huggingface_hub import HfFolder, Repository, create_repo, whoami
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig
from diffusers.models.cross_attention import CrossAttention
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils import check_min_version, is_wandb_available
sys.path.append('./')
from src.diffusers_model_pipeline import CustomDiffusionAttnProcessor, CustomDiffusionPipeline, set_use_memory_efficient_attention_xformers
from src.diffusers_data_pipeline import CustomDiffusionDataset, PromptDataset, collate_fn
from src import retrieve
check_min_version("0.14.0")
logger = get_logger(__name__)
def create_custom_diffusion(unet, freeze_model):
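    """Freeze every U-Net parameter except the selected cross-attention projections
    ('crossattn' = all attn2 weights, 'crossattn_kv' = only attn2.to_k / attn2.to_v) and
    install the CustomDiffusionAttnProcessor on all CrossAttention layers."""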
for name, params in unet.named_parameters():
if freeze_model == 'crossattn':
if 'attn2' in name:
params.requires_grad = True
print(name)
else:
params.requires_grad = False
elif freeze_model == "crossattn_kv":
if 'attn2.to_k' in name or 'attn2.to_v' in name:
params.requires_grad = True
print(name)
else:
params.requires_grad = False
else:
raise ValueError(
"freeze_model argument only supports crossattn_kv or crossattn"
)
# change attn class
def change_attn(unet):
for layer in unet.children():
if type(layer) == CrossAttention:
bound_method = set_use_memory_efficient_attention_xformers.__get__(layer, layer.__class__)
setattr(layer, 'set_use_memory_efficient_attention_xformers', bound_method)
else:
change_attn(layer)
change_attn(unet)
unet.set_attn_processor(CustomDiffusionAttnProcessor())
return unet
def freeze_params(params):
for param in params:
param.requires_grad = False
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
text_encoder_config = PretrainedConfig.from_pretrained(
pretrained_model_name_or_path,
subfolder="text_encoder",
revision=revision,
)
model_class = text_encoder_config.architectures[0]
if model_class == "CLIPTextModel":
from transformers import CLIPTextModel
return CLIPTextModel
elif model_class == "RobertaSeriesModelWithTransformation":
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
return RobertaSeriesModelWithTransformation
else:
raise ValueError(f"{model_class} is not supported.")
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--instance_data_dir",
type=str,
default=None,
help="A folder containing the training data of instance images.",
)
parser.add_argument(
"--class_data_dir",
type=str,
default=None,
required=False,
help="A folder containing the training data of class images.",
)
parser.add_argument(
"--instance_prompt",
type=str,
default=None,
help="The prompt with identifier specifying the instance",
)
parser.add_argument(
"--class_prompt",
type=str,
default=None,
help="The prompt to specify images in the same class as provided instance images.",
)
parser.add_argument(
"--validation_prompt",
type=str,
default=None,
help="A prompt that is used during validation to verify that the model is learning.",
)
parser.add_argument(
"--num_validation_images",
type=int,
default=4,
help="Number of images that should be generated during validation with `validation_prompt`.",
)
parser.add_argument(
"--with_prior_preservation",
default=False,
action="store_true",
help="Flag to add prior preservation loss.",
)
parser.add_argument(
"--real_prior",
default=False,
action="store_true",
help="real images as prior.",
)
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
parser.add_argument(
"--num_class_images",
type=int,
default=100,
help=(
"Minimal class images for prior preservation loss. If there are not enough images already present in"
" class_data_dir, additional images will be sampled with class_prompt."
),
)
parser.add_argument(
"--output_dir",
type=str,
default="custom-diffusion-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
)
parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
)
parser.add_argument(
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
)
parser.add_argument("--num_train_epochs", type=int, default=1)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help=(
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
),
)
parser.add_argument(
"--freeze_model",
type=str,
default='crossattn_kv',
        help="Which U-Net weights to fine-tune: 'crossattn_kv' (default) trains only the cross-attention key/value projections; 'crossattn' trains all cross-attention (attn2) weights.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
)
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current"
            " system or the flag passed with the `accelerate.launch` command. Use this argument to override"
            " the accelerate config."
        ),
)
parser.add_argument(
"--prior_generation_precision",
type=str,
default=None,
choices=["no", "fp32", "fp16", "bf16"],
        help=(
            "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Defaults to fp16 if a GPU is available, else fp32."
        ),
)
parser.add_argument(
"--concepts_list",
type=str,
default=None,
help="Path to json containing multiple concepts, will overwrite parameters like instance_prompt, class_prompt, etc.",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--modifier_token",
type=str,
default=None,
help="A token to use as a modifier for the concept.",
)
parser.add_argument(
"--initializer_token", type=str, default='ktn+pll+ucd', help="A token to use as initializer word."
)
parser.add_argument("--hflip", action="store_true", help="Apply horizontal flip data augmentation.")
if input_args is not None:
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
if args.with_prior_preservation:
if args.concepts_list is None:
if args.class_data_dir is None:
raise ValueError("You must specify a data directory for class images.")
if args.class_prompt is None:
raise ValueError("You must specify prompt for class images.")
else:
if args.class_data_dir is not None:
logger.warning("You need not use --class_data_dir without --with_prior_preservation.")
if args.class_prompt is not None:
logger.warning("You need not use --class_prompt without --with_prior_preservation.")
return args
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
if token is None:
token = HfFolder.get_token()
if organization is None:
username = whoami(token)["name"]
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def main(args):
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
logging_dir=logging_dir,
)
if args.report_to == "wandb":
if not is_wandb_available():
raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
import wandb
# Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
# This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
# TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
if args.seed is not None:
set_seed(args.seed)
if args.concepts_list is None:
args.concepts_list = [
{
"instance_prompt": args.instance_prompt,
"class_prompt": args.class_prompt,
"instance_data_dir": args.instance_data_dir,
"class_data_dir": args.class_data_dir
}
]
else:
with open(args.concepts_list, "r") as f:
args.concepts_list = json.load(f)
if args.with_prior_preservation:
for i, concept in enumerate(args.concepts_list):
class_images_dir = Path(concept['class_data_dir'])
if not class_images_dir.exists():
class_images_dir.mkdir(parents=True, exist_ok=True)
if args.real_prior:
if accelerator.is_main_process:
if not Path(os.path.join(class_images_dir, 'images')).exists() or len(list(Path(os.path.join(class_images_dir, 'images')).iterdir())) < args.num_class_images:
retrieve.retrieve(concept['class_prompt'], class_images_dir, args.num_class_images)
concept['class_prompt'] = os.path.join(class_images_dir, 'caption.txt')
concept['class_data_dir'] = os.path.join(class_images_dir, 'images.txt')
args.concepts_list[i] = concept
accelerator.wait_for_everyone()
else:
cur_class_images = len(list(class_images_dir.iterdir()))
if cur_class_images < args.num_class_images:
torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
if args.prior_generation_precision == "fp32":
torch_dtype = torch.float32
elif args.prior_generation_precision == "fp16":
torch_dtype = torch.float16
elif args.prior_generation_precision == "bf16":
torch_dtype = torch.bfloat16
pipeline = DiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
torch_dtype=torch_dtype,
safety_checker=None,
revision=args.revision,
)
pipeline.set_progress_bar_config(disable=True)
num_new_images = args.num_class_images - cur_class_images
logger.info(f"Number of class images to sample: {num_new_images}.")
sample_dataset = PromptDataset(concept['class_prompt'], num_new_images)
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
sample_dataloader = accelerator.prepare(sample_dataloader)
pipeline.to(accelerator.device)
for example in tqdm(
sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
):
images = pipeline(example["prompt"], num_inference_steps=50, guidance_scale=6., eta=1.).images
for i, image in enumerate(images):
hash_image = hashlib.sha1(image.tobytes()).hexdigest()
image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
image.save(image_filename)
del pipeline
if torch.cuda.is_available():
torch.cuda.empty_cache()
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
create_repo(repo_name, exist_ok=True, token=args.hub_token)
repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token)
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
if "step_*" not in gitignore:
gitignore.write("step_*\n")
if "epoch_*" not in gitignore:
gitignore.write("epoch_*\n")
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
# Load the tokenizer
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name,
revision=args.revision,
use_fast=False,
)
elif args.pretrained_model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="tokenizer",
revision=args.revision,
use_fast=False,
)
# import correct text encoder class
text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
# Load scheduler and models
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = text_encoder_cls.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
)
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
)
vae.requires_grad_(False)
if not args.train_text_encoder and args.modifier_token is None:
text_encoder.requires_grad_(False)
unet = create_custom_diffusion(unet, args.freeze_model)
# For mixed precision training we cast the text_encoder and vae weights to half-precision
# as these models are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# Move unet, vae and text_encoder to device and cast to weight_dtype
if accelerator.mixed_precision != "fp16":
unet.to(accelerator.device, dtype=weight_dtype)
text_encoder.to(accelerator.device, dtype=weight_dtype)
vae.to(accelerator.device, dtype=weight_dtype)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
logger.warn(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
    # TODO: check this
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
if args.train_text_encoder or args.modifier_token is not None:
text_encoder.gradient_checkpointing_enable()
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
if args.with_prior_preservation:
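            # each batch also carries class (prior) images, so the learning rate is doubled here,
            # presumably to keep the effective update on the target concept comparable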
args.learning_rate = args.learning_rate*2.
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
)
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
# Adding a modifier token which is optimized ####
# Code taken from https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py
modifier_token_id = []
initializer_token_id = []
if args.modifier_token is not None:
args.modifier_token = args.modifier_token.split('+')
args.initializer_token = args.initializer_token.split('+')
if len(args.modifier_token) > len(args.initializer_token):
raise ValueError("You must specify + separated initializer token for each modifier token.")
for modifier_token, initializer_token in zip(args.modifier_token, args.initializer_token[:len(args.modifier_token)]):
# Add the placeholder token in tokenizer
num_added_tokens = tokenizer.add_tokens(modifier_token)
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {modifier_token}. Please pass a different"
" `modifier_token` that is not already in the tokenizer."
)
# Convert the initializer_token, placeholder_token to ids
token_ids = tokenizer.encode([initializer_token], add_special_tokens=False)
print(token_ids)
# Check if initializer_token is a single token or a sequence of tokens
if len(token_ids) > 1:
raise ValueError("The initializer token must be a single token.")
initializer_token_id.append(token_ids[0])
modifier_token_id.append(tokenizer.convert_tokens_to_ids(modifier_token))
# Resize the token embeddings as we are adding new special tokens to the tokenizer
text_encoder.resize_token_embeddings(len(tokenizer))
# Initialise the newly added placeholder token with the embeddings of the initializer token
token_embeds = text_encoder.get_input_embeddings().weight.data
for (x,y) in zip(modifier_token_id,initializer_token_id):
token_embeds[x] = token_embeds[y]
# Freeze all parameters except for the token embeddings in text encoder
params_to_freeze = itertools.chain(
text_encoder.text_model.encoder.parameters(),
text_encoder.text_model.final_layer_norm.parameters(),
text_encoder.text_model.embeddings.position_embedding.parameters(),
)
freeze_params(params_to_freeze)
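        # Optimize the new token embedding(s) together with the cross-attention weights:
        # all attn2 parameters when freeze_model == 'crossattn', otherwise only the
        # key/value projections (attn2.to_k / attn2.to_v).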
if args.freeze_model == 'crossattn':
            params_to_optimize = itertools.chain(
                text_encoder.get_input_embeddings().parameters(),
                [x[1] for x in unet.named_parameters() if 'attn2' in x[0]],
            )
else:
            params_to_optimize = itertools.chain(
                text_encoder.get_input_embeddings().parameters(),
                [x[1] for x in unet.named_parameters() if ('attn2.to_k' in x[0] or 'attn2.to_v' in x[0])],
            )
########################################################
########################################################
else:
if args.freeze_model == 'crossattn':
params_to_optimize = (
itertools.chain([x[1] for x in unet.named_parameters() if 'attn2' in x[0]], text_encoder.parameters() if args.train_text_encoder else [] )
)
else:
params_to_optimize = (
itertools.chain([x[1] for x in unet.named_parameters() if ('attn2.to_k' in x[0] or 'attn2.to_v' in x[0])], text_encoder.parameters() if args.train_text_encoder else [] )
)
optimizer = optimizer_class(
params_to_optimize,
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
train_dataset = CustomDiffusionDataset(
concepts_list=args.concepts_list,
tokenizer=tokenizer,
with_prior_preservation=args.with_prior_preservation,
size=args.resolution,
center_crop=args.center_crop,
num_class_images=args.num_class_images,
hflip=args.hflip
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.train_batch_size,
shuffle=True,
collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
num_workers=args.dataloader_num_workers,
)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
)
if args.train_text_encoder or args.modifier_token is not None:
unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
)
else:
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, optimizer, train_dataloader, lr_scheduler
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
    # The trackers initialize automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers("custom-diffusion")
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
progress_bar.set_description("Steps")
global_step = 0
for epoch in range(args.num_train_epochs):
unet.train()
if args.train_text_encoder or args.modifier_token is not None:
text_encoder.train()
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(unet):
# Convert images to latent space
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * vae.config.scaling_factor
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
# Predict the noise residual
model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
# Get the target for loss depending on the prediction type
if noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
if args.with_prior_preservation:
# Chunk the noise and model_pred into two parts and compute the loss on each part separately.
model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
target, target_prior = torch.chunk(target, 2, dim=0)
mask = torch.chunk(batch["mask"], 2, dim=0)[0]
# Compute instance loss
loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
loss = ((loss*mask).sum([1, 2, 3])/mask.sum([1, 2, 3])).mean()
# Compute prior loss
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
# Add the prior loss to the instance loss.
loss = loss + args.prior_loss_weight * prior_loss
else:
mask = batch["mask"]
loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
loss = ((loss*mask).sum([1, 2, 3])/mask.sum([1, 2, 3])).mean()
accelerator.backward(loss)
# Zero out the gradients for all token embeddings except the newly added
# embeddings for the concept, as we only want to optimize the concept embeddings
if args.modifier_token is not None:
if accelerator.num_processes > 1:
grads_text_encoder = text_encoder.module.get_input_embeddings().weight.grad
else:
grads_text_encoder = text_encoder.get_input_embeddings().weight.grad
# Get the index for tokens that we want to zero the grads for
index_grads_to_zero = torch.arange(len(tokenizer)) != modifier_token_id[0]
                    for i in range(1, len(modifier_token_id)):
                        index_grads_to_zero = index_grads_to_zero & (torch.arange(len(tokenizer)) != modifier_token_id[i])
grads_text_encoder.data[index_grads_to_zero, :] = grads_text_encoder.data[index_grads_to_zero, :].fill_(0)
if accelerator.sync_gradients:
params_to_clip = (
itertools.chain([x[1] for x in unet.named_parameters() if ('attn2' in x[0])], text_encoder.parameters())
if (args.train_text_encoder or args.modifier_token is not None)
else itertools.chain([x[1] for x in unet.named_parameters() if ('attn2' in x[0])])
)
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if global_step % args.save_steps == 0:
if accelerator.is_main_process:
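                        # Periodically checkpoint the fine-tuned parameters; the delta-{global_step}.bin
                        # name suggests only the learned delta (cross-attention weights and any new
                        # token embeddings) is stored by CustomDiffusionPipeline.save_pretrained.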
pipeline = CustomDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
unet=accelerator.unwrap_model(unet),
text_encoder=accelerator.unwrap_model(text_encoder),
tokenizer=tokenizer,
revision=args.revision,
modifier_token=args.modifier_token,
modifier_token_id=modifier_token_id,
)
save_path = os.path.join(args.output_dir, f"delta-{global_step}.bin")
pipeline.save_pretrained(save_path, freeze_model=args.freeze_model)
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
accelerator.wait_for_everyone()
if accelerator.is_main_process:
# create pipeline
unet = unet.to(torch.float32)
pipeline = CustomDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
unet=accelerator.unwrap_model(unet),
text_encoder=accelerator.unwrap_model(text_encoder),
tokenizer=tokenizer,
revision=args.revision,
modifier_token=args.modifier_token,
modifier_token_id=modifier_token_id,
)
save_path = os.path.join(args.output_dir, f"delta.bin")
pipeline.save_pretrained(save_path, freeze_model=args.freeze_model)
if args.validation_prompt is not None:
logger.info(
f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
f" {args.validation_prompt}."
)
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
# run inference
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
images = [
pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
for _ in range(args.num_validation_images)
]
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
if tracker.name == "wandb":
tracker.log(
{
"validation": [
wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
for i, image in enumerate(images)
]
}
)
del pipeline
torch.cuda.empty_cache()
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
accelerator.end_training()
if __name__ == "__main__":
args = parse_args()
main(args)
| 51,778 | 45.816456 | 253 |
py
|
custom-diffusion
|
custom-diffusion-main/src/compress.py
|
# Copyright 2022 Adobe Research. All rights reserved.
# To view a copy of the license, visit LICENSE.md.
import torch
import argparse
def compress(delta_ckpt, ckpt, diffuser=False, compression_ratio=0.6, device='cuda'):
st = torch.load(f'{delta_ckpt}')
if not diffuser:
compressed_key = 'state_dict'
compressed_st = {compressed_key: {}}
pretrained_st = torch.load(ckpt)['state_dict']
if 'embed' in st['state_dict']:
compressed_st['state_dict']['embed'] = st['state_dict']['embed']
del st['state_dict']['embed']
st = st['state_dict']
else:
from diffusers import StableDiffusionPipeline
compressed_key = 'unet'
compressed_st = {compressed_key: {}}
pretrained_st = StableDiffusionPipeline.from_pretrained(ckpt, torch_dtype=torch.float16).to("cuda")
pretrained_st = pretrained_st.unet.state_dict()
if 'modifier_token' in st:
compressed_st['modifier_token'] = st['modifier_token']
st = st['unet']
print("getting compression")
layers = list(st.keys())
for name in layers:
if 'to_k' in name or 'to_v' in name:
W = st[name].to(device)
Wpretrain = pretrained_st[name].clone().to(device)
deltaW = W-Wpretrain
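            # SVD of the weight update; keep the leading singular directions until their
            # cumulative share of the singular-value mass exceeds compression_ratio.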
u, s, vt = torch.linalg.svd(deltaW.clone())
explain = 0
all_ = (s).sum()
for i, t in enumerate(s):
explain += t/(all_)
if explain > compression_ratio:
break
compressed_st[compressed_key][f'{name}'] = {}
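            # Store the rank-i factors; the layer delta can later be reconstructed as u @ v
            # (a rank-i approximation of W - Wpretrain), since u already carries the singular values.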
compressed_st[compressed_key][f'{name}']['u'] = (u[:, :i]@torch.diag(s)[:i, :i]).clone()
compressed_st[compressed_key][f'{name}']['v'] = vt[:i].clone()
else:
compressed_st[compressed_key][f'{name}'] = st[name]
name = delta_ckpt.replace('delta', 'compressed_delta')
torch.save(compressed_st, f'{name}')
def parse_args():
parser = argparse.ArgumentParser('', add_help=False)
parser.add_argument('--delta_ckpt', help='path of checkpoint to compress',
type=str)
parser.add_argument('--ckpt', help='path of pretrained model checkpoint',
type=str)
parser.add_argument("--diffuser", action='store_true')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
compress(args.delta_ckpt, args.ckpt, args.diffuser)
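# Example invocation (paths are illustrative):
#   python src/compress.py --delta_ckpt logs/<run>/checkpoints/delta.bin --ckpt <path-to-sd-v1-x.ckpt>
#   python src/compress.py --delta_ckpt <output_dir>/delta.bin --ckpt CompVis/stable-diffusion-v1-4 --diffuser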
| 2,479 | 34.428571 | 107 |
py
|
custom-diffusion
|
custom-diffusion-main/src/diffusers_data_pipeline.py
|
# This code is built from the Huggingface repository: https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py, and
# https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py
# Copyright 2022- The Hugging Face team. All rights reserved.
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# ==========================================================================================
#
# Adobe’s modifications are Copyright 2022 Adobe Research. All rights reserved.
# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
# LICENSE.md.
#
# ==========================================================================================
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
# 1. Definitions.
# "License" shall mean the terms and conditions for use, reproduction,
# and distribution as defined by Sections 1 through 9 of this document.
# "Licensor" shall mean the copyright owner or entity authorized by
# the copyright owner that is granting the License.
# "Legal Entity" shall mean the union of the acting entity and all
# other entities that control, are controlled by, or are under common
# control with that entity. For the purposes of this definition,
# "control" means (i) the power, direct or indirect, to cause the
# direction or management of such entity, whether by contract or
# otherwise, or (ii) ownership of fifty percent (50%) or more of the
# outstanding shares, or (iii) beneficial ownership of such entity.
# "You" (or "Your") shall mean an individual or Legal Entity
# exercising permissions granted by this License.
# "Source" form shall mean the preferred form for making modifications,
# including but not limited to software source code, documentation
# source, and configuration files.
# "Object" form shall mean any form resulting from mechanical
# transformation or translation of a Source form, including but
# not limited to compiled object code, generated documentation,
# and conversions to other media types.
# "Work" shall mean the work of authorship, whether in Source or
# Object form, made available under the License, as indicated by a
# copyright notice that is included in or attached to the work
# (an example is provided in the Appendix below).
# "Derivative Works" shall mean any work, whether in Source or Object
# form, that is based on (or derived from) the Work and for which the
# editorial revisions, annotations, elaborations, or other modifications
# represent, as a whole, an original work of authorship. For the purposes
# of this License, Derivative Works shall not include works that remain
# separable from, or merely link (or bind by name) to the interfaces of,
# the Work and Derivative Works thereof.
# "Contribution" shall mean any work of authorship, including
# the original version of the Work and any modifications or additions
# to that Work or Derivative Works thereof, that is intentionally
# submitted to Licensor for inclusion in the Work by the copyright owner
# or by an individual or Legal Entity authorized to submit on behalf of
# the copyright owner. For the purposes of this definition, "submitted"
# means any form of electronic, verbal, or written communication sent
# to the Licensor or its representatives, including but not limited to
# communication on electronic mailing lists, source code control systems,
# and issue tracking systems that are managed by, or on behalf of, the
# Licensor for the purpose of discussing and improving the Work, but
# excluding communication that is conspicuously marked or otherwise
# designated in writing by the copyright owner as "Not a Contribution."
# "Contributor" shall mean Licensor and any individual or Legal Entity
# on behalf of whom a Contribution has been received by Licensor and
# subsequently incorporated within the Work.
# 2. Grant of Copyright License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# copyright license to reproduce, prepare Derivative Works of,
# publicly display, publicly perform, sublicense, and distribute the
# Work and such Derivative Works in Source or Object form.
# 3. Grant of Patent License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# (except as stated in this section) patent license to make, have made,
# use, offer to sell, sell, import, and otherwise transfer the Work,
# where such license applies only to those patent claims licensable
# by such Contributor that are necessarily infringed by their
# Contribution(s) alone or by combination of their Contribution(s)
# with the Work to which such Contribution(s) was submitted. If You
# institute patent litigation against any entity (including a
# cross-claim or counterclaim in a lawsuit) alleging that the Work
# or a Contribution incorporated within the Work constitutes direct
# or contributory patent infringement, then any patent licenses
# granted to You under this License for that Work shall terminate
# as of the date such litigation is filed.
# 4. Redistribution. You may reproduce and distribute copies of the
# Work or Derivative Works thereof in any medium, with or without
# modifications, and in Source or Object form, provided that You
# meet the following conditions:
# (a) You must give any other recipients of the Work or
# Derivative Works a copy of this License; and
# (b) You must cause any modified files to carry prominent notices
# stating that You changed the files; and
# (c) You must retain, in the Source form of any Derivative Works
# that You distribute, all copyright, patent, trademark, and
# attribution notices from the Source form of the Work,
# excluding those notices that do not pertain to any part of
# the Derivative Works; and
# (d) If the Work includes a "NOTICE" text file as part of its
# distribution, then any Derivative Works that You distribute must
# include a readable copy of the attribution notices contained
# within such NOTICE file, excluding those notices that do not
# pertain to any part of the Derivative Works, in at least one
# of the following places: within a NOTICE text file distributed
# as part of the Derivative Works; within the Source form or
# documentation, if provided along with the Derivative Works; or,
# within a display generated by the Derivative Works, if and
# wherever such third-party notices normally appear. The contents
# of the NOTICE file are for informational purposes only and
# do not modify the License. You may add Your own attribution
# notices within Derivative Works that You distribute, alongside
# or as an addendum to the NOTICE text from the Work, provided
# that such additional attribution notices cannot be construed
# as modifying the License.
# You may add Your own copyright statement to Your modifications and
# may provide additional or different license terms and conditions
# for use, reproduction, or distribution of Your modifications, or
# for any such Derivative Works as a whole, provided Your use,
# reproduction, and distribution of the Work otherwise complies with
# the conditions stated in this License.
# 5. Submission of Contributions. Unless You explicitly state otherwise,
# any Contribution intentionally submitted for inclusion in the Work
# by You to the Licensor shall be under the terms and conditions of
# this License, without any additional terms or conditions.
# Notwithstanding the above, nothing herein shall supersede or modify
# the terms of any separate license agreement you may have executed
# with Licensor regarding such Contributions.
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor,
# except as required for reasonable and customary use in describing the
# origin of the Work and reproducing the content of the NOTICE file.
# 7. Disclaimer of Warranty. Unless required by applicable law or
# agreed to in writing, Licensor provides the Work (and each
# Contributor provides its Contributions) on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied, including, without limitation, any warranties or conditions
# of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
# PARTICULAR PURPOSE. You are solely responsible for determining the
# appropriateness of using or redistributing the Work and assume any
# risks associated with Your exercise of permissions under this License.
# 8. Limitation of Liability. In no event and under no legal theory,
# whether in tort (including negligence), contract, or otherwise,
# unless required by applicable law (such as deliberate and grossly
# negligent acts) or agreed to in writing, shall any Contributor be
# liable to You for damages, including any direct, indirect, special,
# incidental, or consequential damages of any character arising as a
# result of this License or out of the use or inability to use the
# Work (including but not limited to damages for loss of goodwill,
# work stoppage, computer failure or malfunction, or any and all
# other commercial damages or losses), even if such Contributor
# has been advised of the possibility of such damages.
# 9. Accepting Warranty or Additional Liability. While redistributing
# the Work or Derivative Works thereof, You may choose to offer,
# and charge a fee for, acceptance of support, warranty, indemnity,
# or other liability obligations and/or rights consistent with this
# License. However, in accepting such obligations, You may act only
# on Your own behalf and on Your sole responsibility, not on behalf
# of any other Contributor, and only if You agree to indemnify,
# defend, and hold each Contributor harmless for any liability
# incurred by, or claims asserted against, such Contributor by reason
# of your accepting any such warranty or additional liability.
# END OF TERMS AND CONDITIONS
# APPENDIX: How to apply the Apache License to your work.
# To apply the Apache License to your work, attach the following
# boilerplate notice, with the fields enclosed by brackets "[]"
# replaced with your own identifying information. (Don't include
# the brackets!) The text should be enclosed in the appropriate
# comment syntax for the file format. We also recommend that a
# file or class name and description of purpose be included on the
# same "printed page" as the copyright notice for easier
# identification within third-party archives.
# Copyright [yyyy] [name of copyright owner]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from pathlib import Path
import numpy as np
import PIL
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
def preprocess(image, scale, resample):
image = image.resize((scale, scale), resample=resample)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
return image
def collate_fn(examples, with_prior_preservation):
input_ids = [example["instance_prompt_ids"] for example in examples]
pixel_values = [example["instance_images"] for example in examples]
mask = [example["mask"] for example in examples]
# Concat class and instance examples for prior preservation.
# We do this to avoid doing two forward passes.
if with_prior_preservation:
input_ids += [example["class_prompt_ids"] for example in examples]
pixel_values += [example["class_images"] for example in examples]
mask += [example["class_mask"] for example in examples]
input_ids = torch.cat(input_ids, dim=0)
pixel_values = torch.stack(pixel_values)
mask = torch.stack(mask)
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
mask = mask.to(memory_format=torch.contiguous_format).float()
batch = {
"input_ids": input_ids,
"pixel_values": pixel_values,
"mask": mask.unsqueeze(1)
}
return batch
class PromptDataset(Dataset):
"A simple dataset to prepare the prompts to generate class images on multiple GPUs."
def __init__(self, prompt, num_samples):
self.prompt = prompt
self.num_samples = num_samples
def __len__(self):
return self.num_samples
def __getitem__(self, index):
example = {}
example["prompt"] = self.prompt
example["index"] = index
return example
class CustomDiffusionDataset(Dataset):
"""
A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
    It pre-processes the images and tokenizes the prompts.
"""
def __init__(
self,
concepts_list,
tokenizer,
size=512,
center_crop=False,
with_prior_preservation=False,
num_class_images=200,
hflip=False,
):
self.size = size
self.center_crop = center_crop
self.tokenizer = tokenizer
self.interpolation = PIL.Image.BILINEAR
self.instance_images_path = []
self.class_images_path = []
self.with_prior_preservation = with_prior_preservation
for concept in concepts_list:
inst_img_path = [(x, concept["instance_prompt"]) for x in Path(concept["instance_data_dir"]).iterdir() if x.is_file()]
self.instance_images_path.extend(inst_img_path)
if with_prior_preservation:
class_data_root = Path(concept["class_data_dir"])
if os.path.isdir(class_data_root):
class_images_path = list(class_data_root.iterdir())
class_prompt = [concept["class_prompt"] for _ in range(len(class_images_path))]
else:
with open(class_data_root, "r") as f:
class_images_path = f.read().splitlines()
with open(concept["class_prompt"], "r") as f:
class_prompt = f.read().splitlines()
class_img_path = [(x, y) for (x, y) in zip(class_images_path, class_prompt)]
self.class_images_path.extend(class_img_path[:num_class_images])
random.shuffle(self.instance_images_path)
self.num_instance_images = len(self.instance_images_path)
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
self.flip = transforms.RandomHorizontalFlip(0.5 * hflip)
self.image_transforms = transforms.Compose(
[
self.flip,
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __len__(self):
return self._length
def __getitem__(self, index):
example = {}
instance_image, instance_prompt = self.instance_images_path[index % self.num_instance_images]
instance_image = Image.open(instance_image)
if not instance_image.mode == "RGB":
instance_image = instance_image.convert("RGB")
instance_image = self.flip(instance_image)
##############################################################################
#### apply resize augmentation and create a valid image region mask ##########
##############################################################################
if np.random.randint(0, 3) < 2:
random_scale = np.random.randint(self.size // 3, self.size+1)
else:
random_scale = np.random.randint(int(1.2*self.size), int(1.4*self.size))
if random_scale % 2 == 1:
random_scale += 1
if random_scale < 0.6*self.size:
add_to_caption = np.random.choice(["a far away ", "very small "])
instance_prompt = add_to_caption + instance_prompt
cx = np.random.randint(random_scale // 2, self.size - random_scale // 2 + 1)
cy = np.random.randint(random_scale // 2, self.size - random_scale // 2 + 1)
instance_image1 = preprocess(instance_image, random_scale, self.interpolation)
instance_image = np.zeros((self.size, self.size, 3), dtype=np.float32)
instance_image[cx - random_scale // 2: cx + random_scale // 2, cy - random_scale // 2: cy + random_scale // 2, :] = instance_image1
mask = np.zeros((self.size // 8, self.size // 8))
mask[(cx - random_scale // 2) // 8 + 1: (cx + random_scale // 2) // 8 - 1, (cy - random_scale // 2) // 8 + 1: (cy + random_scale // 2) // 8 - 1] = 1.
elif random_scale > self.size:
add_to_caption = np.random.choice(["zoomed in ", "close up "])
instance_prompt = add_to_caption + instance_prompt
cx = np.random.randint(self.size // 2, random_scale - self.size // 2 + 1)
cy = np.random.randint(self.size // 2, random_scale - self.size // 2 + 1)
instance_image = preprocess(instance_image, random_scale, self.interpolation)
instance_image = instance_image[cx - self.size // 2: cx + self.size // 2, cy - self.size // 2: cy + self.size // 2, :]
mask = np.ones((self.size // 8, self.size // 8))
else:
instance_image = preprocess(instance_image, self.size, self.interpolation)
mask = np.ones((self.size // 8, self.size // 8))
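        # The mask marks the valid image region at 1/8 resolution, matching the
        # 8x-downsampled latent grid on which the diffusion loss is computed.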
########################################################################
example["instance_images"] = torch.from_numpy(instance_image).permute(2, 0, 1)
example["mask"] = torch.from_numpy(mask)
example["instance_prompt_ids"] = self.tokenizer(
instance_prompt,
truncation=True,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids
if self.with_prior_preservation:
class_image, class_prompt = self.class_images_path[index % self.num_class_images]
class_image = Image.open(class_image)
if not class_image.mode == "RGB":
class_image = class_image.convert("RGB")
example["class_images"] = self.image_transforms(class_image)
example["class_mask"] = torch.ones_like(example["mask"])
example["class_prompt_ids"] = self.tokenizer(
class_prompt,
truncation=True,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids
return example
| 20,730 | 50.061576 | 161 |
py
|
custom-diffusion
|
custom-diffusion-main/src/diffusers_composenW.py
|
# Copyright 2022 Adobe Research. All rights reserved.
# To view a copy of the license, visit LICENSE.md.
import sys
import os
import argparse
import torch
from scipy.linalg import lu_factor, lu_solve
sys.path.append('./')
from diffusers import StableDiffusionPipeline
from src import diffusers_sample
def gdupdateWexact(K, V, Ktarget1, Vtarget1, W, device='cuda'):
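    """Closed-form merge of a fine-tuned cross-attention weight matrix.

    Given regularization text features K (with their outputs V under the current weight)
    and target features Ktarget1 with desired outputs Vtarget1, this returns Wnew such
    that Wnew @ Ktarget1.T equals Vtarget1.T (up to numerical error), while the update
    direction is shaped by the covariance K.T @ K so that behaviour on the regularization
    captions changes as little as possible, i.e. the constrained least-squares merge used
    to compose Custom Diffusion concepts.
    """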
input_ = K
output = V
C = input_.T@input_
d = []
lu, piv = lu_factor(C.cpu().numpy())
for i in range(Ktarget1.size(0)):
sol = lu_solve((lu, piv), Ktarget1[i].reshape(-1, 1).cpu().numpy())
d.append(torch.from_numpy(sol).to(K.device))
d = torch.cat(d, 1).T
e2 = [email protected]
e1 = (Vtarget1.T - [email protected])
delta = [email protected](e2)
Wnew = W + delta@d
lambda_split1 = Vtarget1.size(0)
input_ = torch.cat([Ktarget1.T, K.T], dim=1)
output = torch.cat([Vtarget1, V], dim=0)
loss = torch.norm((Wnew@input_).T - output, 2, dim=1)
print(loss[:lambda_split1].mean().item(), loss[lambda_split1:].mean().item())
return Wnew
def compose(paths, category, outpath, pretrained_model_path, regularization_prompt, prompts, save_path, device='cuda'):
model_id = pretrained_model_path
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
layers_modified = []
for name, param in pipe.unet.named_parameters():
if 'attn2.to_k' in name or 'attn2.to_v' in name:
layers_modified.append(name)
tokenizer = pipe.tokenizer
def get_text_embedding(prompts):
with torch.no_grad():
uc = []
for text in prompts:
tokens = tokenizer(text,
truncation=True,
max_length=tokenizer.model_max_length,
return_length=True,
return_overflowing_tokens=False,
padding="do_not_pad",
).input_ids
if 'photo of a' in text[:15]:
print(text)
uc.append(pipe.text_encoder(torch.cuda.LongTensor(tokens).reshape(1,-1))[0][:, 4:].reshape(-1, 768))
else:
uc.append(pipe.text_encoder(torch.cuda.LongTensor(tokens).reshape(1,-1))[0][:, 1:].reshape(-1, 768))
return torch.cat(uc, 0).float()
embeds = {}
count = 1
model2_sts = []
modifier_tokens = []
modifier_token_ids = []
categories = []
for path1, cat1 in zip(paths.split('+'), category.split('+')):
model2_st = torch.load(path1)
if 'modifier_token' in model2_st:
# composition of models with individual concept only
key = list(model2_st['modifier_token'].keys())[0]
_ = tokenizer.add_tokens(f'<new{count}>')
modifier_token_ids.append(tokenizer.convert_tokens_to_ids(f'<new{count}>'))
modifier_tokens.append(True)
embeds[f'<new{count}>'] = model2_st['modifier_token'][key]
else:
modifier_tokens.append(False)
model2_sts.append(model2_st['unet'])
categories.append(cat1)
count += 1
pipe.text_encoder.resize_token_embeddings(len(tokenizer))
token_embeds = pipe.text_encoder.get_input_embeddings().weight.data
for (x, y) in zip(modifier_token_ids, list(embeds.keys())):
token_embeds[x] = embeds[y]
print(x, y, "added embeddings")
f = open(regularization_prompt, 'r')
prompt = [x.strip() for x in f.readlines()][:200]
uc = get_text_embedding(prompt)
uc_targets = []
from collections import defaultdict
uc_values = defaultdict(list)
for composing_model_count in range(len(model2_sts)):
category = categories[composing_model_count]
if modifier_tokens[composing_model_count]:
string1 = f'<new{composing_model_count+1}> {category}'
else:
string1 = f'{category}'
if 'art' in string1:
prompt = [string1] + [f"painting in the style of {string1}"]
else:
prompt = [string1] + [f"photo of a {string1}"]
uc_targets.append(get_text_embedding(prompt))
for each in layers_modified:
uc_values[each].append((model2_sts[composing_model_count][each].to(device)@uc_targets[-1].T).T)
uc_targets = torch.cat(uc_targets, 0)
removal_indices = []
for i in range(uc_targets.size(0)):
for j in range(i+1, uc_targets.size(0)):
if (uc_targets[i]-uc_targets[j]).abs().mean() == 0:
removal_indices.append(j)
removal_indices = list(set(removal_indices))
uc_targets = torch.stack([uc_targets[i] for i in range(uc_targets.size(0)) if i not in removal_indices], 0)
for each in layers_modified:
uc_values[each] = torch.cat(uc_values[each], 0)
uc_values[each] = torch.stack([uc_values[each][i] for i in range(uc_values[each].size(0)) if i not in removal_indices], 0)
print(uc_values[each].size(), each)
print("target size:", uc_targets.size())
new_weights = {'unet': {}}
for each in layers_modified:
W = pipe.unet.state_dict()[each].float()
values = ([email protected]).T
input_target = uc_targets
output_target = uc_values[each]
Wnew = gdupdateWexact(uc[:values.shape[0]],
values,
input_target,
output_target,
W.clone(),
)
new_weights['unet'][each] = Wnew
print(Wnew.size())
new_weights['modifier_token'] = embeds
os.makedirs(f'{save_path}/{outpath}', exist_ok=True)
torch.save(new_weights, f'{save_path}/{outpath}/delta.bin')
if prompts is not None:
if os.path.exists(prompts):
diffusers_sample.sample(model_id, f'{save_path}/{outpath}/delta.bin', prompts, prompt=None, compress=False, freeze_model='crossattn_kv', batch_size=1)
else:
diffusers_sample.sample(model_id, f'{save_path}/{outpath}/delta.bin', from_file=None, prompt=prompts, compress=False, freeze_model='crossattn_kv', batch_size=1)
def parse_args():
parser = argparse.ArgumentParser('', add_help=False)
parser.add_argument('--paths', help='+ separated list of checkpoints', required=True,
type=str)
parser.add_argument('--save_path', help='folder name to save optimized weights', default='optimized_logs',
type=str)
parser.add_argument('--categories', help='+ separated list of categories of the models', required=True,
type=str)
parser.add_argument('--prompts', help='prompts for composition model (can be a file or string)', default=None,
type=str)
parser.add_argument('--ckpt', required=True,
type=str)
parser.add_argument('--regularization_prompt', default='./data/regularization_captions.txt',
type=str)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
paths = args.paths
categories = args.categories
if ' ' in categories:
temp = categories.replace(' ', '_')
else:
temp = categories
outpath = '_'.join(['optimized', temp])
compose(paths, categories, outpath, args.ckpt, args.regularization_prompt, args.prompts, args.save_path)
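# Example invocation (checkpoints, categories and prompt are illustrative):
#   python src/diffusers_composenW.py --paths <cat_delta.bin>+<chair_delta.bin> --categories "cat+wooden chair" \
#       --ckpt CompVis/stable-diffusion-v1-4 --prompts "<new1> cat sitting on a <new2> chair"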
| 7,508 | 37.706186 | 172 |
py
|
custom-diffusion
|
custom-diffusion-main/src/model.py
|
# This code is built from the Stable Diffusion repository: https://github.com/CompVis/stable-diffusion.
# Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors.
# CreativeML Open RAIL-M
#
# ==========================================================================================
#
# Adobe’s modifications are Copyright 2022 Adobe Research. All rights reserved.
# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
# LICENSE.md.
#
# ==========================================================================================
#
# CreativeML Open RAIL-M License
#
# Section I: PREAMBLE
# Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation.
# Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.
# In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation.
# Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI.
# This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.
# NOW THEREFORE, You and Licensor agree as follows:
# 1. Definitions
# - "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
# - "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
# - "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
# - "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
# - "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
# - "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
# - "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
# - "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
# - "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
# - "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
# - "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
# - "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.
# Section II: INTELLECTUAL PROPERTY RIGHTS
# Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
# 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
# 3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.
# Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
# 4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
# Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
# You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
# You must cause any modified files to carry prominent notices stating that You changed the files;
# You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
# You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
# 5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
# 6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
# Section IV: OTHER PROVISIONS
# 7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model.
# 8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
# 9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
# 10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
# 11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
# 12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
# END OF TERMS AND CONDITIONS
# Attachment A
# Use Restrictions
# You agree not to use the Model or Derivatives of the Model:
# - In any way that violates any applicable national, federal, state, local or international law or regulation;
# - For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
# - To generate or disseminate verifiably false information and/or content with the purpose of harming others;
# - To generate or disseminate personal identifiable information that can be used to harm an individual;
# - To defame, disparage or otherwise harass others;
# - For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
# - For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
# - To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
# - For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories;
# - To provide medical advice and medical results interpretation;
# - To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).
import torch
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from torch import nn, einsum
from ldm.models.diffusion.ddpm import LatentDiffusion as LatentDiffusion
from ldm.util import default
from ldm.modules.attention import BasicTransformerBlock as BasicTransformerBlock
from ldm.modules.attention import CrossAttention as CrossAttention
from ldm.util import log_txt_as_img, exists, ismap, isimage, mean_flat, count_params, instantiate_from_config
from torchvision.utils import make_grid
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
import numpy as np
class CustomDiffusion(LatentDiffusion):
def __init__(self,
freeze_model='crossattn-kv',
cond_stage_trainable=False,
add_token=False,
*args, **kwargs):
self.freeze_model = freeze_model
self.add_token = add_token
self.cond_stage_trainable = cond_stage_trainable
super().__init__(cond_stage_trainable=cond_stage_trainable, *args, **kwargs)
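        # Freeze everything in the UNet except the chosen cross-attention weights:
        # only the attn2 key/value projections for 'crossattn-kv', or all attn2
        # parameters for 'crossattn'.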
if self.freeze_model == 'crossattn-kv':
for x in self.model.diffusion_model.named_parameters():
if 'transformer_blocks' not in x[0]:
x[1].requires_grad = False
elif not ('attn2.to_k' in x[0] or 'attn2.to_v' in x[0]):
x[1].requires_grad = False
else:
x[1].requires_grad = True
elif self.freeze_model == 'crossattn':
for x in self.model.diffusion_model.named_parameters():
if 'transformer_blocks' not in x[0]:
x[1].requires_grad = False
elif not 'attn2' in x[0]:
x[1].requires_grad = False
else:
x[1].requires_grad = True
def change_checkpoint(model):
for layer in model.children():
if type(layer) == BasicTransformerBlock:
layer.checkpoint = False
else:
change_checkpoint(layer)
change_checkpoint(self.model.diffusion_model)
def new_forward(self, x, context=None, mask=None):
h = self.heads
crossattn = False
if context is not None:
crossattn = True
q = self.to_q(x)
context = default(context, x)
k = self.to_k(context)
v = self.to_v(context)
if crossattn:
modifier = torch.ones_like(k)
modifier[:, :1, :] = modifier[:, :1, :]*0.
k = modifier*k + (1-modifier)*k.detach()
v = modifier*v + (1-modifier)*v.detach()
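                # Detach k/v at the first context position (the start-of-text token)
                # so no gradient flows through it; all remaining tokens keep gradients.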
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
attn = sim.softmax(dim=-1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
return self.to_out(out)
def change_forward(model):
for layer in model.children():
if type(layer) == CrossAttention:
bound_method = new_forward.__get__(layer, layer.__class__)
setattr(layer, 'forward', bound_method)
else:
change_forward(layer)
change_forward(self.model.diffusion_model)
def configure_optimizers(self):
lr = self.learning_rate
params = []
if self.freeze_model == 'crossattn-kv':
for x in self.model.diffusion_model.named_parameters():
if 'transformer_blocks' in x[0]:
if 'attn2.to_k' in x[0] or 'attn2.to_v' in x[0]:
params += [x[1]]
print(x[0])
elif self.freeze_model == 'crossattn':
for x in self.model.diffusion_model.named_parameters():
if 'transformer_blocks' in x[0]:
if 'attn2' in x[0]:
params += [x[1]]
print(x[0])
else:
params = list(self.model.parameters())
if self.cond_stage_trainable:
print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
if self.add_token:
params = params + list(self.cond_stage_model.transformer.text_model.embeddings.token_embedding.parameters())
else:
params = params + list(self.cond_stage_model.parameters())
if self.learn_logvar:
print('Diffusion model optimizing logvar')
params.append(self.logvar)
opt = torch.optim.AdamW(params, lr=lr)
if self.use_scheduler:
assert 'target' in self.scheduler_config
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
'interval': 'step',
'frequency': 1
}]
return [opt], scheduler
return opt
def p_losses(self, x_start, cond, t, mask=None, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
model_output = self.apply_model(x_noisy, t, cond)
loss_dict = {}
prefix = 'train' if self.training else 'val'
if self.parameterization == "x0":
target = x_start
elif self.parameterization == "eps":
target = noise
else:
raise NotImplementedError()
loss_simple = self.get_loss(model_output, target, mean=False)
if mask is not None:
loss_simple = (loss_simple*mask).sum([1, 2, 3])/mask.sum([1, 2, 3])
else:
loss_simple = loss_simple.mean([1, 2, 3])
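        # When the dataset supplies a mask (see the rescale augmentation in src/finetune_data.py),
        # the loss is averaged only over the masked latent region instead of the whole image.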
loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
logvar_t = (self.logvar.to(self.device))[t]
loss = loss_simple / torch.exp(logvar_t) + logvar_t
# loss = loss_simple / torch.exp(self.logvar) + self.logvar
if self.learn_logvar:
loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
loss_dict.update({'logvar': self.logvar.data.mean()})
loss = self.l_simple_weight * loss.mean()
loss_vlb = self.get_loss(model_output, target, mean=False)
if mask is not None:
loss_vlb = (loss_vlb*mask).sum([1, 2, 3])/mask.sum([1, 2, 3])
else:
loss_vlb = loss_vlb.mean([1, 2, 3])
loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
loss += (self.original_elbo_weight * loss_vlb)
loss_dict.update({f'{prefix}/loss': loss})
return loss, loss_dict
@torch.no_grad()
def get_input_withmask(self, batch, **args):
out = super().get_input(batch, self.first_stage_key, **args)
mask = batch["mask"]
if len(mask.shape) == 3:
mask = mask[..., None]
mask = rearrange(mask, 'b h w c -> b c h w')
mask = mask.to(memory_format=torch.contiguous_format).float()
out += [mask]
return out
def training_step(self, batch, batch_idx):
if isinstance(batch, list):
train_batch = batch[0]
train2_batch = batch[1]
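            # Two dataloaders were configured (typically the target-concept images and the
            # retrieved/generated regularization images); their losses are simply summed below.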
loss_train, loss_dict = self.shared_step(train_batch)
loss_train2, _ = self.shared_step(train2_batch)
loss = loss_train + loss_train2
else:
train_batch = batch
loss, loss_dict = self.shared_step(train_batch)
self.log_dict(loss_dict, prog_bar=True,
logger=True, on_step=True, on_epoch=True)
self.log("global_step", self.global_step,
prog_bar=True, logger=True, on_step=True, on_epoch=False)
if self.use_scheduler:
lr = self.optimizers().param_groups[0]['lr']
self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
return loss
def shared_step(self, batch, **kwargs):
x, c, mask = self.get_input_withmask(batch, **kwargs)
loss = self(x, c, mask=mask)
return loss
@torch.no_grad()
def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
plot_diffusion_rows=True, **kwargs):
use_ddim = ddim_steps is not None
log = dict()
if isinstance(batch, list):
batch = batch[0]
z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
return_first_stage_outputs=True,
force_c_encode=True,
return_original_cond=True,
bs=N)
N = min(x.shape[0], N)
n_row = min(x.shape[0], n_row)
log["inputs"] = x
log["reconstruction"] = xrec
if self.model.conditioning_key is not None:
if hasattr(self.cond_stage_model, "decode"):
xc = self.cond_stage_model.decode(c)
log["conditioning"] = xc
elif self.cond_stage_key in ["caption"]:
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
log["conditioning"] = xc
elif self.cond_stage_key == 'class_label':
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
log['conditioning'] = xc
elif isimage(xc):
log["conditioning"] = xc
if ismap(xc):
log["original_conditioning"] = self.to_rgb(xc)
if plot_diffusion_rows:
# get diffusion row
diffusion_row = list()
z_start = z[:n_row]
for t in range(self.num_timesteps):
if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
t = t.to(self.device).long()
noise = torch.randn_like(z_start)
z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
diffusion_row.append(self.decode_first_stage(z_noisy))
diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
log["diffusion_row"] = diffusion_grid
if sample:
# get denoise row
with self.ema_scope("Plotting"):
unconditional_guidance_scale=6.
unconditional_conditioning = self.get_learned_conditioning(len(c) * [""])
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
ddim_steps=ddim_steps,eta=ddim_eta,
unconditional_conditioning=unconditional_conditioning, unconditional_guidance_scale=unconditional_guidance_scale)
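                # Logged samples use classifier-free guidance: empty-prompt conditioning as the
                # unconditional branch and a fixed guidance scale of 6.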
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
x_samples = self.decode_first_stage(samples)
log["samples_scaled"] = x_samples
if plot_denoise_rows:
denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
log["denoise_row"] = denoise_grid
if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
self.first_stage_model, IdentityFirstStage):
# also display when quantizing x0 while sampling
with self.ema_scope("Plotting Quantized Denoised"):
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
ddim_steps=ddim_steps,eta=ddim_eta,
quantize_denoised=True)
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
# quantize_denoised=True)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_x0_quantized"] = x_samples
if inpaint:
# make a simple center square
b, h, w = z.shape[0], z.shape[2], z.shape[3]
mask = torch.ones(N, h, w).to(self.device)
# zeros will be filled in
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
mask = mask[:, None, ...]
with self.ema_scope("Plotting Inpaint"):
samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_inpainting"] = x_samples
log["mask"] = mask
# outpaint
with self.ema_scope("Plotting Outpaint"):
samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_outpainting"] = x_samples
if plot_progressive_rows:
with self.ema_scope("Plotting Progressives"):
img, progressives = self.progressive_denoising(c,
shape=(self.channels, self.image_size, self.image_size),
batch_size=N)
prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
log["progressive_row"] = prog_row
if return_keys:
if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
return log
else:
return {key: log[key] for key in return_keys}
return log
| 29,720 | 70.102871 | 1,097 |
py
|
custom-diffusion
|
custom-diffusion-main/src/custom_modules.py
|
# This code is built from the Huggingface repository: https://github.com/huggingface/transformers/tree/main/src/transformers/models/clip.
# Copyright 2018- The Hugging Face team. All rights reserved.
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
#
# ==========================================================================================
#
# Adobe’s modifications are Copyright 2022 Adobe Research. All rights reserved.
# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
# LICENSE.md.
#
# ==========================================================================================
#
# Apache License, Version 2.0, January 2004
# 1. Definitions.
# "License" shall mean the terms and conditions for use, reproduction,
# and distribution as defined by Sections 1 through 9 of this document.
# "Licensor" shall mean the copyright owner or entity authorized by
# the copyright owner that is granting the License.
# "Legal Entity" shall mean the union of the acting entity and all
# other entities that control, are controlled by, or are under common
# control with that entity. For the purposes of this definition,
# "control" means (i) the power, direct or indirect, to cause the
# direction or management of such entity, whether by contract or
# otherwise, or (ii) ownership of fifty percent (50%) or more of the
# outstanding shares, or (iii) beneficial ownership of such entity.
# "You" (or "Your") shall mean an individual or Legal Entity
# exercising permissions granted by this License.
# "Source" form shall mean the preferred form for making modifications,
# including but not limited to software source code, documentation
# source, and configuration files.
# "Object" form shall mean any form resulting from mechanical
# transformation or translation of a Source form, including but
# not limited to compiled object code, generated documentation,
# and conversions to other media types.
# "Work" shall mean the work of authorship, whether in Source or
# Object form, made available under the License, as indicated by a
# copyright notice that is included in or attached to the work
# (an example is provided in the Appendix below).
# "Derivative Works" shall mean any work, whether in Source or Object
# form, that is based on (or derived from) the Work and for which the
# editorial revisions, annotations, elaborations, or other modifications
# represent, as a whole, an original work of authorship. For the purposes
# of this License, Derivative Works shall not include works that remain
# separable from, or merely link (or bind by name) to the interfaces of,
# the Work and Derivative Works thereof.
# "Contribution" shall mean any work of authorship, including
# the original version of the Work and any modifications or additions
# to that Work or Derivative Works thereof, that is intentionally
# submitted to Licensor for inclusion in the Work by the copyright owner
# or by an individual or Legal Entity authorized to submit on behalf of
# the copyright owner. For the purposes of this definition, "submitted"
# means any form of electronic, verbal, or written communication sent
# to the Licensor or its representatives, including but not limited to
# communication on electronic mailing lists, source code control systems,
# and issue tracking systems that are managed by, or on behalf of, the
# Licensor for the purpose of discussing and improving the Work, but
# excluding communication that is conspicuously marked or otherwise
# designated in writing by the copyright owner as "Not a Contribution."
# "Contributor" shall mean Licensor and any individual or Legal Entity
# on behalf of whom a Contribution has been received by Licensor and
# subsequently incorporated within the Work.
# 2. Grant of Copyright License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# copyright license to reproduce, prepare Derivative Works of,
# publicly display, publicly perform, sublicense, and distribute the
# Work and such Derivative Works in Source or Object form.
# 3. Grant of Patent License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# (except as stated in this section) patent license to make, have made,
# use, offer to sell, sell, import, and otherwise transfer the Work,
# where such license applies only to those patent claims licensable
# by such Contributor that are necessarily infringed by their
# Contribution(s) alone or by combination of their Contribution(s)
# with the Work to which such Contribution(s) was submitted. If You
# institute patent litigation against any entity (including a
# cross-claim or counterclaim in a lawsuit) alleging that the Work
# or a Contribution incorporated within the Work constitutes direct
# or contributory patent infringement, then any patent licenses
# granted to You under this License for that Work shall terminate
# as of the date such litigation is filed.
# 4. Redistribution. You may reproduce and distribute copies of the
# Work or Derivative Works thereof in any medium, with or without
# modifications, and in Source or Object form, provided that You
# meet the following conditions:
# (a) You must give any other recipients of the Work or
# Derivative Works a copy of this License; and
# (b) You must cause any modified files to carry prominent notices
# stating that You changed the files; and
# (c) You must retain, in the Source form of any Derivative Works
# that You distribute, all copyright, patent, trademark, and
# attribution notices from the Source form of the Work,
# excluding those notices that do not pertain to any part of
# the Derivative Works; and
# (d) If the Work includes a "NOTICE" text file as part of its
# distribution, then any Derivative Works that You distribute must
# include a readable copy of the attribution notices contained
# within such NOTICE file, excluding those notices that do not
# pertain to any part of the Derivative Works, in at least one
# of the following places: within a NOTICE text file distributed
# as part of the Derivative Works; within the Source form or
# documentation, if provided along with the Derivative Works; or,
# within a display generated by the Derivative Works, if and
# wherever such third-party notices normally appear. The contents
# of the NOTICE file are for informational purposes only and
# do not modify the License. You may add Your own attribution
# notices within Derivative Works that You distribute, alongside
# or as an addendum to the NOTICE text from the Work, provided
# that such additional attribution notices cannot be construed
# as modifying the License.
# You may add Your own copyright statement to Your modifications and
# may provide additional or different license terms and conditions
# for use, reproduction, or distribution of Your modifications, or
# for any such Derivative Works as a whole, provided Your use,
# reproduction, and distribution of the Work otherwise complies with
# the conditions stated in this License.
# 5. Submission of Contributions. Unless You explicitly state otherwise,
# any Contribution intentionally submitted for inclusion in the Work
# by You to the Licensor shall be under the terms and conditions of
# this License, without any additional terms or conditions.
# Notwithstanding the above, nothing herein shall supersede or modify
# the terms of any separate license agreement you may have executed
# with Licensor regarding such Contributions.
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor,
# except as required for reasonable and customary use in describing the
# origin of the Work and reproducing the content of the NOTICE file.
# 7. Disclaimer of Warranty. Unless required by applicable law or
# agreed to in writing, Licensor provides the Work (and each
# Contributor provides its Contributions) on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied, including, without limitation, any warranties or conditions
# of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
# PARTICULAR PURPOSE. You are solely responsible for determining the
# appropriateness of using or redistributing the Work and assume any
# risks associated with Your exercise of permissions under this License.
# 8. Limitation of Liability. In no event and under no legal theory,
# whether in tort (including negligence), contract, or otherwise,
# unless required by applicable law (such as deliberate and grossly
# negligent acts) or agreed to in writing, shall any Contributor be
# liable to You for damages, including any direct, indirect, special,
# incidental, or consequential damages of any character arising as a
# result of this License or out of the use or inability to use the
# Work (including but not limited to damages for loss of goodwill,
# work stoppage, computer failure or malfunction, or any and all
# other commercial damages or losses), even if such Contributor
# has been advised of the possibility of such damages.
# 9. Accepting Warranty or Additional Liability. While redistributing
# the Work or Derivative Works thereof, You may choose to offer,
# and charge a fee for, acceptance of support, warranty, indemnity,
# or other liability obligations and/or rights consistent with this
# License. However, in accepting such obligations, You may act only
# on Your own behalf and on Your sole responsibility, not on behalf
# of any other Contributor, and only if You agree to indemnify,
# defend, and hold each Contributor harmless for any liability
# incurred by, or claims asserted against, such Contributor by reason
# of your accepting any such warranty or additional liability.
# END OF TERMS AND CONDITIONS
# APPENDIX: How to apply the Apache License to your work.
# To apply the Apache License to your work, attach the following
# boilerplate notice, with the fields enclosed by brackets "[]"
# replaced with your own identifying information. (Don't include
# the brackets!) The text should be enclosed in the appropriate
# comment syntax for the file format. We also recommend that a
# file or class name and description of purpose be included on the
# same "printed page" as the copyright notice for easier
# identification within third-party archives.
# Copyright [yyyy] [name of copyright owner]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
import torch
import torch.nn as nn
import transformers
from transformers import CLIPTokenizer, CLIPTextModel
class AbstractEncoder(nn.Module):
def __init__(self):
super().__init__()
def encode(self, *args, **kwargs):
raise NotImplementedError
class FrozenCLIPEmbedderWrapper(AbstractEncoder):
"""Uses the CLIP transformer encoder for text (from Hugging Face)"""
def __init__(self, modifier_token, version="openai/clip-vit-large-patch14", device="cuda", max_length=77):
super().__init__()
self.tokenizer = CLIPTokenizer.from_pretrained(version)
self.transformer = CLIPTextModel.from_pretrained(version)
self.device = device
self.max_length = max_length
self.modifier_token = modifier_token
if '+' in self.modifier_token:
self.modifier_token = self.modifier_token.split('+')
else:
self.modifier_token = [self.modifier_token]
self.add_token()
self.freeze()
def add_token(self):
self.modifier_token_id = []
token_embeds1 = self.transformer.get_input_embeddings().weight.data
for each_modifier_token in self.modifier_token:
num_added_tokens = self.tokenizer.add_tokens(each_modifier_token)
modifier_token_id = self.tokenizer.convert_tokens_to_ids(each_modifier_token)
self.modifier_token_id.append(modifier_token_id)
self.transformer.resize_token_embeddings(len(self.tokenizer))
token_embeds = self.transformer.get_input_embeddings().weight.data
token_embeds[self.modifier_token_id[-1]] = torch.nn.Parameter(token_embeds[42170], requires_grad=True)
if len(self.modifier_token) == 2:
token_embeds[self.modifier_token_id[-2]] = torch.nn.Parameter(token_embeds[47629], requires_grad=True)
if len(self.modifier_token) == 3:
token_embeds[self.modifier_token_id[-3]] = torch.nn.Parameter(token_embeds[43514], requires_grad=True)
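        # Each new placeholder token is initialized by copying the embedding of an existing,
        # hard-coded CLIP vocabulary id (42170 / 47629 / 43514); the original file does not
        # document which words these ids correspond to.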
def custom_forward(self, hidden_states, input_ids):
r"""
Returns:
"""
input_shape = hidden_states.size()
bsz, seq_len = input_shape[:2]
if version.parse(transformers.__version__) >= version.parse('4.21'):
causal_attention_mask = self.transformer.text_model._build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to(
hidden_states.device
)
else:
causal_attention_mask = self.transformer.text_model._build_causal_attention_mask(bsz, seq_len).to(
hidden_states.device
)
encoder_outputs = self.transformer.text_model.encoder(
inputs_embeds=hidden_states,
causal_attention_mask=causal_attention_mask,
)
last_hidden_state = encoder_outputs[0]
last_hidden_state = self.transformer.text_model.final_layer_norm(last_hidden_state)
return last_hidden_state
def freeze(self):
self.transformer = self.transformer.eval()
for param in self.transformer.text_model.encoder.parameters():
param.requires_grad = False
for param in self.transformer.text_model.final_layer_norm.parameters():
param.requires_grad = False
for param in self.transformer.text_model.embeddings.position_embedding.parameters():
param.requires_grad = False
def forward(self, text):
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
tokens = batch_encoding["input_ids"].to(self.device)
indices = tokens == self.modifier_token_id[-1]
for token_id in self.modifier_token_id:
indices |= tokens == token_id
indices = (indices*1).unsqueeze(-1)
input_shape = tokens.size()
tokens = tokens.view(-1, input_shape[-1])
hidden_states = self.transformer.text_model.embeddings(input_ids=tokens)
hidden_states = (1-indices)*hidden_states.detach() + indices*hidden_states
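        # Detach the embedding output everywhere except at the modifier-token positions, so
        # only the newly added token embeddings receive gradients during fine-tuning.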
z = self.custom_forward(hidden_states, tokens)
return z
def encode(self, text):
return self(text)
if __name__ == "__main__":
from ldm.util import count_params
    # modifier_token is a required argument; '<new1>' is just a placeholder for this quick test
    model = FrozenCLIPEmbedderWrapper(modifier_token='<new1>')
count_params(model, verbose=True)
| 16,723 | 50.937888 | 137 |
py
|
custom-diffusion
|
custom-diffusion-main/src/finetune_data.py
|
# This code is built from the Stable Diffusion repository: https://github.com/CompVis/stable-diffusion.
# Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors.
# CreativeML Open RAIL-M
#
# ==========================================================================================
#
# Adobe’s modifications are Copyright 2022 Adobe Research. All rights reserved.
# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
# LICENSE.md.
#
# ==========================================================================================
#
# CreativeML Open RAIL-M License
#
# Section I: PREAMBLE
# Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation.
# Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.
# In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation.
# Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI.
# This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.
# NOW THEREFORE, You and Licensor agree as follows:
# 1. Definitions
# - "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
# - "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
# - "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
# - "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
# - "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
# - "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
# - "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
# - "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
# - "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
# - "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
# - "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
# - "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.
# Section II: INTELLECTUAL PROPERTY RIGHTS
# Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
# 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
# 3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.
# Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
# 4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
# Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
# You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
# You must cause any modified files to carry prominent notices stating that You changed the files;
# You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
# You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
# 5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
# 6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
# Section IV: OTHER PROVISIONS
# 7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model.
# 8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
# 9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
# 10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
# 11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
# 12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
# END OF TERMS AND CONDITIONS
# Attachment A
# Use Restrictions
# You agree not to use the Model or Derivatives of the Model:
# - In any way that violates any applicable national, federal, state, local or international law or regulation;
# - For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
# - To generate or disseminate verifiably false information and/or content with the purpose of harming others;
# - To generate or disseminate personal identifiable information that can be used to harm an individual;
# - To defame, disparage or otherwise harass others;
# - For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
# - For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
# - To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
# - For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories;
# - To provide medical advice and medical results interpretation;
# - To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).
import os
import numpy as np
import PIL
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
templates_small = [
'photo of a {}',
]
templates_small_style = [
'painting in the style of {}',
]
def isimage(path):
    if 'png' in path.lower() or 'jpg' in path.lower() or 'jpeg' in path.lower():
        return True
    return False
class MaskBase(Dataset):
def __init__(self,
datapath,
reg_datapath=None,
caption=None,
reg_caption=None,
size=512,
interpolation="bicubic",
flip_p=0.5,
aug=True,
style=False,
repeat=0.
):
self.aug = aug
self.repeat = repeat
self.style = style
self.templates_small = templates_small
if self.style:
self.templates_small = templates_small_style
if os.path.isdir(datapath):
self.image_paths1 = [os.path.join(datapath, file_path) for file_path in os.listdir(datapath) if isimage(file_path)]
else:
with open(datapath, "r") as f:
self.image_paths1 = f.read().splitlines()
self._length1 = len(self.image_paths1)
self.image_paths2 = []
self._length2 = 0
if reg_datapath is not None:
if os.path.isdir(reg_datapath):
self.image_paths2 = [os.path.join(reg_datapath, file_path) for file_path in os.listdir(reg_datapath) if isimage(file_path)]
else:
with open(reg_datapath, "r") as f:
self.image_paths2 = f.read().splitlines()
self._length2 = len(self.image_paths2)
self.labels = {
"relative_file_path1_": [x for x in self.image_paths1],
"relative_file_path2_": [x for x in self.image_paths2],
}
self.size = size
self.interpolation = {"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
}[interpolation]
self.flip = transforms.RandomHorizontalFlip(p=flip_p)
self.caption = caption
if os.path.exists(self.caption):
self.caption = [x.strip() for x in open(caption, 'r').readlines()]
self.reg_caption = reg_caption
if os.path.exists(self.reg_caption):
self.reg_caption = [x.strip() for x in open(reg_caption, 'r').readlines()]
def __len__(self):
if self._length2 > 0:
return 2*self._length2
elif self.repeat > 0:
return self._length1*self.repeat
else:
return self._length1
def __getitem__(self, i):
example = {}
if i > self._length2 or self._length2 == 0:
image = Image.open(self.labels["relative_file_path1_"][i % self._length1])
if isinstance(self.caption, str):
example["caption"] = np.random.choice(self.templates_small).format(self.caption)
else:
example["caption"] = self.caption[i % min(self._length1, len(self.caption)) ]
else:
image = Image.open(self.labels["relative_file_path2_"][i % self._length2])
if isinstance(self.reg_caption, str):
example["caption"] = np.random.choice(self.templates_small).format(self.reg_caption)
else:
example["caption"] = self.reg_caption[i % self._length2]
if not image.mode == "RGB":
image = image.convert("RGB")
# default to score-sde preprocessing
img = np.array(image).astype(np.uint8)
crop = min(img.shape[0], img.shape[1])
h, w, = img.shape[0], img.shape[1]
img = img[(h - crop) // 2:(h + crop) // 2,
(w - crop) // 2:(w + crop) // 2]
image = Image.fromarray(img)
image = self.flip(image)
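        # Augmentation for the target images (first branch below): with probability ~2/3 a
        # smaller scale is sampled, and if it falls below 0.6x the target size the resized image
        # is pasted at a random position on a black canvas with a matching latent mask and a
        # "far away"/"very small" caption prefix; with probability ~1/3 a larger scale is sampled
        # and the image is randomly cropped with a "zoomed in"/"close up" prefix; otherwise it is
        # simply resized to size x size with an all-ones mask.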
if i > self._length2 or self._length2 == 0:
if self.aug:
if np.random.randint(0, 3) < 2:
random_scale = np.random.randint(self.size // 3, self.size+1)
else:
random_scale = np.random.randint(int(1.2*self.size), int(1.4*self.size))
if random_scale % 2 == 1:
random_scale += 1
else:
random_scale = self.size
if random_scale < 0.6*self.size:
add_to_caption = np.random.choice(["a far away ", "very small "])
example["caption"] = add_to_caption + example["caption"]
cx = np.random.randint(random_scale // 2, self.size - random_scale // 2 + 1)
cy = np.random.randint(random_scale // 2, self.size - random_scale // 2 + 1)
image = image.resize((random_scale, random_scale), resample=self.interpolation)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
input_image1 = np.zeros((self.size, self.size, 3), dtype=np.float32)
input_image1[cx - random_scale // 2: cx + random_scale // 2, cy - random_scale // 2: cy + random_scale // 2, :] = image
mask = np.zeros((self.size // 8, self.size // 8))
mask[(cx - random_scale // 2) // 8 + 1: (cx + random_scale // 2) // 8 - 1, (cy - random_scale // 2) // 8 + 1: (cy + random_scale // 2) // 8 - 1] = 1.
elif random_scale > self.size:
add_to_caption = np.random.choice(["zoomed in ", "close up "])
example["caption"] = add_to_caption + example["caption"]
cx = np.random.randint(self.size // 2, random_scale - self.size // 2 + 1)
cy = np.random.randint(self.size // 2, random_scale - self.size // 2 + 1)
image = image.resize((random_scale, random_scale), resample=self.interpolation)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
input_image1 = image[cx - self.size // 2: cx + self.size // 2, cy - self.size // 2: cy + self.size // 2, :]
mask = np.ones((self.size // 8, self.size // 8))
else:
if self.size is not None:
image = image.resize((self.size, self.size), resample=self.interpolation)
input_image1 = np.array(image).astype(np.uint8)
input_image1 = (input_image1 / 127.5 - 1.0).astype(np.float32)
mask = np.ones((self.size // 8, self.size // 8))
else:
if self.size is not None:
image = image.resize((self.size, self.size), resample=self.interpolation)
input_image1 = np.array(image).astype(np.uint8)
input_image1 = (input_image1 / 127.5 - 1.0).astype(np.float32)
mask = np.ones((self.size // 8, self.size // 8))
example["image"] = input_image1
example["mask"] = mask
return example
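# A minimal usage sketch (paths and captions below are illustrative placeholders, not part
# of the original file):
#   ds = MaskBase(datapath='data/cat', reg_datapath='real_reg/samples_cat/images.txt',
#                 caption='<new1> cat', reg_caption='real_reg/samples_cat/caption.txt', size=512)
#   sample = ds[0]  # dict with 'image' (512x512x3 float32 in [-1, 1]), 'mask' (64x64), 'caption'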
| 22,159 | 81.686567 | 1,097 |
py
|
custom-diffusion
|
custom-diffusion-main/src/retrieve.py
|
# Copyright 2022 Adobe Research. All rights reserved.
# To view a copy of the license, visit LICENSE.md.
import argparse
import os
import tqdm
from pathlib import Path
import requests
from PIL import Image
from io import BytesIO
from clip_retrieval.clip_client import ClipClient
def retrieve(target_name, outpath, num_class_images):
num_images = 2*num_class_images
client = ClipClient(url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)
if len(target_name.split()):
target = '_'.join(target_name.split())
else:
target = target_name
os.makedirs(f'{outpath}/{target}', exist_ok=True)
if len(list(Path(f'{outpath}/{target}').iterdir())) >= num_class_images:
return
while True:
results = client.query(text=target_name)
if len(results) >= num_class_images or num_images > 1e4:
break
else:
num_images = int(1.5*num_images)
client = ClipClient(url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)
count = 0
urls = []
captions = []
pbar = tqdm.tqdm(desc='downloading real regularization images', total=num_class_images)
for each in results:
name = f'{outpath}/{target}/{count}.jpg'
success = True
while True:
try:
img = requests.get(each['url'])
success = True
break
except:
success = False
break
if success and img.status_code == 200:
try:
_ = Image.open(BytesIO(img.content))
with open(name, 'wb') as f:
f.write(img.content)
urls.append(each['url'])
captions.append(each['caption'])
count += 1
pbar.update(1)
except:
pass
if count > num_class_images:
break
with open(f'{outpath}/caption.txt', 'w') as f:
for each in captions:
f.write(each.strip() + '\n')
with open(f'{outpath}/urls.txt', 'w') as f:
for each in urls:
f.write(each.strip() + '\n')
with open(f'{outpath}/images.txt', 'w') as f:
for p in range(count):
f.write(f'{outpath}/{target}/{p}.jpg' + '\n')
def parse_args():
parser = argparse.ArgumentParser('', add_help=False)
parser.add_argument('--target_name', help='target string for query',
type=str)
parser.add_argument('--outpath', help='path to save retrieved images', default='./',
type=str)
parser.add_argument('--num_class_images', help='number of retrieved images', default=200,
type=int)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
retrieve(args.target_name, args.outpath, args.num_class_images)
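# Illustrative invocation (target name and output path are placeholders):
#   python src/retrieve.py --target_name "cat" --outpath real_reg/samples_cat --num_class_images 200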
| 2,989 | 31.150538 | 143 |
py
|
custom-diffusion
|
custom-diffusion-main/src/get_deltas.py
|
# Copyright 2022 Adobe Research. All rights reserved.
# To view a copy of the license, visit LICENSE.md.
import os
import argparse
import glob
import torch
def main(path, newtoken=0):
layers = []
for files in glob.glob(f'{path}/checkpoints/*'):
if ('=' in files or '_' in files) and 'delta' not in files:
print(files)
if '=' in files:
epoch_number = files.split('=')[1].split('.ckpt')[0]
elif '_' in files:
epoch_number = files.split('/')[-1].split('.ckpt')[0]
st = torch.load(files)["state_dict"]
if len(layers) == 0:
for key in list(st.keys()):
if 'attn2.to_k' in key or 'attn2.to_v' in key:
layers.append(key)
print(layers)
st_delta = {'state_dict': {}}
for each in layers:
st_delta['state_dict'][each] = st[each].clone()
print('/'.join(files.split('/')[:-1]) + f'/delta_epoch={epoch_number}.ckpt')
num_tokens = st['cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'].shape[0]
if newtoken > 0:
print("saving the optimized embedding")
st_delta['state_dict']['embed'] = st['cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'][-newtoken:].clone()
print(st_delta['state_dict']['embed'].shape, num_tokens)
torch.save(st_delta, '/'.join(files.split('/')[:-1]) + f'/delta_epoch={epoch_number}.ckpt')
os.remove(files)
def parse_args():
parser = argparse.ArgumentParser('', add_help=False)
parser.add_argument('--path', help='path of folder to checkpoints',
type=str)
parser.add_argument('--newtoken', help='number of new tokens in the checkpoint', default=1,
type=int)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
path = args.path
main(path, args.newtoken)
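# Illustrative invocation (the log folder is a placeholder); this rewrites each full checkpoint
# under <path>/checkpoints into a much smaller delta_epoch=*.ckpt containing only the
# cross-attention key/value weights (plus any optimized token embeddings) and deletes the original:
#   python src/get_deltas.py --path logs/<experiment-folder> --newtoken 1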
| 2,047 | 36.925926 | 149 |
py
|
custom-diffusion
|
custom-diffusion-main/src/composenW.py
|
# Copyright 2022 Adobe Research. All rights reserved.
# To view a copy of the license, visit LICENSE.md.
import sys
import os
import argparse
import random
import torch
import torchvision
import numpy as np
from tqdm import tqdm
from scipy.linalg import lu_factor, lu_solve
sys.path.append('stable-diffusion')
sys.path.append('./')
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
def load_model_from_config(config, ckpt):
print(f"Loading model from {ckpt}")
pl_sd = torch.load(ckpt, map_location="cpu")
sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
model.cuda()
model.eval()
return model
def get_model(path):
config = OmegaConf.load("configs/custom-diffusion/finetune.yaml")
model = load_model_from_config(config, path)
return model, config
def gdupdateWexact(K, V, Ktarget1, Vtarget1, W, device='cuda'):
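    # K holds text features of the regularization captions and V the corresponding outputs of the
    # pretrained layer W; Ktarget1/Vtarget1 hold the target-concept features and the values
    # produced by the individually fine-tuned models. The closed-form update below appears to be
    # the constrained merge from the Custom Diffusion paper: it makes Wnew map Ktarget1 to
    # Vtarget1 while (approximately) preserving W's behaviour on the regularization features.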
input_ = K
output = V
C = input_.T@input_
d = []
lu, piv = lu_factor(C.cpu().numpy())
for i in range(Ktarget1.size(0)):
sol = lu_solve((lu, piv), Ktarget1[i].reshape(-1, 1).cpu().numpy())
d.append(torch.from_numpy(sol).to(K.device))
d = torch.cat(d, 1).T
e2 = [email protected]
e1 = (Vtarget1.T - [email protected])
delta = [email protected](e2)
Wnew = W + delta@d
lambda_split1 = Vtarget1.size(0)
input_ = torch.cat([Ktarget1.T, K.T], dim=1)
output = torch.cat([Vtarget1, V], dim=0)
loss = torch.norm((Wnew@input_).T - output, 2, dim=1)
print(loss[:lambda_split1].mean().item(), loss[lambda_split1:].mean().item())
return Wnew
def compose(paths, category, outpath, pretrained_model_path, regularization_prompt, prompts, save_path, device='cuda'):
model, config = get_model(pretrained_model_path)
model.eval()
model.requires_grad = False
layers = []
layers_modified = []
def getlayers(model, root_name=''):
for name, module in model.named_children():
if module.__class__.__name__ == 'SpatialTransformer':
layers_modified.append(root_name + '.' + name + '.transformer_blocks.0.attn2.to_k')
layers_modified.append(root_name + '.' + name + '.transformer_blocks.0.attn2.to_v')
else:
if list(module.children()) == []:
layers.append(root_name + '.' + name)
else:
getlayers(module, root_name + '.' + name)
getlayers(model.model.diffusion_model)
for i in range(len(layers_modified)):
layers_modified[i] = 'model.diffusion_model' + layers_modified[i] + '.weight'
def get_text_embedding(prompts):
with torch.no_grad():
uc = []
for text in prompts:
tokens = tokenizer(text,
truncation=True,
max_length=77,
return_length=True,
return_overflowing_tokens=False,
padding="max_length",
return_tensors="pt")
tokens = tokens["input_ids"]
end = torch.nonzero(tokens == 49407)[:, 1].min()
if 'photo of a' in text[:15]:
print(text)
uc.append((model.get_learned_conditioning(1 * [text])[:, 4:end+1]).reshape(-1, 768))
else:
uc.append((model.get_learned_conditioning(1 * [text])[:, 1:end+1]).reshape(-1, 768))
return torch.cat(uc, 0)
tokenizer = model.cond_stage_model.tokenizer
embeds = []
count = 1
model2_sts = []
modifier_tokens = []
categories = []
config.model.params.cond_stage_config.params = {}
config.model.params.cond_stage_config.params.modifier_token = None
for path1, cat1 in zip(paths.split('+'), category.split('+')):
model2_st = torch.load(path1)
if 'embed' in model2_st['state_dict']:
config.model.params.cond_stage_config.target = 'src.custom_modules.FrozenCLIPEmbedderWrapper'
embeds.append(model2_st['state_dict']['embed'][-1:])
num_added_tokens1 = tokenizer.add_tokens(f'<new{count}>')
modifier_token_id1 = tokenizer.convert_tokens_to_ids('<new1>')
modifier_tokens.append(True)
if config.model.params.cond_stage_config.params.modifier_token is None:
config.model.params.cond_stage_config.params.modifier_token = f'<new{count}>'
else:
config.model.params.cond_stage_config.params.modifier_token += f'+<new{count}>'
else:
modifier_tokens.append(False)
model2_sts.append(model2_st['state_dict'])
categories.append(cat1)
count += 1
embeds = torch.cat(embeds, 0)
model.cond_stage_model.transformer.resize_token_embeddings(len(tokenizer))
token_embeds = model.cond_stage_model.transformer.get_input_embeddings().weight.data
token_embeds[-embeds.size(0):] = embeds
f = open(regularization_prompt, 'r')
prompt = [x.strip() for x in f.readlines()][:200]
uc = get_text_embedding(prompt)
uc_targets = []
from collections import defaultdict
uc_values = defaultdict(list)
for composing_model_count in range(len(model2_sts)):
category = categories[composing_model_count]
if modifier_tokens[composing_model_count]:
string1 = f'<new{composing_model_count+1}> {category}'
else:
string1 = f'{category}'
if 'art' in string1:
prompt = [string1] + [f"painting in the style of {string1}"]
else:
prompt = [string1] + [f"a photo of {string1}"]
uc_targets.append(get_text_embedding(prompt))
for each in layers_modified:
uc_values[each].append((model2_sts[composing_model_count][each].to(device)@uc_targets[-1].T).T)
uc_targets = torch.cat(uc_targets, 0)
removal_indices = []
for i in range(uc_targets.size(0)):
for j in range(i+1, uc_targets.size(0)):
if (uc_targets[i]-uc_targets[j]).abs().mean() == 0:
removal_indices.append(j)
removal_indices = list(set(removal_indices))
uc_targets = torch.stack([uc_targets[i] for i in range(uc_targets.size(0)) if i not in removal_indices], 0)
for each in layers_modified:
uc_values[each] = torch.cat(uc_values[each], 0)
uc_values[each] = torch.stack([uc_values[each][i] for i in range(uc_values[each].size(0)) if i not in removal_indices], 0)
print(uc_values[each].size(), each)
print("target size:", uc_targets.size())
new_weights = {}
for each in layers_modified:
values = (model.state_dict()[each]@uc.T).T
input_target = uc_targets
output_target = uc_values[each]
Wnew = gdupdateWexact(uc[:values.shape[0]],
values,
input_target,
output_target,
model.state_dict()[each].clone(),
)
new_weights[each] = Wnew
print(Wnew.size())
if prompts is not None:
model.load_state_dict(new_weights, strict=False)
sampler = DDIMSampler(model)
sampler.make_schedule(ddim_num_steps=200, ddim_eta=1., verbose=False)
seed = 68
os.environ['PYTHONHASHSEED'] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
batch_size = 10
if not os.path.exists(prompts):
assert prompts is not None
prompts = [batch_size * [prompts]]
else:
print(f"reading prompts from {prompts}")
with open(prompts, "r") as f:
prompts = f.read().splitlines()
prompts = [batch_size * [prompt] for prompt in prompts]
print(prompts[0])
sample_path = os.path.join(f'{save_path}/{outpath}/', 'samples')
os.makedirs(sample_path, exist_ok=True)
with torch.no_grad():
for counter, prompt in enumerate(prompts):
print(prompt)
uc_try = model.get_learned_conditioning(batch_size * [prompt[0]])
unconditional_guidance_scale = 6.
cond = uc_try
unconditional_conditioning = model.get_learned_conditioning(batch_size * [""])
img = torch.randn((batch_size, 4, 64, 64)).cuda()
ddim_use_original_steps = False
timesteps = sampler.ddpm_num_timesteps if ddim_use_original_steps else sampler.ddim_timesteps
time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((batch_size,), step, device=device, dtype=torch.long)
outs = sampler.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning)
img, _ = outs
outim = model.decode_first_stage(outs[0])
outim = torch.clamp((outim + 1.0) / 2.0, min=0.0, max=1.0)
name = '-'.join(prompt[0].split(' '))
torchvision.utils.save_image(outim, f'{save_path}/{outpath}/{counter}_{name}.jpg', nrow=batch_size // 2)
new_weights['embed'] = embeds
os.makedirs(f'{save_path}/{outpath}', exist_ok=True)
os.makedirs(f'{save_path}/{outpath}/checkpoints', exist_ok=True)
os.makedirs(f'{save_path}/{outpath}/configs', exist_ok=True)
with open(f'{save_path}/{outpath}/configs/config_project.yaml', 'w') as fp:
OmegaConf.save(config=config, f=fp)
torch.save({'state_dict': new_weights}, f'{save_path}/{outpath}/checkpoints/delta_epoch=000000.ckpt')
def parse_args():
parser = argparse.ArgumentParser('', add_help=False)
parser.add_argument('--paths', help='+ separated list of checkpoints', required=True,
type=str)
parser.add_argument('--save_path', help='folder name to save optimized weights', default='optimized_logs',
type=str)
parser.add_argument('--categories', help='+ separated list of categories of the models', required=True,
type=str)
parser.add_argument('--prompts', help='prompts for composition model (can be a file or string)', default=None,
type=str)
parser.add_argument('--ckpt', required=True,
type=str)
parser.add_argument('--regularization_prompt', default='./data/regularization_captions.txt',
type=str)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
paths = args.paths
categories = args.categories
if ' ' in categories:
temp = categories.replace(' ', '_')
else:
temp = categories
outpath = '_'.join(['optimized', temp])
compose(paths, categories, outpath, args.ckpt, args.regularization_prompt, args.prompts, args.save_path)
| 11,722 | 38.738983 | 130 |
py
|
custom-diffusion
|
custom-diffusion-main/src/convert.py
|
# Copyright 2022 Adobe Research. All rights reserved.
# To view a copy of the license, visit LICENSE.md.
import os, sys
import argparse
import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
sys.path.append('stable-diffusion')
sys.path.append('./')
from src.diffusers_model_pipeline import CustomDiffusionPipeline
def load_model_from_config(config, ckpt, verbose=False):
print(f"Loading model from {ckpt}")
pl_sd = torch.load(ckpt, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
model.cuda()
model.eval()
return model
def load_model_from_config_addtoken(config, ckpt, verbose=False):
print(f"Loading model from {ckpt}")
pl_sd = torch.load(ckpt, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
token_weights = sd["cond_stage_model.transformer.text_model.embeddings.token_embedding.weight"]
del sd["cond_stage_model.transformer.text_model.embeddings.token_embedding.weight"]
m, u = model.load_state_dict(sd, strict=False)
model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data[:token_weights.shape[0]] = token_weights
if len(m) > 0 and verbose:
print("missing keys:")
print(m)
if len(u) > 0 and verbose:
print("unexpected keys:")
print(u)
model.cuda()
model.eval()
return model
def convert(ckpt, delta_ckpt, sd_version, config, modelname, mode):
config = OmegaConf.load(config)
model = load_model_from_config(config, f"{ckpt}")
# get the mapping of layer names between diffuser and CompVis checkpoints
mapping_compvis_to_diffuser = {}
mapping_compvis_to_diffuser_rev = {}
for key in list(model.state_dict().keys()):
if 'attn2' in key:
diffuser_key = key.replace('model.diffusion_model.', '')
if 'input_blocks' in key:
i, j = [int(x) for x in key.split('.')[3:5]]
i_, j_ = max(0, i // 3), 0 if i in [1, 4, 7] else 1
diffuser_key = diffuser_key.replace(f'input_blocks.{i}.{j}', f'down_blocks.{i_}.attentions.{j_}')
if 'output_blocks' in key:
i, j = [int(x) for x in key.split('.')[3:5]]
i_, j_ = max(0, i // 3), 0 if i % 3 == 0 else 1 if i % 3 == 1 else 2
diffuser_key = diffuser_key.replace(f'output_blocks.{i}.{j}', f'up_blocks.{i_}.attentions.{j_}')
diffuser_key = diffuser_key.replace('middle_block.1', 'mid_block.attentions.0')
mapping_compvis_to_diffuser[key] = diffuser_key
mapping_compvis_to_diffuser_rev[diffuser_key] = key
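    # Illustrative example of the resulting mapping (layer names assume the
    # standard Stable Diffusion v1 UNet layout):
    #   'model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'
    #       -> 'down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.weight'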
# convert checkpoint to webui
    if mode in ['diffuser-to-webui', 'compvis-to-webui']:
outpath = f'{os.path.dirname(delta_ckpt)}/webui'
os.makedirs(outpath, exist_ok=True)
if mode == 'diffuser-to-webui':
st = torch.load(delta_ckpt)
compvis_st = {}
compvis_st['state_dict'] = {}
for key in list(st['unet'].keys()):
compvis_st['state_dict'][mapping_compvis_to_diffuser_rev[key]] = st['unet'][key]
model.load_state_dict(compvis_st['state_dict'], strict=False)
torch.save({'state_dict': model.state_dict()}, f'{outpath}/{modelname}')
if 'modifier_token' in st:
os.makedirs(f'{outpath}/embeddings/', exist_ok=True)
for word, feat in st['modifier_token'].items():
torch.save({word: feat}, f'{outpath}/embeddings/{word}.pt')
else:
compvis_st = torch.load(delta_ckpt)["state_dict"]
            model.load_state_dict(compvis_st, strict=False)
torch.save({'state_dict': model.state_dict()}, f'{outpath}/{modelname}')
            if 'embed' in compvis_st:
os.makedirs(f'{outpath}/embeddings/', exist_ok=True)
                for i, feat in enumerate(compvis_st['embed']):
torch.save({f'<new{i}>': feat}, f'{outpath}/embeddings/<new{i}>.pt')
# convert checkpoint from CompVis to diffuser
elif mode == 'compvis-to-diffuser':
st = torch.load(delta_ckpt)["state_dict"]
diffuser_st = {'unet': {}}
if 'embed' in st:
diffuser_st['modifier_token'] = {}
for i in range(st['embed'].size(0)):
diffuser_st['modifier_token'][f'<new{i+1}>'] = st['embed'][i].clone()
del st['embed']
for key in list(st.keys()):
diffuser_st['unet'][mapping_compvis_to_diffuser[key]] = st[key]
torch.save(diffuser_st, f'{os.path.dirname(delta_ckpt)}/delta.bin')
pipe = CustomDiffusionPipeline.from_pretrained(sd_version, torch_dtype=torch.float16).to("cuda")
pipe.load_model(f'{os.path.dirname(delta_ckpt)}/delta.bin')
pipe.save_pretrained(os.path.dirname(delta_ckpt), all=True)
# convert checkpoint from diffuser to CompVis
elif mode == 'diffuser-to-compvis':
st = torch.load(delta_ckpt)
compvis_st = {}
compvis_st['state_dict'] = {}
if 'modifier_token' in st:
compvis_st['state_dict']['embed'] = []
for _, feat in st['modifier_token'].items():
compvis_st['state_dict']['embed'].append(feat)
compvis_st['state_dict']['embed'] = torch.cat(compvis_st['state_dict']['embed'])
config.model.params.cond_stage_config.target = 'src.custom_modules.FrozenCLIPEmbedderWrapper'
config.model.params.cond_stage_config.params = {}
config.model.params.cond_stage_config.params.modifier_token = '+'.join([f'<new{i+1}>' for i in range(len(st['modifier_token']))])
for key in list(st['unet'].keys()):
compvis_st['state_dict'][mapping_compvis_to_diffuser_rev[key]] = st['unet'][key]
torch.save(compvis_st, f'{os.path.dirname(delta_ckpt)}/delta_model.ckpt')
model = load_model_from_config_addtoken(config, f"{ckpt}")
if 'modifier_token' in st:
model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data[-len(st['modifier_token']):] = compvis_st['state_dict']['embed']
del compvis_st['state_dict']['embed']
model.load_state_dict(compvis_st['state_dict'], strict=False)
torch.save({'state_dict': model.state_dict()}, f'{os.path.dirname(delta_ckpt)}/model.ckpt')
def parse_args():
parser = argparse.ArgumentParser('Checkpoint conversion given delta ckpts, currently supported for stable diffusion 1.4 only', add_help=True)
parser.add_argument('--ckpt', help='pretrained compvis model checkpoint', required=True,
type=str)
parser.add_argument('--delta_ckpt', help='delta checkpoint either of compvis or diffuser', required=True,
type=str)
parser.add_argument('--sd_version', default="CompVis/stable-diffusion-v1-4",
type=str)
parser.add_argument('--config', default="configs/custom-diffusion/finetune.yaml",
type=str)
parser.add_argument('--modelname', default="model.ckpt", help="name of the model to save when converting to webui",
type=str)
parser.add_argument("--mode", default='compvis-to-diffuser', choices=['diffuser-to-webui', 'compvis-to-webui', 'compvis-to-diffuser', 'diffuser-to-compvis'],
type=str)
return parser.parse_args()
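# Example invocation (paths are illustrative placeholders, not shipped files):
#   python src/convert.py --ckpt sd-v1-4.ckpt \
#       --delta_ckpt logs/<experiment>/checkpoints/delta_epoch=000000.ckpt \
#       --mode compvis-to-diffuser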
if __name__ == "__main__":
args = parse_args()
assert args.sd_version == "CompVis/stable-diffusion-v1-4"
convert(args.ckpt, args.delta_ckpt, args.sd_version, args.config, args.modelname, args.mode)
| 7,914 | 47.858025 | 161 |
py
|
custom-diffusion
|
custom-diffusion-main/src/__init__.py
| 0 | 0 | 0 |
py
|
|
custom-diffusion
|
custom-diffusion-main/src/diffusers_sample.py
|
# ==========================================================================================
#
# MIT License. To view a copy of the license, visit MIT_LICENSE.md.
#
# ==========================================================================================
import argparse
import sys
import os
import numpy as np
import torch
from PIL import Image
sys.path.append('./')
from src.diffusers_model_pipeline import CustomDiffusionPipeline
def sample(ckpt, delta_ckpt, from_file, prompt, compress, batch_size, freeze_model):
model_id = ckpt
pipe = CustomDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
pipe.load_model(delta_ckpt, compress)
outdir = os.path.dirname(delta_ckpt)
generator = torch.Generator(device='cuda').manual_seed(42)
all_images = []
if prompt is not None:
images = pipe([prompt]*batch_size, num_inference_steps=200, guidance_scale=6., eta=1., generator=generator).images
all_images += images
images = np.hstack([np.array(x) for x in images])
images = Image.fromarray(images)
# takes only first 50 characters of prompt to name the image file
name = '-'.join(prompt[:50].split())
images.save(f'{outdir}/{name}.png')
else:
print(f"reading prompts from {from_file}")
with open(from_file, "r") as f:
data = f.read().splitlines()
data = [[prompt]*batch_size for prompt in data]
for prompt in data:
images = pipe(prompt, num_inference_steps=200, guidance_scale=6., eta=1., generator=generator).images
all_images += images
            images = np.hstack([np.array(x) for x in images])
images = Image.fromarray(images)
# takes only first 50 characters of prompt to name the image file
name = '-'.join(prompt[0][:50].split())
images.save(f'{outdir}/{name}.png')
os.makedirs(f'{outdir}/samples', exist_ok=True)
for i, im in enumerate(all_images):
im.save(f'{outdir}/samples/{i}.jpg')
def parse_args():
parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--ckpt', help='pretrained model name or path (passed to from_pretrained)',
                        type=str)
    parser.add_argument('--delta_ckpt', help='path to the fine-tuned delta checkpoint', default=None,
                        type=str)
parser.add_argument('--from-file', help='path to prompt file', default='./',
type=str)
parser.add_argument('--prompt', help='prompt to generate', default=None,
type=str)
parser.add_argument("--compress", action='store_true')
parser.add_argument("--batch_size", default=5, type=int)
parser.add_argument('--freeze_model', help='crossattn or crossattn_kv', default='crossattn_kv',
type=str)
return parser.parse_args()
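# Example invocation (paths and prompt are illustrative placeholders):
#   python src/diffusers_sample.py --ckpt CompVis/stable-diffusion-v1-4 \
#       --delta_ckpt logs/<experiment>/delta.bin --prompt "<new1> cat playing with a ball"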
if __name__ == "__main__":
args = parse_args()
sample(args.ckpt, args.delta_ckpt, args.from_file, args.prompt, args.compress, args.batch_size, args.freeze_model)
| 3,037 | 39.506667 | 122 |
py
|
custom-diffusion
|
custom-diffusion-main/customconcept101/evaluate.py
|
import argparse
import glob
import json
import os
import warnings
from pathlib import Path
import clip
import numpy as np
import pandas as pd
import sklearn.preprocessing
import torch
from packaging import version
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from tqdm import tqdm
class CLIPCapDataset(torch.utils.data.Dataset):
def __init__(self, data, append=False, prefix='A photo depicts'):
self.data = data
self.prefix = ''
if append:
self.prefix = prefix
if self.prefix[-1] != ' ':
self.prefix += ' '
def __getitem__(self, idx):
c_data = self.data[idx]
c_data = clip.tokenize(self.prefix + c_data, truncate=True).squeeze()
return {'caption': c_data}
def __len__(self):
return len(self.data)
def Convert(image):
return image.convert("RGB")
class CLIPImageDataset(torch.utils.data.Dataset):
def __init__(self, data):
self.data = data
# only 224x224 ViT-B/32 supported for now
self.preprocess = self._transform_test(224)
def _transform_test(self, n_px):
return Compose([
Resize(n_px, interpolation=Image.BICUBIC),
CenterCrop(n_px),
Convert,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711)),
])
def __getitem__(self, idx):
c_data = self.data[idx]
image = Image.open(c_data)
image = self.preprocess(image)
return {'image': image}
def __len__(self):
return len(self.data)
class DINOImageDataset(torch.utils.data.Dataset):
def __init__(self, data):
self.data = data
        # 224x224 center crop for the DINO ViT-S/16 backbone loaded below
self.preprocess = self._transform_test(224)
def _transform_test(self, n_px):
return Compose([
Resize(256, interpolation=Image.BICUBIC),
CenterCrop(n_px),
Convert,
ToTensor(),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
def __getitem__(self, idx):
c_data = self.data[idx]
image = Image.open(c_data)
image = self.preprocess(image)
return {'image': image}
def __len__(self):
return len(self.data)
def extract_all_captions(captions, model, device, batch_size=256, num_workers=8, append=False):
data = torch.utils.data.DataLoader(
CLIPCapDataset(captions, append=append),
batch_size=batch_size, num_workers=num_workers, shuffle=False)
all_text_features = []
with torch.no_grad():
for b in tqdm(data):
b = b['caption'].to(device)
all_text_features.append(model.encode_text(b).cpu().numpy())
all_text_features = np.vstack(all_text_features)
return all_text_features
def extract_all_images(images, model, datasetclass, device, batch_size=64, num_workers=8):
data = torch.utils.data.DataLoader(
datasetclass(images),
batch_size=batch_size, num_workers=num_workers, shuffle=False)
all_image_features = []
with torch.no_grad():
for b in tqdm(data):
b = b['image'].to(device)
if hasattr(model, 'encode_image'):
if device == 'cuda':
b = b.to(torch.float16)
all_image_features.append(model.encode_image(b).cpu().numpy())
else:
all_image_features.append(model(b).cpu().numpy())
all_image_features = np.vstack(all_image_features)
return all_image_features
def get_clip_score(model, images, candidates, device, append=False, w=2.5):
'''
get standard image-text clipscore.
images can either be:
- a list of strings specifying filepaths for images
- a precomputed, ordered matrix of image features
'''
if isinstance(images, list):
# need to extract image features
        images = extract_all_images(images, model, CLIPImageDataset, device)
candidates = extract_all_captions(candidates, model, device, append=append)
# as of numpy 1.21, normalize doesn't work properly for float16
if version.parse(np.__version__) < version.parse('1.21'):
images = sklearn.preprocessing.normalize(images, axis=1)
candidates = sklearn.preprocessing.normalize(candidates, axis=1)
else:
warnings.warn(
'due to a numerical instability, new numpy normalization is slightly different than paper results. '
'to exactly replicate paper results, please use numpy version less than 1.21, e.g., 1.20.3.')
images = images / np.sqrt(np.sum(images ** 2, axis=1, keepdims=True))
candidates = candidates / \
np.sqrt(np.sum(candidates ** 2, axis=1, keepdims=True))
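    # CLIPScore per sample: w * max(0, cosine similarity) between the image and
    # caption embeddings, with w = 2.5 following Hessel et al. (2021).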
per = w * np.clip(np.sum(images * candidates, axis=1), 0, None)
return np.mean(per), per, candidates
def clipeval(image_dir, candidates_json, device):
image_paths = [os.path.join(image_dir, path) for path in os.listdir(image_dir)
if path.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.JPG'))]
image_ids = [Path(path).stem for path in image_paths]
with open(candidates_json) as f:
candidates = json.load(f)
candidates = [candidates[cid] for cid in image_ids]
model, _ = clip.load("ViT-B/32", device=device, jit=False)
model.eval()
image_feats = extract_all_images(
image_paths, model, CLIPImageDataset, device, batch_size=64, num_workers=8)
_, per_instance_image_text, _ = get_clip_score(
model, image_feats, candidates, device)
scores = {image_id: {'CLIPScore': float(clipscore)}
for image_id, clipscore in
zip(image_ids, per_instance_image_text)}
print('CLIPScore: {:.4f}'.format(
np.mean([s['CLIPScore'] for s in scores.values()])))
return np.mean([s['CLIPScore'] for s in scores.values()]), np.std([s['CLIPScore'] for s in scores.values()])
def clipeval_image(image_dir, image_dir_ref, device):
image_paths = [os.path.join(image_dir, path) for path in os.listdir(image_dir)
if path.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.JPG'))]
image_paths_ref = [os.path.join(image_dir_ref, path) for path in os.listdir(image_dir_ref)
if path.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.JPG'))]
model, _ = clip.load("ViT-B/32", device=device, jit=False)
model.eval()
image_feats = extract_all_images(
image_paths, model, CLIPImageDataset, device, batch_size=64, num_workers=8)
image_feats_ref = extract_all_images(
image_paths_ref, model, CLIPImageDataset, device, batch_size=64, num_workers=8)
image_feats = image_feats / \
np.sqrt(np.sum(image_feats ** 2, axis=1, keepdims=True))
image_feats_ref = image_feats_ref / \
np.sqrt(np.sum(image_feats_ref ** 2, axis=1, keepdims=True))
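    # Rows are unit-normalized, so the matrix product below yields pairwise cosine
    # similarities between generated and reference image features; the reported
    # image-alignment score is their mean.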
res = image_feats @ image_feats_ref.T
return np.mean(res)
def dinoeval_image(image_dir, image_dir_ref, device):
image_paths = [os.path.join(image_dir, path) for path in os.listdir(image_dir)
if path.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.JPG'))]
image_paths_ref = [os.path.join(image_dir_ref, path) for path in os.listdir(image_dir_ref)
if path.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.JPG'))]
model = torch.hub.load('facebookresearch/dino:main', 'dino_vits16').to(device)
model.eval()
image_feats = extract_all_images(
image_paths, model, DINOImageDataset, device, batch_size=64, num_workers=8)
image_feats_ref = extract_all_images(
image_paths_ref, model, DINOImageDataset, device, batch_size=64, num_workers=8)
image_feats = image_feats / \
np.sqrt(np.sum(image_feats ** 2, axis=1, keepdims=True))
image_feats_ref = image_feats_ref / \
np.sqrt(np.sum(image_feats_ref ** 2, axis=1, keepdims=True))
res = image_feats @ image_feats_ref.T
return np.mean(res)
def calmetrics(sample_root, target_paths, numgen, outpkl):
device = 'cuda'
if os.path.exists(outpkl):
df = pd.read_pickle(outpkl)
else:
df = pd.DataFrame()
full = {}
assert sample_root.is_dir()
image_path = sample_root / 'samples'
json_path = sample_root / 'prompts.json'
assert len(glob.glob(str(image_path / '*.png'))) == numgen, "Sample folder does not contain required number of images"
textalignment, _ = \
clipeval(str(image_path), str(json_path), device)
sd = {}
sd['CLIP Text alignment'] = textalignment
for i, target_path in enumerate(target_paths.split('+')):
imagealignment = \
clipeval_image(str(image_path), target_path, device)
dinoimagealignment = \
dinoeval_image(str(image_path), target_path, device)
if i > 0:
sd[f'CLIP Image alignment{i}'] = imagealignment
sd[f'DINO Image alignment{i}'] = dinoimagealignment
else:
sd['CLIP Image alignment'] = imagealignment
sd['DINO Image alignment'] = dinoimagealignment
expname = sample_root
if expname not in full:
full[expname] = sd
else:
full[expname] = {**sd, **full[expname]}
print(sd)
print("Metrics:", full)
for expname, sd in full.items():
if expname not in df.index:
df1 = pd.DataFrame(sd, index=[expname])
df = pd.concat([df, df1])
else:
df.loc[df.index == expname, sd.keys()] = sd.values()
df.to_pickle(outpkl)
def parse_args():
parser = argparse.ArgumentParser("metric", add_help=False)
parser.add_argument("--sample_root", type=str,
help="the root folder to generated images")
parser.add_argument("--numgen", type=int, default=100,
help="total number of images.")
parser.add_argument("--target_paths", type=str,
help="+ separated paths to real target images")
parser.add_argument("--outpkl", type=str, default="evaluation.pkl",
help="the path to save result pkl file")
return parser.parse_args()
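# Example invocation (paths are illustrative placeholders): the sample_root is
# expected to contain a samples/ folder of generated .png images and a
# prompts.json file mapping image ids to captions.
#   python customconcept101/evaluate.py --sample_root logs/<experiment> \
#       --target_paths data/<concept_images> --numgen 100 --outpkl evaluation.pkl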
def main(args):
calmetrics(Path(args.sample_root), args.target_paths,
args.numgen, args.outpkl)
if __name__ == "__main__":
# distributed setting
args = parse_args()
main(args)
| 10,450 | 33.953177 | 122 |
py
|
BioSimulator.jl
|
BioSimulator.jl-master/benchmarks/stochpy_bench.py
|
import stochpy
import statistics as stats
from statsmodels import robust
import argparse
import random
import sys
def benchmark(fdir, method, tfinal, nsaves, nreal, seed, n_sample):
# initialize
smod = stochpy.SSA()
times = []
smod.Model(model_file='stochpy.psc', dir=fdir)
random.seed(seed)
for i in range(n_sample):
print(f"sample {i+1} / {n_sample}")
smod.DoStochSim(trajectories=nreal, end=tfinal, method=method, mode="time")
times.append(smod.simulation_time)
return times
def save_results(times, model_name):
fout = open(f"./stochpy/{model_name}.txt", 'a')
for t in times:
fout.write("%s\n" % t)
fout.close()
return
parser = argparse.ArgumentParser()
parser.add_argument(dest='model_name')
parser.add_argument(dest='tfinal', type=float)
parser.add_argument(dest='nsaves', type=int)
parser.add_argument(dest='nreal', type=int)
parser.add_argument(dest='seed', type=int)
parser.add_argument(dest='n_sample', type=int)
args = parser.parse_args()
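# Example invocation (model name and values are illustrative): expects a
# ./<model_name>/stochpy.psc model file and appends timings to
# ./stochpy/<model_name>.txt
#   python stochpy_bench.py dimer 4.0 1000 100 5357 50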
# METHODS = ['Direct', 'FRM', 'NRM', 'TauLeap']
METHODS = ['Direct']
fdir = f"./{args.model_name}/"
tfinal = args.tfinal
nsaves = args.nsaves
nreal = args.nreal
seed = args.seed
n_sample = args.n_sample
for method in METHODS:
times = benchmark(fdir, method, tfinal, nsaves, nreal, seed, n_sample)
save_results(times, args.model_name)
| 1,358 | 22.033898 | 79 |
py
|
globus-automate-client
|
globus-automate-client-main/examples/sdk_scripts/flow_deploy_and_run.py
|
#!/usr/bin/env python
import json
import sys
from globus_automate_client import create_flows_client
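# Usage (file names are illustrative): the first argument is a JSON flow
# definition, the second a JSON file with the flow's input.
#   python flow_deploy_and_run.py flow_definition.json flow_input.json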
def main():
flow_file = sys.argv[1]
with open(flow_file, "r") as ff:
flow_dict = json.load(ff)
flow_input = sys.argv[2]
with open(flow_input, "r") as fi:
flow_input_data = json.load(fi)
fc = create_flows_client()
deploy_result = fc.deploy_flow(
flow_dict,
title="Deployed via SDK",
visible_to=["public"],
runnable_by=["all_authenticated_users"],
)
flow_id = deploy_result.data["id"]
print(f"Flow id is {flow_id}")
run_result = fc.run_flow(flow_id, None, flow_input_data)
print(f"Flow Run Result: {json.dumps(run_result.data, indent=4)}")
if __name__ == "__main__":
main()
| 773 | 24.8 | 70 |
py
|
globus-automate-client
|
globus-automate-client-main/tests/conftest.py
|
import pytest
import responses
@pytest.fixture(autouse=True)
def mocked_responses():
"""Mock responses to requests."""
with responses.RequestsMock() as request_mock:
yield request_mock
| 204 | 17.636364 | 50 |
py
|
globus-automate-client
|
globus-automate-client-main/tests/test_flows_client.py
|
import json
import os
import pathlib
import urllib.parse
from typing import Any, Dict, Union, cast
from unittest.mock import Mock
import pytest
import yaml
from globus_automate_client import flows_client
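# A minimal valid flow definition: a single Pass state named "perfect" that is
# both the StartAt state and a terminal state.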
VALID_FLOW_DEFINITION = {
"StartAt": "perfect",
"States": {
"perfect": {
"Type": "Pass",
"End": True,
},
},
}
@pytest.fixture
def fc():
client = flows_client.FlowsClient("client", flows_client.AccessTokenAuthorizer)
original_authorizer = client.authorizer
yield client
assert client.authorizer is original_authorizer
@pytest.mark.parametrize(
"d, names, stop_names, expected, message",
(
# Empty inputs and outputs
({}, set(), None, set(), "nothing should be returned"),
({}, {"i"}, None, set(), "nothing should be returned"),
({}, set(), {"x"}, set(), "nothing should be returned"),
({}, {"i"}, {"x"}, set(), "nothing should be returned"),
({"i": "1"}, set(), None, set(), "nothing should be returned"),
({"i": 123}, {"i"}, None, set(), "nothing should be returned"),
({"i": [123]}, {"i"}, None, set(), "nothing should be returned"),
({"x": "1"}, {"i"}, None, set(), "nothing should be returned"),
({"x": "1"}, set(), {"x"}, set(), "nothing should be returned"),
#
# Corner case behavior
({"x": "1"}, {"x"}, {"x"}, {"1"}, "failed to find str (corner case)"),
#
# Test includes
({"i": "1"}, {"i"}, None, {"1"}, "failed to find top-level str"),
({"i": {"i": "1"}}, {"i"}, None, {"1"}, "failed to find str in dict"),
({"i": ["1"]}, {"i"}, None, {"1"}, "failed to find str in list"),
({"i": ["1", "2"]}, {"i"}, None, {"1", "2"}, "failed to find values in list"),
({"i": ["1", {"i": "2"}]}, {"i"}, None, {"1", "2"}, "failed to find values"),
({"i": [{"i": "1"}]}, {"i"}, None, {"1"}, "failed to find str in list->dict"),
#
# Test excludes
({"x": {"i": "1"}}, {"i"}, {"x"}, set(), "found str in excluded dict"),
),
)
def test_all_vals_for_keys(d, names, stop_names, expected, message):
"""Validate values are found or ignored correctly."""
assert flows_client._all_vals_for_keys(names, d, stop_names) == expected, message
def test_validate_flow_definition_valid():
"""Confirm that valid and well-formed schema raise no errors."""
flows_client.validate_flow_definition(VALID_FLOW_DEFINITION)
def test_validate_flow_definition_multiple_validity_errors():
"""Confirm that validity checks can report multiple errors."""
schema = {
# "StartAt" is missing
"States": {
"bogus": {},
},
}
with pytest.raises(flows_client.FlowValidationError) as raised:
flows_client.validate_flow_definition(schema)
assert "'StartAt' is a required property" in raised.value.args[0]
assert "'States.bogus'" in raised.value.args[0]
def test_validate_flow_definition_multiple_ill_formed_errors():
"""Confirm that well-formed checks can report multiple errors."""
schema = {
"StartAt": "undefined",
"States": {
"unreferenced": {
"Type": "Pass",
"End": True,
},
},
}
with pytest.raises(flows_client.FlowValidationError) as raised:
flows_client.validate_flow_definition(schema)
assert "not referenced" in raised.value.args[0]
assert "not defined" in raised.value.args[0]
input_schemas = pathlib.Path(__file__).parent.rglob("../examples/**/*schema.*")
@pytest.mark.parametrize("filename", input_schemas)
def test_validate_input_schema(filename):
"""Confirm that example input schemas all validate correctly."""
if "invalid" in filename.name:
pytest.xfail(f"{filename} is invalid according to its filename")
with filename.open() as file:
if filename.suffix == ".json":
schema = json.load(file)
else: # filename.suffix == ".yaml"
schema = yaml.safe_load(file)
flows_client.validate_input_schema(schema)
@pytest.mark.parametrize("schema", (None, set()))
def test_validate_input_schema_bad_type(schema):
"""Confirm that a bad input type results in failures."""
with pytest.raises(flows_client.FlowValidationError):
flows_client.validate_input_schema(schema)
def test_validate_input_schema_multiple_failures():
"""Confirm that an invalid schema can report multiple errors."""
schema = {
"properties": {
"trouble": {
"type": "bogus",
},
},
"required": False,
}
with pytest.raises(flows_client.FlowValidationError) as raised:
flows_client.validate_input_schema(schema)
assert "'properties.trouble.type' invalid" in raised.value.args[0]
assert "'required' invalid" in raised.value.args[0]
@pytest.mark.parametrize(
"value, expected",
(
(None, flows_client.PROD_FLOWS_BASE_URL),
("prod", flows_client.PROD_FLOWS_BASE_URL),
("bogus", ValueError),
),
)
def test_get_flows_base_url_for_environment_known(monkeypatch, value, expected):
"""Verify that env variables and base URL's are associated correctly."""
monkeypatch.setattr(os.environ, "get", lambda x: value)
if expected is ValueError:
with pytest.raises(ValueError):
flows_client._get_flows_base_url_for_environment()
else:
assert flows_client._get_flows_base_url_for_environment() == expected
def test_deploy_flow_data_construction(fc, mocked_responses):
"""Verify the flow JSON data is constructed correctly."""
mocked_responses.add("POST", f"{flows_client.PROD_FLOWS_BASE_URL}/flows")
expected: Dict[str, Union[str, Dict[str, Any]]] = {
"definition": VALID_FLOW_DEFINITION,
"input_schema": {"Comment": "flow-input-schema"},
"title": "--title--",
"subtitle": "--subtitle--",
"description": "--description--",
"keywords": "--keywords--",
"flow_viewers": ["--flow_viewers--"],
"flow_starters": ["--flow_starters--"],
"flow_administrators": ["--flow_administrators--"],
"subscription_id": "--subscription_id--",
}
fc.deploy_flow(
# Arguments that affect the JSON data
flow_definition=expected["definition"],
input_schema=expected["input_schema"],
title=expected["title"],
subtitle=expected["subtitle"],
description=expected["description"],
keywords=expected["keywords"],
flow_viewers=expected["flow_viewers"],
flow_starters=expected["flow_starters"],
flow_administrators=expected["flow_administrators"],
subscription_id=expected["subscription_id"],
# Other arguments
validate_definition=True,
validate_schema=True,
dry_run=False,
)
data = json.loads(mocked_responses.calls[0].request.body)
assert data == expected
@pytest.mark.parametrize("input_schema, expected", ((None, False), ({}, True)))
def test_deploy_flow_only_exclude_input_schema_if_none(
fc, mocked_responses, input_schema, expected
):
"""Verify the *input_schema* is not excluded even if it's false-y."""
mocked_responses.add("POST", f"{flows_client.PROD_FLOWS_BASE_URL}/flows")
fc.deploy_flow(
# Included arguments
flow_definition=VALID_FLOW_DEFINITION,
title="--title--",
input_schema=input_schema,
# Excluded arguments
subtitle="",
description=None,
# Other arguments
validate_definition=False,
validate_schema=False,
dry_run=False,
)
data = json.loads(mocked_responses.calls[0].request.body)
assert "subtitle" not in data
assert "description" not in data
assert ("input_schema" in data) is expected
@pytest.mark.parametrize("dry_run, path", ((False, "flows"), (True, "flows/dry-run")))
def test_deploy_flow_dry_run(fc, mocked_responses, dry_run, path):
"""Verify the *dry_run* parameter affects the URL path."""
url = f"{flows_client.PROD_FLOWS_BASE_URL}/{path}"
mocked_responses.add("POST", url)
fc.deploy_flow(
flow_definition=VALID_FLOW_DEFINITION,
title="bogus",
validate_schema=False,
dry_run=dry_run,
)
assert mocked_responses.calls[0].request.url == url
def test_deploy_flow_aliases_batch_1(fc, mocked_responses):
"""Verify that viewer/starter/admin aliases are still supported."""
mocked_responses.add("POST", f"{flows_client.PROD_FLOWS_BASE_URL}/flows")
with pytest.warns(DeprecationWarning):
fc.deploy_flow(
visible_to=["v1"],
runnable_by=["s1"],
administered_by=["a1"],
# Everything below is mandatory but irrelevant to this test.
flow_definition=VALID_FLOW_DEFINITION,
title="",
validate_definition=False,
validate_schema=False,
)
data = json.loads(mocked_responses.calls[0].request.body)
assert set(data["flow_viewers"]) == {"v1"}
assert set(data["flow_starters"]) == {"s1"}
assert set(data["flow_administrators"]) == {"a1"}
def test_deploy_flow_aliases_batch_2(fc, mocked_responses):
"""Verify that viewer/starter/admin aliases are still supported."""
mocked_responses.add("POST", f"{flows_client.PROD_FLOWS_BASE_URL}/flows")
with pytest.warns(DeprecationWarning):
fc.deploy_flow(
viewers=["v2"],
starters=["s2"],
administrators=["a2"],
# Everything below is mandatory but irrelevant to this test.
flow_definition=VALID_FLOW_DEFINITION,
title="",
validate_definition=False,
validate_schema=False,
)
data = json.loads(mocked_responses.calls[0].request.body)
assert set(data["flow_viewers"]) == {"v2"}
assert set(data["flow_starters"]) == {"s2"}
assert set(data["flow_administrators"]) == {"a2"}
@pytest.mark.parametrize("method", ("deploy_flow", "update_flow"))
def test_invalid_flow_definition_failure(fc, method):
"""Verify that an invalid flow definition triggers a failure."""
with pytest.raises(flows_client.FlowValidationError):
getattr(fc, method)(
flow_id="bogus-id",
flow_definition={"bogus": True},
title="title",
validate_definition=True,
)
@pytest.mark.parametrize("method", ("deploy_flow", "update_flow"))
def test_invalid_input_schema_failure(fc, method):
"""Verify that an invalid input schema triggers a failure."""
with pytest.raises(flows_client.FlowValidationError):
getattr(fc, method)(
flow_id="bogus-id",
flow_definition=VALID_FLOW_DEFINITION,
input_schema={"required": False},
title="title",
validate_definition=False,
validate_schema=True,
)
def test_update_flow_data_construction(fc, mocked_responses):
"""Verify the flow JSON data is constructed correctly."""
mocked_responses.add("PUT", f"{flows_client.PROD_FLOWS_BASE_URL}/flows/bogus")
expected: Dict[str, Union[str, Dict[str, Any]]] = {
"definition": VALID_FLOW_DEFINITION,
"input_schema": {"Comment": "flow-input-schema"},
"title": "--title--",
"subtitle": "--subtitle--",
"description": "--description--",
"keywords": "--keywords--",
"flow_viewers": ["--flow_viewers--"],
"flow_starters": ["--flow_starters--"],
"flow_administrators": ["--flow_administrators--"],
"subscription_id": "--subscription_id--",
}
fc.update_flow(
# Arguments that affect the JSON data
flow_id="bogus",
flow_definition=expected["definition"],
input_schema=expected["input_schema"],
title=expected["title"],
subtitle=expected["subtitle"],
description=expected["description"],
keywords=expected["keywords"],
flow_viewers=expected["flow_viewers"],
flow_starters=expected["flow_starters"],
flow_administrators=expected["flow_administrators"],
subscription_id=expected["subscription_id"],
# Other arguments
validate_definition=True,
validate_schema=True,
)
data = json.loads(mocked_responses.calls[0].request.body)
assert data == expected
@pytest.mark.parametrize("input_schema, expected", ((None, False), ({}, True)))
def test_update_flow_exclude_most_false_values(
fc, mocked_responses, input_schema, expected
):
"""Verify the *input_schema* is not excluded even if it's false-y."""
mocked_responses.add("PUT", f"{flows_client.PROD_FLOWS_BASE_URL}/flows/bogus")
fc.update_flow(
# *input_schema* is being tested for inclusion/exclusion.
input_schema=input_schema,
# These are false-y and will always be excluded.
subtitle="",
description=None,
# Mandatory arguments, but not under test.
flow_id="bogus",
flow_definition=VALID_FLOW_DEFINITION,
title="--title--",
validate_definition=False,
validate_schema=False,
)
data = json.loads(mocked_responses.calls[0].request.body)
assert "subtitle" in data
assert "description" not in data
assert ("input_schema" in data) is expected
def test_update_flow_aliases_batch_1(fc, mocked_responses):
"""Verify that viewer/starter/admin aliases are still supported."""
mocked_responses.add("PUT", f"{flows_client.PROD_FLOWS_BASE_URL}/flows/bogus")
with pytest.warns(DeprecationWarning):
fc.update_flow(
visible_to=["v1"],
runnable_by=["s1"],
administered_by=["a1"],
# Everything below is mandatory but irrelevant to this test.
flow_id="bogus",
flow_definition=VALID_FLOW_DEFINITION,
title="",
validate_definition=False,
validate_schema=False,
)
data = json.loads(mocked_responses.calls[0].request.body)
assert set(data["flow_viewers"]) == {"v1"}
assert set(data["flow_starters"]) == {"s1"}
assert set(data["flow_administrators"]) == {"a1"}
def test_update_flow_aliases_batch_2(fc, mocked_responses):
"""Verify that viewer/starter/admin aliases are still supported."""
mocked_responses.add("PUT", f"{flows_client.PROD_FLOWS_BASE_URL}/flows/bogus")
with pytest.warns(DeprecationWarning):
fc.update_flow(
viewers=["v2"],
starters=["s2"],
administrators=["a2"],
# Everything below is mandatory but irrelevant to this test.
flow_id="bogus",
flow_definition=VALID_FLOW_DEFINITION,
title="",
validate_definition=False,
validate_schema=False,
)
data = json.loads(mocked_responses.calls[0].request.body)
assert set(data["flow_viewers"]) == {"v2"}
assert set(data["flow_starters"]) == {"s2"}
assert set(data["flow_administrators"]) == {"a2"}
def test_get_flow(fc, mocked_responses):
"""Verify the URL that is used to get a flow definition."""
url = f"{flows_client.PROD_FLOWS_BASE_URL}/flows/bogus"
mocked_responses.add("GET", url)
fc.get_flow("bogus")
assert mocked_responses.calls[0].request.url == url
@pytest.mark.parametrize(
"role, roles, expected, message",
(
(None, None, {}, "parameters incorrectly included"),
# role
("", None, {}, "false-y *role* must not be included"),
("1", None, {"filter_role": "1"}, "*role* must be included"),
# roles
(None, tuple(), {}, "false-y *roles* must not be included"),
(None, ("2", "3"), {"filter_roles": "2,3"}, "*roles* must be included"),
# Precedence
("1", ("2", "3"), {"filter_role": "1"}, "*role* must override *roles*"),
),
)
def test_list_flows_role_precedence(
fc, mocked_responses, role, roles, expected, message
):
"""Verify the *role* and *roles* precedence rules."""
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/flows")
fc.list_flows(role=role, roles=roles)
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
for key in ("filter_role", "filter_roles"):
if key in expected:
assert key in data, message
assert data[key] == expected[key], f"*{key}* value does not match"
else:
assert key not in data, message
@pytest.mark.parametrize(
"marker, per_page, expected, message",
(
(None, None, {}, "parameters incorrectly included"),
# marker
("", None, {}, "false-y *marker* must not be included"),
("m", None, {"pagination_token": "m"}, "*marker* must be included"),
# per_page
(None, 0, {}, "false-y *per_page* must not be included"),
(None, 10, {"per_page": "10"}, "*per_page* must be included"),
# Precedence
("m", 10, {"pagination_token": "m"}, "*marker* must override *per_page*"),
),
)
def test_list_flows_pagination_parameters(
fc, mocked_responses, marker, per_page, expected, message
):
"""Verify *marker* and *per_page* precedence rules."""
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/flows")
fc.list_flows(marker=marker, per_page=per_page)
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
for key in ("pagination_token", "per_page"):
if key in expected:
assert key in data, message
assert data[key] == expected[key], f"*{key}* value does not match"
else:
assert key not in data, message
def test_list_flows_filters(fc, mocked_responses):
"""Verify that filters are applied to the query parameters."""
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/flows")
fc.list_flows(role="role", filters={"1": "2", "filter_role": "bogus"})
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
assert data["1"] == "2", "*filters* were not applied to the query"
assert data["filter_role"] == "role", "*filters* overwrote *role*"
def test_list_flows_orderings(fc, mocked_responses):
"""Verify that orderings are serialized as expected."""
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/flows")
fc.list_flows(orderings={"shape": "asc", "color": "DESC", "bogus": "undefined"})
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
assert set(data["orderby"].split(",")) == {
"shape asc",
"color DESC",
"bogus undefined",
}
def test_delete_flow(fc, mocked_responses):
"""Verify the URL used when deleting a flow."""
url = f"{flows_client.PROD_FLOWS_BASE_URL}/flows/bogus"
mocked_responses.add("DELETE", url)
fc.delete_flow("bogus")
assert mocked_responses.calls[0].request.url == url
def test_scope_for_flow(fc, mocked_responses):
"""Verify that scopes can be introspected.
This method relies entirely on ActionClient code.
"""
mocked_responses.add(
method="GET",
url=f"{flows_client.PROD_FLOWS_BASE_URL}/flows/bogus-id",
json={"globus_auth_scope": "bogus-scope"},
)
assert fc.scope_for_flow("bogus-id") == "bogus-scope"
def test_get_authorizer_for_flow_found_in_extras(fc):
"""Verify that an authorizer can be found in *extras*."""
authorizer = fc._get_authorizer_for_flow("1", "2", {"authorizer": "extra"})
assert authorizer == "extra", "authorizer not found in *extras* parameter"
@pytest.mark.parametrize(
"flow_scope, expected",
(
(None, "dynamic-lookup"),
("", ""),
("passed-value", "passed-value"),
),
)
def test_get_authorizer_for_flow_scope_lookup(fc, monkeypatch, flow_scope, expected):
"""Verify that scopes are dynamically looked up as needed."""
monkeypatch.setattr(fc, "scope_for_flow", lambda _: "dynamic-lookup")
monkeypatch.setattr(fc, "get_authorizer_callback", lambda **x: x)
result = cast(dict, fc._get_authorizer_for_flow("bogus", flow_scope, {}))
assert result["flow_scope"] == expected
@pytest.mark.parametrize("dry_run, expected", ((False, "run"), (True, "run/dry-run")))
def test_run_flow_dry_run(fc, mocked_responses, dry_run, expected):
"""Verify the *dry_run* parameter affects the URL path."""
url = f"{flows_client.PROD_FLOWS_BASE_URL}/flows/bogus-id/{expected}"
mocked_responses.add("POST", url)
fc.run_flow(
# *dry_run* is being tested.
dry_run=dry_run,
# These parameters are necessary but irrelevant.
flow_id="bogus-id",
flow_scope="bogus-scope",
flow_input={},
authorizer=fc.authorizer,
)
assert mocked_responses.calls[0].request.url == url
@pytest.mark.parametrize(
"run_monitors, monitor_by, run_managers, manage_by, expected, message",
(
(None, None, None, None, {}, "empty values should be excluded"),
# Monitors
([], None, None, None, {}, "false-y run_monitors must be excluded"),
(None, [], None, None, {}, "false-y monitor_by must be excluded"),
(
["mon1", "mon2"],
None,
None,
None,
{"monitor_by": ["mon1", "mon2"]},
"run_monitors must be included",
),
(
None,
["mon3"],
None,
None,
{"monitor_by": ["mon3"]},
"monitor_by must be included",
),
(
["mon1", "mon2"],
["mon3"],
None,
None,
{"monitor_by": ["mon1", "mon2", "mon3"]},
"monitor agents must be combined",
),
# Managers
(None, None, [], None, {}, "false-y run_managers must be excluded"),
(None, None, None, [], {}, "false-y manage_by must be excluded"),
(
None,
None,
["man1", "man2"],
None,
{"manage_by": ["man1", "man2"]},
"run_managers must be included",
),
(
None,
None,
None,
["man3"],
{"manage_by": ["man3"]},
"manage_by must be included",
),
(
None,
None,
["man1", "man2"],
["man3"],
{"manage_by": ["man1", "man2", "man3"]},
"manager agents must be combined",
),
),
)
def test_run_flow_aliases(
fc,
mocked_responses,
run_monitors,
monitor_by,
run_managers,
manage_by,
expected,
message,
):
"""Verify the monitor and manager aliases are functional."""
mocked_responses.add(
method="POST",
url=f"{flows_client.PROD_FLOWS_BASE_URL}/flows/bogus-id/run",
json={},
)
fc.run_flow(
# These parameters are being tested.
run_monitors=run_monitors,
monitor_by=monitor_by,
run_managers=run_managers,
manage_by=manage_by,
# These parameters are necessary but irrelevant.
flow_id="bogus-id",
flow_scope="bogus-scope",
flow_input={},
authorizer=fc.authorizer,
)
data = json.loads(mocked_responses.calls[0].request.body or "{}")
for key in ("manage_by", "monitor_by"):
if key in expected:
assert key in data, f"*{key}* must be in the submitted data"
assert set(data[key]) == set(expected[key])
else:
assert key not in data, f"*{key}* must not be in the submitted data"
@pytest.mark.parametrize(
"role, roles, expected, message",
(
(None, None, {}, "parameters incorrectly included"),
# role
("", None, {}, "false-y *role* must not be included"),
("1", None, {"filter_role": "1"}, "*role* must be included"),
# roles
(None, tuple(), {}, "false-y *roles* must not be included"),
(None, ("2", "3"), {"filter_roles": "2,3"}, "*roles* must be included"),
# Precedence
("1", ("2", "3"), {"filter_role": "1"}, "*role* must override *roles*"),
),
)
def test_enumerate_runs_role_precedence(
fc, mocked_responses, monkeypatch, role, roles, expected, message
):
"""Verify the *role* and *roles* precedence rules."""
monkeypatch.setattr(fc, "get_authorizer_callback", lambda **x: fc.authorizer)
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/runs")
fc.enumerate_runs(role=role, roles=roles)
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
for key in ("filter_role", "filter_roles"):
if key in expected:
assert key in data, message
assert data[key] == expected[key], f"*{key}* value does not match"
else:
assert key not in data, message
@pytest.mark.parametrize(
"marker, per_page, expected, message",
(
(None, None, {}, "parameters incorrectly included"),
# marker
("", None, {}, "false-y *marker* must not be included"),
("m", None, {"pagination_token": "m"}, "*marker* must be included"),
# per_page
(None, 0, {}, "false-y *per_page* must not be included"),
(None, 10, {"per_page": "10"}, "*per_page* must be included"),
# Precedence
("m", 10, {"pagination_token": "m"}, "*marker* must override *per_page*"),
),
)
def test_enumerate_runs_pagination_parameters(
fc, mocked_responses, monkeypatch, marker, per_page, expected, message
):
"""Verify *marker* and *per_page* precedence rules."""
monkeypatch.setattr(fc, "get_authorizer_callback", lambda **x: fc.authorizer)
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/runs")
fc.enumerate_runs(marker=marker, per_page=per_page)
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
for key in ("pagination_token", "per_page"):
if key in expected:
assert key in data, message
assert data[key] == expected[key], f"*{key}* value does not match"
else:
assert key not in data, message
def test_enumerate_runs_filters(fc, mocked_responses, monkeypatch):
"""Verify that filters are applied to the query parameters."""
monkeypatch.setattr(fc, "get_authorizer_callback", lambda **x: fc.authorizer)
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/runs")
fc.enumerate_runs(role="role", filters={"1": "2", "filter_role": "bogus"})
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
assert data["1"] == "2", "*filters* were not applied to the query"
assert data["filter_role"] == "role", "*filters* overwrote *role*"
def test_enumerate_runs_orderings(fc, mocked_responses, monkeypatch):
"""Verify that orderings are serialized as expected."""
monkeypatch.setattr(fc, "get_authorizer_callback", lambda **x: fc.authorizer)
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/runs")
fc.enumerate_runs(orderings={"shape": "asc", "color": "DESC", "bogus": "bad"})
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
assert set(data["orderby"].split(",")) == {
"shape asc",
"color DESC",
"bogus bad",
}
def test_enumerate_runs_statuses(fc, mocked_responses, monkeypatch):
"""Verify that orderings are serialized as expected."""
monkeypatch.setattr(fc, "get_authorizer_callback", lambda **x: fc.authorizer)
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/runs")
fc.enumerate_runs(statuses=("SUCCEEDED", "FAILED"))
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
assert set(data["filter_status"].split(",")) == {"SUCCEEDED", "FAILED"}
@pytest.mark.parametrize(
"role, roles, expected, message",
(
(None, None, {}, "parameters incorrectly included"),
# role
("", None, {}, "false-y *role* must not be included"),
("1", None, {"filter_role": "1"}, "*role* must be included"),
# roles
(None, tuple(), {}, "false-y *roles* must not be included"),
(None, ("2", "3"), {"filter_roles": "2,3"}, "*roles* must be included"),
# Precedence
("1", ("2", "3"), {"filter_role": "1"}, "*role* must override *roles*"),
),
)
def test_list_flow_runs_role_precedence(
fc, mocked_responses, role, roles, expected, message
):
"""Verify the *role* and *roles* precedence rules."""
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/flows/-/runs")
fc.list_flow_runs(
"-",
role=role,
roles=roles,
authorizer=fc.authorizer,
)
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
for key in ("filter_role", "filter_roles"):
if key in expected:
assert key in data, message
assert data[key] == expected[key], f"*{key}* value does not match"
else:
assert key not in data, message
@pytest.mark.parametrize(
"marker, per_page, expected, message",
(
(None, None, {}, "parameters incorrectly included"),
# marker
("", None, {}, "false-y *marker* must not be included"),
("m", None, {"pagination_token": "m"}, "*marker* must be included"),
# per_page
(None, 0, {}, "false-y *per_page* must not be included"),
(None, 10, {"per_page": "10"}, "*per_page* must be included"),
# Precedence
("m", 10, {"pagination_token": "m"}, "*marker* must override *per_page*"),
),
)
def test_list_flow_runs_pagination_parameters(
fc, mocked_responses, marker, per_page, expected, message
):
"""Verify *marker* and *per_page* precedence rules."""
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/flows/-/runs")
fc.list_flow_runs(
"-",
marker=marker,
per_page=per_page,
authorizer=fc.authorizer,
)
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
for key in ("pagination_token", "per_page"):
if key in expected:
assert key in data, message
assert data[key] == expected[key], f"*{key}* value does not match"
else:
assert key not in data, message
def test_list_flow_runs_filters(fc, mocked_responses):
"""Verify that filters are applied to the query parameters."""
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/flows/-/runs")
fc.list_flow_runs(
"-",
role="role",
filters={"1": "2", "filter_role": "bogus"},
authorizer=fc.authorizer,
)
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
assert data["1"] == "2", "*filters* were not applied to the query"
assert data["filter_role"] == "role", "*filters* overwrote *role*"
def test_list_flow_runs_orderings(fc, mocked_responses):
"""Verify that orderings are serialized as expected."""
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/flows/-/runs")
fc.list_flow_runs(
"-",
orderings={"shape": "asc", "color": "DESC", "bogus": "bad"},
authorizer=fc.authorizer,
)
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
assert set(data["orderby"].split(",")) == {
"shape asc",
"color DESC",
"bogus bad",
}
def test_list_flow_runs_statuses(fc, mocked_responses):
"""Verify that orderings are serialized as expected."""
mocked_responses.add("GET", f"{flows_client.PROD_FLOWS_BASE_URL}/flows/-/runs")
fc.list_flow_runs(
"-",
statuses=("SUCCEEDED", "FAILED"),
authorizer=fc.authorizer,
)
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
assert set(data["filter_status"].split(",")) == {"SUCCEEDED", "FAILED"}
def test_list_flow_runs_call_enumerate_runs(fc, monkeypatch):
"""Verify that calls to enumerate_runs() pass all variables."""
expected = {
# Explicit
"statuses": "--statuses--",
"roles": "--roles--",
"marker": "--marker--",
"per_page": "--per_page--",
"filters": "--filters--",
"orderings": "--orderings--",
"role": "--role--",
# Implicit kwargs
"authorizer": "--authorizer--",
}
additional = {
"flow_id": None,
"flow_scope": "--flow_scope--",
}
mock = Mock()
monkeypatch.setattr(fc, "enumerate_runs", mock)
fc.list_flow_runs(**expected, **additional)
mock.assert_called_once_with(**expected)
@pytest.mark.parametrize(
"run_managers, run_monitors, expected, message",
(
(None, None, {}, "empty values should be excluded"),
# Managers
([], None, {"run_managers": []}, "false-y run_managers must be included"),
(["1"], None, {"run_managers": ["1"]}, "run_managers must be included"),
# Monitors
(None, [], {"run_monitors": []}, "false-y run_monitors must be included"),
(None, ["1"], {"run_monitors": ["1"]}, "run_monitors must be included"),
),
)
def test_flow_action_update_managers_and_monitors(
fc, mocked_responses, run_managers, run_monitors, expected, message
):
"""Verify that managers and monitors are unconditionally included."""
mocked_responses.add("PUT", f"{flows_client.PROD_FLOWS_BASE_URL}/runs/bogus-id")
fc.flow_action_update(
# These arguments are being tested.
run_managers=run_managers,
run_monitors=run_monitors,
# Mandatory but irrelevant to the test.
action_id="bogus-id",
authorizer=fc.authorizer,
)
data = json.loads(mocked_responses.calls[0].request.body)
for key in ("run_managers", "run_monitors"):
if key in expected:
assert key in data, f"*{key}* must be included in the JSON data"
assert data[key] == expected[key], message
else:
assert key not in data, f"*{key}* must not be included in the JSON data"
def test_use_temporary_authorizer(fc):
"""Verify the authorizer instance variable is swapped temporarily."""
original = fc.authorizer
replacement = flows_client.AccessTokenAuthorizer("bogus")
with fc.use_temporary_authorizer(replacement):
assert fc.authorizer is replacement
assert fc.authorizer is original
with pytest.raises(ValueError):
with fc.use_temporary_authorizer(replacement):
assert fc.authorizer is replacement
raise ValueError
assert fc.authorizer is original
@pytest.mark.parametrize("method", ("status", "log", "cancel", "release", "resume"))
def test_action_client_pass_through_calls(fc, method, monkeypatch):
"""Verify that the correct ActionClient methods are called.
There is no other validation performed except that the correct
ActionClient method is called.
"""
mock = Mock()
monkeypatch.setattr(flows_client.ActionClient, "new_client", lambda *_, **__: mock)
getattr(fc, f"flow_action_{method}")(
flow_id="bogus-id",
flow_scope="bogus-scope",
flow_action_id="bogus-action-id",
authorizer=fc.authorizer,
)
getattr(mock, method).assert_called_once()
| 36,535 | 35.609218 | 87 |
py
|
globus-automate-client
|
globus-automate-client-main/tests/test_init.py
|
import inspect
import globus_automate_client
def test_all_exports():
"""Validate that all imported names are listed in __all__."""
imported = set(
name
for name in dir(globus_automate_client)
if (
not name.startswith("__")
and not inspect.ismodule(getattr(globus_automate_client, name))
)
)
exported = set(globus_automate_client.__all__)
assert imported == exported, "imported and exported names do not match"
| 490 | 24.842105 | 75 |
py
|
globus-automate-client
|
globus-automate-client-main/tests/test_sdk_v3.py
|
from typing import Iterable
import pytest
import globus_automate_client
from globus_automate_client.cli.constants import (
ActionRole,
ActionRoleAllNames,
ActionRoleDeprecated,
FlowRole,
FlowRoleAllNames,
FlowRoleDeprecated,
)
def test_import():
assert bool(globus_automate_client)
@pytest.mark.parametrize(
"combo, supported, deprecated",
(
(ActionRoleAllNames, ActionRole, ActionRoleDeprecated),
(FlowRoleAllNames, FlowRole, FlowRoleDeprecated),
),
)
def test_role_name_compilations(
combo: Iterable,
supported: Iterable,
deprecated: Iterable,
):
"""Ensure combination role name enums are perfect copies."""
combo_names = {i.value for i in combo}
supported_names = {i.value for i in supported}
deprecated_names = {i.value for i in deprecated}
assert combo_names == supported_names | deprecated_names
| 898 | 22.051282 | 64 |
py
|
globus-automate-client
|
globus-automate-client-main/tests/test_action_client.py
|
import json
import urllib.parse
import uuid
import pytest
import responses
from globus_automate_client import action_client
@pytest.fixture
def ac():
yield action_client.ActionClient()
@pytest.mark.parametrize(
"data, expected",
(
({"globus_auth_scope": "success"}, "success"),
({}, ""),
(None, ""),
),
)
def test_action_scope(ac, mocked_responses, data, expected):
"""Validate the behavior of the action_scope property."""
mocked_responses.add(
method=responses.GET,
url=action_client.PRODUCTION_ACTIONS_BASE_URL,
json=data,
)
assert ac._action_scope is None
assert ac.action_scope == expected
assert ac._action_scope == expected # Instance variable
assert ac.action_scope == expected # Cache behavior
@pytest.mark.parametrize(
"name, method",
(
("status", "GET"),
("resume", "POST"),
("cancel", "POST"),
("release", "POST"),
),
)
def test_trivial_methods(ac, mocked_responses, name, method):
"""Validate the URL used with trivial requests."""
action_id = "bogus"
url = f"{action_client.PRODUCTION_ACTIONS_BASE_URL}/{action_id}/{name}"
mocked_responses.add(method=method, url=url)
getattr(ac, name)(action_id) # Dynamically get and call the method by name
assert mocked_responses.calls[0].request.url == url
@pytest.mark.parametrize("request_id", ("custom", None))
def test_run_with_request_id(ac, mocked_responses, monkeypatch, request_id):
"""Validate that run() uses a specified request ID or generates a new one."""
url = f"{action_client.PRODUCTION_ACTIONS_BASE_URL}/run"
mocked_responses.add(method="POST", url=url)
monkeypatch.setattr(uuid, "uuid4", lambda: "system")
ac.run(body={}, request_id=request_id)
if request_id is None:
assert b"system" in mocked_responses.calls[0].request.body
else:
assert request_id.encode("utf8") in mocked_responses.calls[0].request.body
@pytest.mark.parametrize("force_path", ("/custom", None))
def test_run_with_force_path(ac, mocked_responses, force_path):
"""Validate that run() uses *force_path*, if specified."""
url = f"{action_client.PRODUCTION_ACTIONS_BASE_URL}{force_path or '/run'}"
mocked_responses.add(method="POST", url=url)
ac.run(body={}, force_path=force_path)
assert mocked_responses.calls[0].request.url == url
@pytest.mark.parametrize(
"kwargs, expected",
(
# Managers
({"manage_by": ["a"]}, {"manage_by": {"a"}}),
({"run_managers": ["b"]}, {"manage_by": {"b"}}),
({"manage_by": ["a"], "run_managers": ["b"]}, {"manage_by": {"a", "b"}}),
# Monitors
({"monitor_by": ["a"]}, {"monitor_by": {"a"}}),
({"run_monitors": ["b"]}, {"monitor_by": {"b"}}),
({"monitor_by": ["a"], "run_monitors": ["b"]}, {"monitor_by": {"a", "b"}}),
),
)
def test_run_with_managers_and_monitors(ac, mocked_responses, kwargs, expected):
"""Validate that run() uses managers and monitors, including aliases."""
mocked_responses.add(
method="POST", url=f"{action_client.PRODUCTION_ACTIONS_BASE_URL}/run"
)
ac.run(body={}, **kwargs)
data = json.loads(mocked_responses.calls[0].request.body.decode("utf8"))
for key in ("monitor_by", "manage_by"):
if key in expected:
assert set(data[key]) == expected[key]
else:
assert key not in data, f"'{key}' must not be included in the request"
@pytest.mark.parametrize("reverse_order, expected", ((True, True), (False, False)))
def test_log_reverse_order(ac, mocked_responses, reverse_order, expected):
"""Validate the *reverse_order* parameter is managed correctly."""
action_id = "bogus"
url = f"{action_client.PRODUCTION_ACTIONS_BASE_URL}/{action_id}/log"
mocked_responses.add(method="GET", url=url, json={})
ac.log(action_id, reverse_order=reverse_order)
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
assert ("reverse_order" in data) is expected
@pytest.mark.parametrize(
"marker, per_page, expected",
(
(None, None, {}),
("1:10", None, {"pagination_token": "1:10"}),
(None, 10, {"per_page": "10"}),
("1:10", 10, {"pagination_token": "1:10"}),
),
)
def test_log_pagination(ac, mocked_responses, marker, per_page, expected):
"""Validate the *marker* and *per_page* parameters interact correctly."""
action_id = "bogus"
url = f"{action_client.PRODUCTION_ACTIONS_BASE_URL}/{action_id}/log"
mocked_responses.add(method="GET", url=url, json={})
ac.log(action_id, marker=marker, per_page=per_page)
query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
for key in ("pagination_token", "per_page"):
if key in expected:
assert data[key] == expected[key]
else:
assert key not in data, f"'{key}' must not appear in the query parameters"
def test_new_client():
"""Validate that new_client() instantiates classes correctly."""
action_url = "bogus-url"
authorizer = "bogus-authorizer"
ac = action_client.ActionClient.new_client(
action_url=action_url,
authorizer=authorizer,
)
assert "ActionClient" in ac.app_name
assert ac.base_url == action_url
assert ac.authorizer == authorizer
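# Hedged addition (not part of the original suite): a minimal sketch reusing the *ac*
# and *mocked_responses* fixtures above to cover get_definition(); the URL layout is
# assumed to mirror ActionClient.get_definition().
def test_get_definition_url(ac, mocked_responses):
    """Sketch: validate the URL used by get_definition()."""
    run_id = "bogus"
    url = f"{action_client.PRODUCTION_ACTIONS_BASE_URL}/runs/{run_id}/definition"
    mocked_responses.add(method="GET", url=url, json={})
    ac.get_definition(run_id)
    assert mocked_responses.calls[0].request.url == url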
| 5,532 | 33.798742 | 86 |
py
|
globus-automate-client
|
globus-automate-client-main/tests/test_helpers.py
|
import pytest
from globus_automate_client import helpers
@pytest.mark.parametrize(
"args, expected, message",
(
# Null
((None, {}), None, "If nothing is specified, None must be returned"),
#
# List with no dict keys
(([], {}), set(), "empty list identity not maintained"),
((["1"], {}), {"1"}, "list identity not maintained"),
((["1", "1"], {}), {"1"}, "deduplication not performed"),
((["1", "2"], {}), {"1", "2"}, "unique items missing"),
#
# Dict with no list
((None, {}, "-"), None, "dict without matching key must return None"),
((None, {"k": ["1"]}), None, "dict with no key specified must return None"),
((None, {"k": ["1"]}, "-"), None, "dict without matching key must return None"),
((None, {"k": ["1"]}, "k"), {"1"}, "dict with matching key must be added"),
((None, {"k": ["1", "1"]}, "k"), {"1"}, "dict values must be de-duped"),
(
(None, {"k1": ["1"], "k2": ["1"]}, "k1", "k2"),
{"1"},
"dict values must be de-duped",
),
(
(None, {"k1": ["1"], "k2": ["2"]}, "k1", "k2"),
{"1", "2"},
"dict values must all be added",
),
#
# Dict with scalar values
((None, {"k": "1"}, "k"), {"1"}, "scalar value must be added"),
(
(None, {"k1": "1", "k2": "1"}, "k1", "k2"),
{"1"},
"scalar values must be de-duped",
),
#
# List combined with a dict
((["1"], {"k": ["2"]}, "-"), {"1"}, "combo w/o matching key must not be added"),
((["1"], {"k": ["2"]}, "k"), {"1", "2"}, "combo w/ matching key must be added"),
((["1"], {"k": ["1"]}, "k"), {"1"}, "combo values must be de-duplicated"),
((["1"], {"k": "2"}, "k"), {"1", "2"}, "combo w/ scalar value must be added"),
# Multiple values
(
(["1", "2"], {"k1": "3", "k2": ["4"], "k3": "5"}, "k2", "k3", "-"),
{"1", "2", "4", "5"},
"all values must be found",
),
),
)
def test_merge_keywords(args, expected, message):
"""Validate globus_automate_client.helpers.merge_lists()."""
original_key_count = len(args[1])
result = helpers.merge_keywords(*args)
final_key_count = len(args[1])
expected_key_count = original_key_count - (len(args) - 2)
if "-" in args:
expected_key_count += 1 # One key was bogus
if expected is None:
assert result is None, message
assert final_key_count == original_key_count, "dict unexpectedly modified"
else:
assert result is not None, "*result* must not be None"
assert set(result) == expected, message
assert final_key_count == expected_key_count, "dicts not modified correctly"
@pytest.mark.parametrize("canonical_value", (None, False, [], 0, "", {}))
def test_validate_aliases_canonical(canonical_value):
"""Verify the canonical value is chosen, even if it's False-y."""
value = helpers.validate_aliases(("a", canonical_value), ("b", None))
assert value is canonical_value
@pytest.mark.parametrize("alias_value", (False, [], 0, "", {}))
def test_validate_aliases_alias(alias_value):
"""Verify an alias value can be chosen, if it's not None."""
with pytest.raises(DeprecationWarning) as warning:
helpers.validate_aliases(("a", None), ("b", alias_value))
assert "b is deprecated" in warning.value.args[0]
assert warning.value.args[1] == "b"
assert warning.value.args[2] is alias_value
def test_validate_aliases_mixed_arguments():
"""Verify that aliases are mutually exclusive with the canonical value."""
with pytest.raises(ValueError) as error:
helpers.validate_aliases(
("a", 1),
("*b*", 2),
)
assert "cannot be combined with an alias" in error.value.args[0]
def test_validate_aliases_mutex_aliases_1():
"""Verify aliases are mutually exclusive."""
with pytest.raises(ValueError) as error:
helpers.validate_aliases(
("a", None),
("*b*", 0),
("*c*", False),
)
assert "*b* and *c*" in error.value.args[0]
def test_validate_aliases_mutex_aliases_2():
"""Verify aliases are mutually exclusive."""
with pytest.raises(ValueError) as error:
helpers.validate_aliases(
("a", None),
("*b*", 0),
("*c*", False),
("*d*", ""),
)
assert "*b*, *c*, and *d*" in error.value.args[0]
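# Hedged addition (not part of the original parametrized suite): a standalone sketch
# spelling out the merge_keywords() contract exercised above: values from the list and
# the named kwargs are merged and de-duplicated, and consumed keys are popped.
def test_merge_keywords_usage_sketch():
    kwargs = {"run_monitors": ["urn:globus:auth:identity:abc"]}
    merged = helpers.merge_keywords(["urn:globus:groups:id:xyz"], kwargs, "run_monitors")
    assert set(merged) == {"urn:globus:auth:identity:abc", "urn:globus:groups:id:xyz"}
    assert "run_monitors" not in kwargs  # the consumed alias key is popped from the dict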
| 4,584 | 35.102362 | 88 |
py
|
globus-automate-client
|
globus-automate-client-main/docs/source/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../../"))
autodoc_mock_imports = [
"globus_sdk",
"jsonschema",
"graphviz",
"click",
"typer",
"rich",
]
autodoc_typehints = "description"
# -- Project information -----------------------------------------------------
project = "Globus Automate Client"
copyright = "2020-2023, University of Chicago"
author = "Globus"
# The full version, including alpha/beta/rc tags
# release = "0.7.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx_copybutton",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["cli_docs.rst"]
# The document containing the toctree directive
master_doc = "index"
# -- Options for HTML output -------------------------------------------------
html_title = "Globus Automate Client"
html_logo = "_static/images/globus-300x300-blue.png"
html_theme = "furo"
pygments_dark_style = "monokai"
html_static_path = ["_static"]
| 1,983 | 28.61194 | 79 |
py
|
globus-automate-client
|
globus-automate-client-main/docs/source/code_snippets/premade_authorizers.py
|
import os
from globus_sdk import AccessTokenAuthorizer
from globus_automate_client import FlowsClient
from globus_automate_client.cli.auth import CLIENT_ID
from globus_automate_client.flows_client import AllowedAuthorizersType
def authorizer_retriever(
flow_url: str, flow_scope: str, client_id: str
) -> AllowedAuthorizersType:
"""
This callback will be called when attempting to interact with a
specific Flow. The callback will receive the Flow url, Flow scope, and
client_id and can choose to use some, all or none of the kwargs. This is
expected to return an Authorizer which can be used to make authenticated
calls to the Flow.
The method used to acquire valid credentials is up to the user. Here, we
naively create an Authorizer using the same token every time.
"""
flow_token = os.environ.get("MY_ACCESS_TOKEN", "")
return AccessTokenAuthorizer(flow_token)
# Create an AccessTokenAuthorizer using a token that has consents to the
# MANAGE_FLOWS_SCOPE. This lets the FlowsClient perform operations against the
# Flow's service i.e. create flow, update a flow, delete a flow
flows_service_token = os.environ.get("MANAGE_FLOWS_SCOPED_TOKEN", "")
flows_service_authorizer = AccessTokenAuthorizer(flows_service_token)
fc = FlowsClient.new_client(
client_id=CLIENT_ID,
authorizer=flows_service_authorizer,
authorizer_callback=authorizer_retriever,
)
my_flows = fc.list_flows()
print(my_flows)
# When running a specific Flow, the authorizer_retriever callback is called
# internally to make the authenticated call to the Flow
running_flow = fc.run_flow(
"1e6b4406-ee3d-4bc5-9198-74128e108111", None, {"echo_string": "hey"}
)
print(running_flow)
# It's possible to create an Authorizer and pass it as a kwarg to the flow
# operation. This usage will not use the authorizer_callback:
running_flow_2 = fc.run_flow(
"1e6b4406-ee3d-4bc5-9198-74128e108111",
None,
{"echo_string": "hey"},
authorizer=AccessTokenAuthorizer("..."),
)
print(running_flow_2)
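# Hedged addition (illustrative sketch only, not in the original snippet): the same
# per-call "authorizer" keyword works for other flow operations, for example checking
# the status of a specific run. The flow ID and run ID below are placeholders.
run_status = fc.flow_action_status(
    "1e6b4406-ee3d-4bc5-9198-74128e108111",
    None,
    "your-run-id-here",
    authorizer=AccessTokenAuthorizer("..."),
)
print(run_status)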
| 2,037 | 34.137931 | 78 |
py
|
globus-automate-client
|
globus-automate-client-main/docs/source/code_snippets/runner.py
|
import logging
import os
import pathlib
import queue
import sys
# The flow to run.
FLOW_ID = "your-flow-id-here"
# The flow will be run X seconds after the most recent filesystem event is received.
# If no filesystem events are ever received, the flow will not be run.
COOL_OFF_TIME_S = 60
logging.basicConfig(
level=logging.WARNING, # Eliminate INFO messages from the Globus SDK.
format="%(asctime)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
log = logging.getLogger(__name__)
try:
from globus_automate_client import create_flows_client
except ImportError:
log.error(
"The globus_automate_client package is not installed."
" (Do you need to activate a virtual environment?)"
)
sys.exit(1)
try:
import watchdog
import watchdog.events
import watchdog.observers
except ImportError:
log.error(
"The watchdog package is not installed."
" (Do you need to activate a virtual environment?)"
)
sys.exit(1)
class Handler(watchdog.events.FileSystemEventHandler):
def __init__(self, events: queue.Queue):
self.events = events
def dispatch(self, event):
"""Put all filesystem events in a queue."""
self.events.put(event)
def main():
try:
path = pathlib.Path(sys.argv[1]).absolute()
except IndexError:
path = pathlib.Path(os.getcwd()).absolute()
log.warning(f"Monitoring {path}")
log.warning("Press CTRL-C to exit (on Windows, press CTRL-BREAK)")
event_queue = queue.Queue(maxsize=-1)
handler = Handler(event_queue)
observer = watchdog.observers.Observer()
observer.schedule(handler, str(path), recursive=True)
observer.start()
flows_client = create_flows_client()
try:
timeout = None
files = set()
while True:
try:
event = event_queue.get(block=True, timeout=timeout)
except queue.Empty:
# .get() timed out.
# It's now been COOL_OFF_TIME_S seconds since the last filesystem event.
# Reset the timeout for the next batch of files and run the flow.
timeout = None
log.warning(f"Running the flow ({len(files)} paths were modified)")
flows_client.run_flow(
flow_id=FLOW_ID,
flow_scope=None,
flow_input={
"count": len(files),
},
label=f"[AUTO] File system changes detected ({len(files)} paths)",
)
files = set()
else:
# .get() returned a filesystem event.
# Make sure the next .get() call times out after COOL_OFF_TIME_S.
timeout = COOL_OFF_TIME_S
files.add(event.src_path)
event_queue.task_done()
except KeyboardInterrupt:
pass
finally:
observer.stop()
observer.join()
if __name__ == "__main__":
main()
| 3,029 | 27.857143 | 88 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/graphviz_rendering.py
|
import json
from typing import Any, Dict, List, Mapping, Optional
from graphviz import Digraph
_SHAPE_TYPES = {
"Choice": {"shape": "diamond"},
"Action": {"shape": "box"},
"Succeed": {"shape": "box", "style": "rounded"},
}
_COLOR_PRECEDENCE = ["", "yellow", "orange", "green", "red"]
def json_to_label_text(json_dict: Mapping[str, Any]) -> str:
label_text = json.dumps(json_dict, indent=1)
label_text = label_text.replace("\n", '<br ALIGN="LEFT"/>')
return label_text
def state_colors_for_log(flow_action_log_entries: List[Mapping]) -> Dict[str, str]:
color_dict: Dict[str, str] = {}
for log_entry in flow_action_log_entries:
state_name = log_entry.get("details", {}).get("state_name")
if state_name is not None:
code = log_entry.get("code", "")
cur_state_color_precedence = _COLOR_PRECEDENCE.index(
color_dict.get(state_name, "")
)
color = ""
if code.endswith("Completed"):
color = "green"
elif code.endswith("Started"):
color = "yellow"
elif code == "ActionPolled":
color = "orange"
if _COLOR_PRECEDENCE.index(color) > cur_state_color_precedence:
color_dict[state_name] = color
return color_dict
def graphviz_format(
flow: Dict[str, Any], state_colors: Optional[Dict[str, str]] = None
) -> Digraph:
states = flow.get("States")
graph = Digraph()
if state_colors is None:
state_colors = {}
if isinstance(states, dict):
for state_name, state_def in states.items():
state_type = state_def.get("Type")
# On Choice states (at least), "Default" also names a next state
next_state = state_def.get("Next", state_def.get("Default"))
node_params = _SHAPE_TYPES.get(state_type, {"shape": "ellipse"})
node_params["label"] = state_name
parameters = state_def.get("Parameters")
if parameters:
parameter_text = json_to_label_text(parameters)
node_params["label"] = node_params["label"] + "<br/>" + parameter_text
else:
input_path = state_def.get("InputPath")
if input_path:
node_params["label"] = (
node_params["label"] + "<br/>" + f"InputPath: {input_path}"
)
if state_name in state_colors:
node_params["fillcolor"] = state_colors[state_name]
node_params["style"] = "filled"
node_params["label"] = "<" + node_params["label"] + '<br ALIGN="LEFT"/>>'
graph.node(state_name, **node_params)
if next_state:
graph.edge(state_name, next_state)
choices = state_def.get("Choices", [])
for choice in choices:
choice_next = choice.pop("Next")
choice_text = "<" + json_to_label_text(choice) + '<br ALIGN="LEFT"/>>'
graph.edge(state_name, choice_next, label=choice_text, style="dotted")
return graph
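# Hedged addition (not part of the original module): a self-contained sketch, using
# invented toy data, showing how state_colors_for_log() and graphviz_format() combine.
if __name__ == "__main__":
    toy_flow = {
        "StartAt": "SayHello",
        "States": {
            "SayHello": {"Type": "Action", "Next": "Done"},
            "Done": {"Type": "Succeed"},
        },
    }
    toy_log = [
        {"code": "ActionStarted", "details": {"state_name": "SayHello"}},
        {"code": "ActionCompleted", "details": {"state_name": "SayHello"}},
    ]
    colors = state_colors_for_log(toy_log)  # {"SayHello": "green"}
    print(graphviz_format(toy_flow, colors).source)  # DOT source for the rendered graph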
| 3,145 | 36.903614 | 86 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/client_helpers.py
|
import typing as t
from globus_sdk.authorizers import GlobusAuthorizer
from globus_automate_client.action_client import ActionClient
from globus_automate_client.cli.auth import CLIENT_ID, get_cli_authorizer
from globus_automate_client.flows_client import (
MANAGE_FLOWS_SCOPE,
PROD_FLOWS_BASE_URL,
FlowsClient,
)
def create_action_client(
action_url: str,
action_scope: t.Optional[str] = None,
client_id: str = CLIENT_ID,
) -> ActionClient:
"""
A helper function to handle creating a properly authenticated ``ActionClient``
which can operate against *Globus Action Provider Interface* compliant Action
Providers. This helper will create an ``ActionClient`` by searching for and
storing tokens on the local filesystem, potentially triggering a log-in flow
if the requested tokens are not found locally.
Given the ``action_url`` for a specific ActionProvider, this function will
attempt to create a valid ``ActionClient`` for interacting with that
ActionProvider. If the ``action_scope`` is not provided, this function will
attempt to discover the ``action_scope`` by querying the target Action
Provider's introspection endpoint. If the Action Provider is not configured
to allow public, unauthenticated access to its introspection endpoint, the
``action_scope`` will be non-discoverable and authentication will fail.
With the ``action_scope`` available, the function will search for a valid
token in the local filesystem cache. In the event that tokens for the scope
cannot be loaded, an interactive login will be triggered. Once
tokens have been loaded, an Authorizer is created and used to instantiate
the ``ActionClient`` which can be used for operations against that Action
Provider.
:param action_url: The URL address at which the target Action Provider
exists
:param action_scope: The target Action Provider's Globus Auth Scope used
for authenticating access to it
:param client_id: The ID for the Native App Auth Client which will be
triggering the login flow for this ActionClient
**Examples**
.. code-block:: pycon
>>> from globus_automate_client import create_action_client
>>> # Create an ActionClient for the HelloWorld Action
>>> ac = create_action_client("https://actions.globus.org/hello_world")
>>> # Run an Action and check its results
>>> resp = ac.run({"echo_string": "Hello from SDK"})
>>> assert resp.data["status"] == "SUCCEEDED"
"""
authorizer = get_cli_authorizer(
action_url=action_url, action_scope=action_scope, client_id=client_id
)
return ActionClient.new_client(action_url=action_url, authorizer=authorizer)
def cli_authorizer_callback(**kwargs):
flow_url = kwargs["flow_url"]
flow_scope = kwargs["flow_scope"]
client_id = kwargs["client_id"]
authorizer = get_cli_authorizer(flow_url, flow_scope, client_id)
return authorizer
def create_flows_client(
client_id: str = CLIENT_ID,
base_url: str = PROD_FLOWS_BASE_URL,
scope: str = MANAGE_FLOWS_SCOPE,
*,
authorizer: t.Optional[GlobusAuthorizer] = None,
authorizer_callback: t.Callable = cli_authorizer_callback,
http_timeout: int = 10
) -> FlowsClient:
"""
A helper function to handle creating a properly authenticated
``FlowsClient`` which can operate against the Globus Automate Flows service.
This function will attempt to load tokens for the ``MANAGE_FLOWS_SCOPE`` from
the local filesystem, triggering a log-in if the requested tokens are not
found locally. Once tokens have been loaded, an Authorizer is created and
used to instantiate the ``FlowsClient``. Attempts to interact with a
specific Flow will similarly search for valid tokens in the local cache,
triggering an interactive log-in if they cannot be found.
:param scope: The Globus Auth scope for which the FlowsClient should
hold consents
:param client_id: The Globus ID to associate with this instance of the
FlowsClient
:param base_url: The URL at which the Globus Automate Flows service is
located
:param authorizer: An authorizer providing access to the Flows service.
If not provided, it will be created using the ``authorizer_callback``
:param authorizer_callback: A callback used to dynamically return
GlobusAuthorizers. If not provided, the Globus Automate CLI callback
will be used which triggers interactive logins and stores tokens
locally
:param http_timeout: Close any requests taking longer than this
parameter's value
**Examples**
.. code-block:: pycon
>>> from globus_automate_client import create_flows_client
>>> # Create an authenticated FlowsClient that can run operations against the Flows
>>> # service
>>> fc = create_flows_client()
>>> # Get a listing of runnable, deployed flows
>>> available_flows = fc.list_flows(["runnable_by"])
>>> for flow in available_flows.data["flows"]:
>>> print(flow)
"""
if authorizer is None:
authorizer = authorizer_callback(
flow_url=base_url, flow_scope=scope, client_id=client_id
)
return FlowsClient.new_client(
client_id,
base_url=base_url,
authorizer=authorizer,
authorizer_callback=authorizer_callback,
http_timeout=http_timeout,
)
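# Hedged addition (illustrative sketch, not part of the original module): supplying a
# custom authorizer_callback so per-Flow credentials are resolved without the CLI's
# interactive login. The environment variable names below are placeholders.
if __name__ == "__main__":
    import os
    from globus_sdk import AccessTokenAuthorizer
    def _env_token_callback(flow_url, flow_scope, client_id):
        # Naively reuse a single flow-scoped token; real code would look tokens up by scope.
        return AccessTokenAuthorizer(os.environ.get("FLOW_SCOPED_TOKEN", ""))
    fc = create_flows_client(
        authorizer=AccessTokenAuthorizer(os.environ.get("MANAGE_FLOWS_SCOPED_TOKEN", "")),
        authorizer_callback=_env_token_callback,
    )
    print(fc.list_flows().data)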
| 5,518 | 40.810606 | 91 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/__init__.py
|
from .action_client import ActionClient
from .cli.auth import get_authorizer_for_scope
from .client_helpers import create_action_client, create_flows_client
from .flows_client import FlowsClient, validate_flow_definition
from .graphviz_rendering import graphviz_format, state_colors_for_log
from .queues_client import QueuesClient, create_queues_client
__all__ = (
"ActionClient",
"create_action_client",
"FlowsClient",
"create_flows_client",
"validate_flow_definition",
"QueuesClient",
"create_queues_client",
"get_authorizer_for_scope",
"graphviz_format",
"state_colors_for_log",
)
| 625 | 30.3 | 69 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/action_client.py
|
import os
import uuid
from typing import Any, Dict, Iterable, List, Mapping, Optional, Type, TypeVar, Union
from urllib.parse import quote, urlparse
from globus_sdk import BaseClient, GlobusHTTPResponse
from globus_sdk.authorizers import GlobusAuthorizer
from .helpers import merge_keywords
_ActionClient = TypeVar("_ActionClient", bound="ActionClient")
PRODUCTION_ACTIONS_BASE_URL = "https://actions.globus.org"
_ENVIRONMENT_ACTIONS_BASE_URLS = {
None: PRODUCTION_ACTIONS_BASE_URL,
"prod": PRODUCTION_ACTIONS_BASE_URL,
"production": PRODUCTION_ACTIONS_BASE_URL,
"sandbox": "https://sandbox.actions.automate.globus.org",
"integration": "https://integration.actions.automate.globus.org",
"test": "https://test.actions.automate.globus.org",
"preview": "https://preview.actions.automate.globus.org",
"staging": "https://staging.actions.automate.globus.org",
}
def _get_actions_base_url_for_environment():
environ = os.environ.get("GLOBUS_SDK_ENVIRONMENT")
if environ not in _ENVIRONMENT_ACTIONS_BASE_URLS:
raise ValueError(f"Unknown value for GLOBUS_SDK_ENVIRONMENT: {environ}")
return _ENVIRONMENT_ACTIONS_BASE_URLS[environ]
class ActionClient(BaseClient):
base_path: str = ""
service_name: str = "actions"
def __init__(self, *args, **kwargs):
if "base_url" not in kwargs:
kwargs["base_url"] = _get_actions_base_url_for_environment()
super().__init__(*args, **kwargs)
self._action_scope: Optional[str] = None
@property
def action_scope(self) -> str:
"""
This property can be used to determine an ``ActionClient``'s
``action_scope``. Internally, this property will introspect the Action
Provider at the URL for which the ``ActionClient`` was created. If the
``Action Provider`` is not public, a valid ``GlobusAuthorizer`` must
have been provided when the ``ActionClient`` was initialized;
otherwise, this call will fail.
"""
if self._action_scope is None:
resp = self.introspect()
if resp.data is None:
self._action_scope = ""
else:
self._action_scope = resp.data.get("globus_auth_scope", "")
return self._action_scope
def introspect(self, **_) -> GlobusHTTPResponse:
"""
Introspect the details of an Action Provider to discover information
such as its expected ``action_scope``, its ``input_schema``, and who to
contact when there's trouble.
"""
return self.get("")
# noinspection PyIncorrectDocstring
def run(
self,
body: Mapping[str, Any],
request_id: Optional[str] = None,
manage_by: Optional[Iterable[str]] = None,
monitor_by: Optional[Iterable[str]] = None,
label: Optional[str] = None,
tags: Optional[List[str]] = None,
force_path: Optional[str] = None,
**kwargs,
) -> GlobusHTTPResponse:
"""
Invoke the Action Provider to execute an Action with the given
parameters.
:param body: The Action Provider specific input required to execute an
Action payload
:param request_id: An optional identifier that serves to de-duplicate
requests to the Action Provider
:param manage_by: A series of Globus identities which may alter
this Action's execution. The principal value is the user's or
group's UUID prefixed with either 'urn:globus:groups:id:' or
'urn:globus:auth:identity:'
:param monitor_by: A series of Globus identities which may
view the state of this Action. The principal value is the user's or
group's UUID prefixed with either 'urn:globus:groups:id:' or
'urn:globus:auth:identity:'
:param force_path: A URL to use for running this action, ignoring any
previous configuration
:param label: Set a label for the Action that is run.
:param tags: A list of tags to associate with the Run.
:param run_monitors: May be used as an alias for ``monitor_by``
:param run_managers: May be used as an alias for ``manage_by``
"""
if request_id is None:
request_id = str(uuid.uuid4())
path = "/run"
if force_path:
path = force_path
body = {
"request_id": str(request_id),
"body": body,
"monitor_by": merge_keywords(monitor_by, kwargs, "run_monitors"),
"manage_by": merge_keywords(manage_by, kwargs, "run_managers"),
"label": label,
"tags": tags,
}
# Remove None items from the request body
data = {k: v for k, v in body.items() if v is not None}
return self.post(path, data=data)
def status(self, action_id: str) -> GlobusHTTPResponse:
"""
Query the Action Provider for the status of executed Action
:param action_id: An identifier that uniquely identifies an Action
executed on this Action Provider.
"""
return self.get(f"{quote(action_id)}/status")
def get_definition(self, action_id: str) -> GlobusHTTPResponse:
"""
Get the flow definition for a given run ID.
:param action_id: An identifier that uniquely identifies an Action
executed on this Action Provider.
"""
return self.get(f"/runs/{action_id}/definition")
def resume(self, action_id: str) -> GlobusHTTPResponse:
"""
Resume an INACTIVE action. Corrective action must have been taken prior to invoking
this method, including the possibility of consenting to additional permissions
and using tokens issued by those consents when creating this client. These
consents would commonly be required when an Action is INACTIVE and shows the code
ConsentRequired.
:param action_id: An identifier that uniquely identifies an Action
executed on this Action Provider.
"""
return self.post(f"{quote(action_id)}/resume")
def cancel(self, action_id: str) -> GlobusHTTPResponse:
"""
Cancel a currently executing Action on an Action Provider
:param action_id: An identifier that uniquely identifies an Action
executed on this Action Provider.
"""
return self.post(f"{quote(action_id)}/cancel")
def release(self, action_id: str) -> GlobusHTTPResponse:
"""
Remove the history of an Action's execution from an Action Provider
:param action_id: An identifier that uniquely identifies an Action
executed on this Action Provider.
"""
return self.post(f"{quote(action_id)}/release")
def log(
self,
action_id: str,
limit: int = 10,
reverse_order: bool = False,
marker: Optional[str] = None,
per_page: Optional[int] = None,
) -> GlobusHTTPResponse:
"""
Retrieve an Action's execution log history. Not all ``Action Providers``
support this operation.
:param action_id: An identifier that uniquely identifies an Action
executed on this Action Provider.
:param limit: An integer specifying how many log records to return
:param reverse_order: Display the Action states in reverse-
chronological order
:param marker: A pagination_token indicating the page of results to
return and how many entries to return. Not all ActionProviders will
support this parameter.
:param per_page: The number of results to return per page. If a
pagination_token is supplied, this parameter has no effect. Not all
ActionProviders will support this parameter.
"""
# *reverse_order* MUST BE None to prevent reversing the sort order.
# Any other value, including False, will reverse the sort order.
params: Dict[str, Union[int, str, bool, None]] = {
"reverse_order": True if reverse_order else None,
"limit": limit,
}
if marker is not None:
params["pagination_token"] = marker
if per_page is not None and marker is None:
params["per_page"] = per_page
return self.get(f"{quote(action_id)}/log", query_params=params)
@classmethod
def new_client(
cls: Type[_ActionClient],
action_url: str,
authorizer: Optional[GlobusAuthorizer],
http_timeout: int = 10,
) -> _ActionClient:
"""
Classmethod to simplify creating an ActionClient. Use this method when
attempting to create an ActionClient with pre-existing credentials or
authorizers.
:param action_url: The url at which the target Action Provider is
located.
:param authorizer: The authorizer to use for validating requests to the
Action Provider.
:param http_timeout: The amount of time to wait for connections to
the Action Provider to be made.
**Examples**
.. code-block:: pycon
>>> auth = ...
>>> url = "https://actions.globus.org/hello_world"
>>> ac = ActionClient.new_client(url, auth)
>>> print(ac.run({"echo_string": "Hello from SDK"}))
"""
verify_ssl = urlparse(action_url).hostname not in {"localhost", "127.0.0.1"}
return cls(
app_name="Globus Automate SDK - ActionClient",
base_url=action_url,
authorizer=authorizer,
transport_params={
"http_timeout": http_timeout,
"verify_ssl": verify_ssl,
},
)
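# Hedged addition (not part of the original module): a minimal end-to-end sketch tying
# the methods above together. The token is a placeholder, and the response field names
# ("action_id", "status") are assumed to follow the Action Provider Interface.
if __name__ == "__main__":
    from globus_sdk import AccessTokenAuthorizer
    client = ActionClient.new_client(
        action_url="https://actions.globus.org/hello_world",
        authorizer=AccessTokenAuthorizer("<token with the hello_world action scope>"),
    )
    started = client.run({"echo_string": "hello"}, label="SDK sketch")
    action_id = started.data["action_id"]
    print(client.status(action_id).data["status"])
    print(client.log(action_id, limit=5).data)
    client.release(action_id)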
| 9,799 | 37.582677 | 91 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/flows_client.py
|
import contextlib
import json
import os
import warnings
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from urllib.parse import quote, urljoin, urlparse
from globus_sdk import (
AccessTokenAuthorizer,
BaseClient,
ClientCredentialsAuthorizer,
GlobusHTTPResponse,
RefreshTokenAuthorizer,
)
from globus_sdk.authorizers import GlobusAuthorizer
from jsonschema import Draft7Validator
from globus_automate_client import ActionClient
from .helpers import merge_keywords, validate_aliases
PROD_FLOWS_BASE_URL = "https://flows.globus.org"
_ENVIRONMENT_FLOWS_BASE_URLS = {
None: PROD_FLOWS_BASE_URL,
"prod": PROD_FLOWS_BASE_URL,
"production": PROD_FLOWS_BASE_URL,
"sandbox": "https://sandbox.flows.automate.globus.org",
"integration": "https://integration.flows.automate.globus.org",
"test": "https://test.flows.automate.globus.org",
"preview": "https://preview.flows.automate.globus.org",
"staging": "https://staging.flows.automate.globus.org",
}
FLOWS_CLIENT_ID = "eec9b274-0c81-4334-bdc2-54e90e689b9a"
MANAGE_FLOWS_SCOPE = f"https://auth.globus.org/scopes/{FLOWS_CLIENT_ID}/manage_flows"
VIEW_FLOWS_SCOPE = f"https://auth.globus.org/scopes/{FLOWS_CLIENT_ID}/view_flows"
RUN_FLOWS_SCOPE = f"https://auth.globus.org/scopes/{FLOWS_CLIENT_ID}/run"
RUN_STATUS_SCOPE = f"https://auth.globus.org/scopes/{FLOWS_CLIENT_ID}/run_status"
RUN_MANAGE_SCOPE = f"https://auth.globus.org/scopes/{FLOWS_CLIENT_ID}/run_manage"
NULL_SCOPE = f"https://auth.globus.org/scopes/{FLOWS_CLIENT_ID}/null"
ALL_FLOW_SCOPES = (
MANAGE_FLOWS_SCOPE,
VIEW_FLOWS_SCOPE,
RUN_FLOWS_SCOPE,
RUN_STATUS_SCOPE,
RUN_MANAGE_SCOPE,
)
_FlowsClient = TypeVar("_FlowsClient", bound="FlowsClient")
AllowedAuthorizersType = Union[
AccessTokenAuthorizer, RefreshTokenAuthorizer, ClientCredentialsAuthorizer
]
AuthorizerCallbackType = Callable[..., AllowedAuthorizersType]
class FlowValidationError(Exception):
def __init__(self, errors: Iterable[str]):
message = "; ".join(errors)
super().__init__(message)
def _all_vals_for_keys(
key_name_set: Set[str],
d: Mapping[str, Any],
stop_traverse_key_set: Optional[Set[str]] = None,
) -> Set[str]:
val_set = set()
for k, v in d.items():
if k in key_name_set and isinstance(v, str):
val_set.add(v)
if stop_traverse_key_set is not None and k in stop_traverse_key_set:
continue
if isinstance(v, dict):
val_set.update(
_all_vals_for_keys(
key_name_set, v, stop_traverse_key_set=stop_traverse_key_set
)
)
elif isinstance(v, list):
for val in v:
if k in key_name_set and isinstance(val, str):
val_set.add(val)
elif isinstance(val, dict):
val_set.update(
_all_vals_for_keys(
key_name_set,
val,
stop_traverse_key_set=stop_traverse_key_set,
)
)
return val_set
def validate_flow_definition(flow_definition: Mapping[str, Any]) -> None:
"""Perform local, JSONSchema based validation of a Flow definition.
This checks the basic structure of your Flow definition: the required fields and
properties for the various state types, and the way states reference one another.
This schema-based validation *does not* validate input values or parameters passed
to Actions. Actions define their own schemas, and the Flow may generate or compute
the values it passes to them, so static, schema-based validation cannot determine
whether the Action parameter values produced during execution are correct.
The input is the dictionary containing the flow definition.
If the flow passes validation, no value is returned. If validation errors are found,
a FlowValidationError exception will be raised containing a string message
describing the error(s) encountered.
"""
schema_path = Path(__file__).parent / "flows_schema.json"
with schema_path.open() as sf:
flow_schema = json.load(sf)
validator = Draft7Validator(flow_schema)
errors = validator.iter_errors(flow_definition)
error_msgs = set()
for error in errors:
if error.path:
# Elements of the error path may be integers or other non-string types,
# but we need strings for use with join()
error_path_for_message = ".".join([str(x) for x in error.path])
error_message = (
f"'{error_path_for_message}' invalid due to {error.message}: "
f" {error.context}"
)
else:
error_message = f"{error.message}: {error.context}"
error_msgs.add(error_message)
if error_msgs:
raise FlowValidationError(error_msgs)
# We can be aggressive about indexing these maps as it has already passed schema
# validation
state_names = set(flow_definition["States"].keys())
flow_state_refs = _all_vals_for_keys(
{"Next", "Default", "StartAt"},
flow_definition,
stop_traverse_key_set={"Parameters"},
)
unreferenced = state_names - flow_state_refs
not_present = flow_state_refs - state_names
if len(unreferenced) > 0:
error_msgs.add(
"The following states are defined but not referenced by any "
f"other states in the flow: {unreferenced}"
)
if len(not_present) > 0:
error_msgs.add(
"The following states are referenced but are not defined by the"
f" flow: {not_present}"
)
if error_msgs:
raise FlowValidationError(error_msgs)
return
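# Hedged addition (not part of the original module; defined only for illustration and
# never called at import time): a toy definition that fails validate_flow_definition()
# because the "Orphan" state is defined but never referenced.
def _example_validate_toy_flow() -> None:
    toy_flow = {
        "StartAt": "Step1",
        "States": {
            "Step1": {"Type": "Pass", "End": True},
            "Orphan": {"Type": "Pass", "End": True},  # defined but unreachable
        },
    }
    try:
        validate_flow_definition(toy_flow)
    except FlowValidationError as err:
        # Depending on the bundled flows_schema.json, the message may come from the
        # schema check or from the unreferenced-state check above.
        print(err)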
def validate_input_schema(input_schema: Optional[Mapping[str, Any]]) -> None:
if input_schema is None:
raise FlowValidationError(["No input schema provided"])
validator = Draft7Validator(Draft7Validator.META_SCHEMA)
errors = validator.iter_errors(input_schema)
error_msgs = set()
for error in errors:
if error.path:
# Elements of the error path may be integers or other non-string types,
# but we need strings for use with join()
error_path_for_message = ".".join([str(x) for x in error.path])
error_message = (
f"'{error_path_for_message}' invalid due to {error.message}: "
f" {error.context}"
)
else:
error_message = f"{error.message}: {error.context}"
error_msgs.add(error_message)
if error_msgs:
raise FlowValidationError(error_msgs)
def _get_flows_base_url_for_environment():
environ = os.environ.get("GLOBUS_SDK_ENVIRONMENT")
if environ not in _ENVIRONMENT_FLOWS_BASE_URLS:
raise ValueError(f"Unknown value for GLOBUS_SDK_ENVIRONMENT: {environ}")
return _ENVIRONMENT_FLOWS_BASE_URLS[environ]
def handle_aliases(
canonical_item: Tuple[str, Any],
*aliases: Tuple[str, Any],
) -> Any:
"""Validate aliases, and handle warnings in an API context."""
try:
return validate_aliases(canonical_item, *aliases)
except DeprecationWarning as warning:
warnings.warn(warning.args[0], category=DeprecationWarning, stacklevel=1)
return warning.args[2]
class FlowsClient(BaseClient):
"""
This is a specialized type of the Globus SDK's ``BaseClient``, used
to interact with the Globus Flows service. Any keyword arguments given are
passed through to the ``BaseClient`` constructor.
"""
allowed_authorizer_types = (
AccessTokenAuthorizer,
RefreshTokenAuthorizer,
ClientCredentialsAuthorizer,
)
base_path: str = ""
service_name: str = "flows"
def __init__(
self,
client_id: str,
get_authorizer_callback: AuthorizerCallbackType,
**kwargs,
) -> None:
if "base_url" not in kwargs:
kwargs["base_url"] = _get_flows_base_url_for_environment()
super().__init__(**kwargs)
self.client_id = client_id
self.get_authorizer_callback = get_authorizer_callback
@contextlib.contextmanager
def use_temporary_authorizer(self, authorizer):
"""Temporarily swap out the authorizer instance variable.
This is a context manager. Use it like this:
.. code-block:: python
authorizer = self._get_authorizer_for_flow(...)
with self.alternate_authorizer(authorizer):
...
"""
original, self.authorizer = self.authorizer, authorizer
try:
yield
finally:
self.authorizer = original
def deploy_flow(
self,
flow_definition: Mapping[str, Any],
title: str,
subtitle: Optional[str] = None,
description: Optional[str] = None,
keywords: Iterable[str] = (),
flow_viewers: Iterable[str] = (),
flow_starters: Iterable[str] = (),
flow_administrators: Iterable[str] = (),
subscription_id: Optional[str] = None,
input_schema: Optional[Mapping[str, Any]] = None,
validate_definition: bool = True,
validate_schema: bool = True,
dry_run: bool = False,
**kwargs,
) -> GlobusHTTPResponse:
"""Deploys a Flow definition to the Flows service, making the Flow
available for execution on the Globus Automate Flows Service.
:param flow_definition: A mapping corresponding to a Globus Flows
definition.
:param title: A simple, human-readable title for the deployed Flow
:param subtitle: A longer, more verbose title for the deployed Flow
:param description: A long form explanation of the Flow's purpose or
usage
:param keywords: A series of words which may help categorize or make the
Flow discoverable
:param flow_viewers: A series of Globus identities which may discover and
view the Flow definition
:param flow_starters: A series of Globus identities which may run an
instance of this Flow
:param flow_administrators: A series of Globus identities which may update
this Flow's definition
:param subscription_id: The Globus Subscription which will be used to
make this flow managed.
:param input_schema: A mapping representing the JSONSchema used to
validate input to this Flow. If not supplied, no validation will be
done on input to this Flow.
:param validate_definition: Set to ``True`` to validate the provided
``flow_definition`` before attempting to deploy the Flow.
:param validate_schema: Set to ``True`` to validate the provided
``input_schema`` before attempting to deploy the Flow.
:param dry_run: Set to ``True`` to test whether the Flow can be
deployed successfully.
"""
if validate_definition:
validate_flow_definition(flow_definition)
if validate_schema:
validate_input_schema(input_schema)
# Handle aliases.
flow_viewers = handle_aliases(
("`flow_viewers`", flow_viewers),
("`viewers`", kwargs.pop("viewers", None)),
("`visible_to`", kwargs.pop("visible_to", None)),
)
flow_starters = handle_aliases(
("`flow_starters`", flow_starters),
("`starters`", kwargs.pop("starters", None)),
("`runnable_by`", kwargs.pop("runnable_by", None)),
)
flow_administrators = handle_aliases(
("`flow_administrators`", flow_administrators),
("`administrators`", kwargs.pop("administrators", None)),
("`administered_by`", kwargs.pop("administered_by", None)),
)
temp_body: Dict[str, Any] = {
"definition": flow_definition,
"title": title,
"subtitle": subtitle,
"description": description,
"keywords": keywords,
"flow_viewers": flow_viewers,
"flow_starters": flow_starters,
"flow_administrators": flow_administrators,
"subscription_id": subscription_id,
}
# Remove None / empty list items from the temp_body.
req_body = {k: v for k, v in temp_body.items() if v}
# Add the input_schema last since an empty input schema is valid.
if input_schema is not None:
req_body["input_schema"] = input_schema
url = "/flows"
if dry_run:
url = "/flows/dry-run"
return self.post(url, data=req_body, **kwargs)
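# Hedged usage sketch (not in the original source): deploying a minimal single-state
# flow. dry_run=True exercises server-side validation without creating the Flow.
#
#     fc.deploy_flow(
#         flow_definition={
#             "StartAt": "Hello",
#             "States": {"Hello": {"Type": "Pass", "End": True}},
#         },
#         title="Hello flow",
#         input_schema={},   # an empty schema is valid and accepts any input
#         dry_run=True,
#     )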
def update_flow(
self,
flow_id: str,
flow_definition: Optional[Mapping[str, Any]] = None,
title: Optional[str] = None,
subtitle: Optional[str] = None,
description: Optional[str] = None,
keywords: Optional[Iterable[str]] = None,
flow_viewers: Optional[Iterable[str]] = None,
flow_starters: Optional[Iterable[str]] = None,
flow_administrators: Optional[Iterable[str]] = None,
subscription_id: Optional[str] = None,
input_schema: Optional[Mapping[str, Any]] = None,
validate_definition: bool = True,
validate_schema: bool = True,
**kwargs,
) -> GlobusHTTPResponse:
"""
Updates a deployed Flow's definition or metadata. This method will
preserve the existing Flow's values for fields which are not
submitted as part of the update.
:param flow_id: The UUID for the Flow that will be updated
:param flow_definition: A mapping corresponding to a Globus Flows
definition
:param title: A simple, human-readable title for the deployed Flow
:param subtitle: A longer, more verbose title for the deployed Flow
:param description: A long form explanation of the Flow's purpose or
usage
:param keywords: A series of words which may help categorize or make the
Flow discoverable
:param flow_viewers: A series of Globus identities which may discover and
view the Flow definition
:param flow_starters: A series of Globus identities which may run an
instance of this Flow
:param flow_administrators: A series of Globus identities which may update
this Flow's definition
:param subscription_id: The Globus Subscription which will be used to
make this flow managed.
:param input_schema: A mapping representing the JSONSchema used to
validate input to this Flow. If not supplied, no validation will be
done on input to this Flow.
:param validate_definition: Set to ``True`` to validate the provided
``flow_definition`` before attempting to update the Flow.
:param validate_schema: Set to ``True`` to validate the provided
``input_schema`` before attempting to update the Flow.
"""
if validate_definition and flow_definition is not None:
validate_flow_definition(flow_definition)
if validate_schema and input_schema is not None:
validate_input_schema(input_schema)
# Handle aliases.
flow_viewers = handle_aliases(
("`flow_viewers`", flow_viewers),
("`viewers`", kwargs.pop("viewers", None)),
("`visible_to`", kwargs.pop("visible_to", None)),
)
flow_starters = handle_aliases(
("`flow_starters`", flow_starters),
("`starters`", kwargs.pop("starters", None)),
("`runnable_by`", kwargs.pop("runnable_by", None)),
)
flow_administrators = handle_aliases(
("`flow_administrators`", flow_administrators),
("`administrators`", kwargs.pop("administrators", None)),
("`administered_by`", kwargs.pop("administered_by", None)),
)
temp_body: Dict[str, Any] = {
"definition": flow_definition,
"title": title,
"subtitle": subtitle,
"description": description,
"keywords": keywords,
"flow_viewers": flow_viewers,
"flow_starters": flow_starters,
"flow_administrators": flow_administrators,
"subscription_id": subscription_id,
"input_schema": input_schema,
}
data = {k: v for k, v in temp_body.items() if v is not None}
return self.put(f"/flows/{flow_id}", data=data, **kwargs)
def get_flow(self, flow_id: str, **kwargs) -> GlobusHTTPResponse:
"""
Retrieve a deployed Flow's definition and metadata
:param flow_id: The UUID identifying the Flow for which to retrieve details
"""
return self.get(f"/flows/{quote(flow_id)}", **kwargs)
def list_flows(
self,
roles: Optional[Iterable[str]] = None,
marker: Optional[str] = None,
per_page: Optional[int] = None,
filters: Optional[dict] = None,
orderings: Optional[dict] = None,
role: Optional[str] = None,
**kwargs,
) -> GlobusHTTPResponse:
"""Display all deployed Flows for which you have the selected role(s)
:param roles:
.. deprecated:: 0.12
Use ``role`` instead
See description for ``role`` parameter. Providing multiple roles behaves as
if only a single ``role`` value is provided and displays the equivalent of
the most permissive role.
:param role:
A role value specifying the minimum role-level permission which will
be displayed based on the following precedence of role values:
- flow_viewer
- flow_starter
- flow_administrator
- flow_owner
Thus, if, for example, ``flow_starter`` is specified, flows for which the
user has the ``flow_starter``, ``flow_administrator`` or ``flow_owner``
roles will be returned.
:param marker: A pagination_token indicating the page of results to
return and how many entries to return. This is created by the Flows
service and returned by operations that support pagination.
:param per_page: The number of results to return per page. If a
pagination_token is supplied, this parameter has no effect.
:param filters: A filtering criteria to apply to the resulting Flow
listing. The keys indicate the filter, the values indicate the
pattern to match. The returned data will be the result of a logical
AND between the filters. Patterns may be comma separated to produce
the result of a logical OR.
:param orderings: An ordering criteria to apply to the resulting
Flow listing. The keys indicate the field to order on, and
the value can be either ASC, for ascending order, or DESC, for
descending order. The first ordering criteria will be used to sort
the data, subsequent ordering criteria will be applied for ties.
Note: To ensure orderings are applied in the correct order, use an
OrderedDict if trying to apply multiple orderings.
"""
params = {}
# *role* takes precedence over *roles* (plural).
if role:
params["filter_role"] = role
elif roles:
params["filter_roles"] = ",".join(roles)
# *marker* takes precedence over *per_page*.
if marker:
params["pagination_token"] = marker
elif per_page:
params["per_page"] = str(per_page)
if orderings:
params["orderby"] = ",".join(
f"{field} {value}" for field, value in orderings.items()
)
if filters:
# Prevent *filters* from overwriting reserved keys.
filters.pop("filter_role", None)
filters.pop("filter_roles", None)
filters.pop("orderby", None)
filters.pop("pagination_token", None)
filters.pop("per_page", None)
params.update(filters)
return self.get("/flows", query_params=params, **kwargs)
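# Hedged usage sketch (not in the original source): listing flows you are allowed to
# start, newest first. The filter key and ordering field below are placeholders, not
# verified API fields.
#
#     fc.list_flows(
#         role="flow_starter",
#         orderings={"created_at": "DESC"},
#         filters={"filter_fulltext": "hello"},
#     )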
def delete_flow(self, flow_id: str, **kwargs) -> GlobusHTTPResponse:
"""
Remove a Flow definition and its metadata from the Flows service
:param flow_id: The UUID identifying the Flow to delete
"""
return self.delete(f"/flows/{flow_id}", **kwargs)
def scope_for_flow(self, flow_id: str) -> str:
"""
Returns the scope associated with a particular Flow
:param flow_id: The UUID identifying the Flow's scope to lookup
"""
flow_url = urljoin(self.base_url, f"/flows/{flow_id}")
return ActionClient.new_client(
flow_url, authorizer=self.authorizer
).action_scope
def _get_authorizer_for_flow(
self, flow_id: str, flow_scope: Optional[str], extras: Dict[str, Any]
) -> AllowedAuthorizersType:
if "authorizer" in extras:
return extras.pop("authorizer")
if flow_scope is None:
flow_scope = self.scope_for_flow(flow_id)
flow_url = urljoin(self.base_url, f"/flows/{flow_id}")
return self.get_authorizer_callback(
flow_url=flow_url,
flow_scope=flow_scope,
client_id=self.client_id,
)
def run_flow(
self,
flow_id: str,
flow_scope: Optional[str],
flow_input: Mapping,
run_managers: Optional[Iterable[str]] = None,
run_monitors: Optional[Iterable[str]] = None,
dry_run: bool = False,
label: Optional[str] = None,
tags: Optional[List[str]] = None,
**kwargs,
) -> GlobusHTTPResponse:
"""
Run an instance of a deployed Flow with the given input.
:param flow_id: The UUID identifying the Flow to run
:param flow_scope: The scope associated with the Flow ``flow_id``. If
not provided, the SDK will attempt to perform an introspection on
the Flow to determine its scope automatically
:param flow_input: A Flow-specific dictionary specifying the input
required for the Flow to run.
:param run_managers: A series of Globus identities which may alter
this Flow instance's execution. The principal value is the user's or
group's UUID prefixed with either 'urn:globus:groups:id:' or
'urn:globus:auth:identity:'
:param run_monitors: A series of Globus identities which may view this
Flow instance's execution state. The principal value is the user's
or group's UUID prefixed with either 'urn:globus:groups:id:' or
'urn:globus:auth:identity:'
:param label: An optional label which can be used to identify this run
:param tags: Tags that will be associated with this Run.
:param kwargs: Any additional kwargs passed into this method are passed
onto the Globus BaseClient. If there exists an "authorizer" keyword
argument, that gets used to run the Flow operation. Otherwise, the
authorizer_callback defined for the FlowsClient will be used.
:param dry_run: Set to ``True`` to test what will happen if the Flow is run
without actually running the Flow.
"""
authorizer = self._get_authorizer_for_flow(flow_id, flow_scope, kwargs)
flow_url = urljoin(self.base_url, f"/flows/{flow_id}")
ac = ActionClient.new_client(flow_url, authorizer)
# Merge monitors and managers with aliases.
# If either list is empty it will be replaced with None
# to prevent empty lists from appearing in the JSON request.
run_monitors = merge_keywords(run_monitors, kwargs, "monitor_by") or None
run_managers = merge_keywords(run_managers, kwargs, "manage_by") or None
if dry_run:
path = flow_url + "/run/dry-run"
return ac.run(
flow_input,
manage_by=run_managers,
monitor_by=run_monitors,
force_path=path,
label=label,
tags=tags,
**kwargs,
)
else:
return ac.run(
flow_input,
manage_by=run_managers,
monitor_by=run_monitors,
label=label,
tags=tags,
**kwargs,
)
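# Hedged usage sketch (not in the original source): starting a run with per-call
# credentials, relying on the "authorizer" kwarg behaviour documented above.
#
#     fc.run_flow(
#         flow_id,
#         None,                      # let the SDK introspect the flow scope
#         {"echo_string": "hello"},  # flow-specific input
#         label="sketch",
#         authorizer=AccessTokenAuthorizer("<flow-scoped token>"),
#     )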
def flow_action_status(
self, flow_id: str, flow_scope: Optional[str], flow_action_id: str, **kwargs
) -> GlobusHTTPResponse:
"""
Determine the status for an Action that was launched by a Flow
:param flow_id: The UUID identifying the Flow which triggered the Action
:param flow_scope: The scope associated with the Flow ``flow_id``. If
not provided, the SDK will attempt to perform an introspection on
the Flow to determine its scope automatically
:param flow_action_id: The ID specifying which Action's status to query
:param kwargs: Any additional kwargs passed into this method are passed
onto the Globus BaseClient. If there exists an "authorizer" keyword
argument, that gets used to run the Flow operation. Otherwise, the
authorizer_callback defined for the FlowsClient will be used.
"""
authorizer = self._get_authorizer_for_flow(flow_id, flow_scope, kwargs)
flow_url = urljoin(self.base_url, f"/flows/{flow_id}")
ac = ActionClient.new_client(flow_url, authorizer)
return ac.status(flow_action_id)
def get_flow_definition_for_run(
self, flow_id: str, flow_scope: Optional[str], flow_action_id: str, **kwargs
) -> GlobusHTTPResponse:
"""
Retrieve the Flow definition that was used to start a Run (Action) launched by a Flow
:param flow_id: The UUID identifying the Flow which triggered the Action
:param flow_scope: The scope associated with the Flow ``flow_id``. If
not provided, the SDK will attempt to perform an introspection on
the Flow to determine its scope automatically
:param flow_action_id: The ID of the Run (Action) whose Flow definition should be retrieved
:param kwargs: Any additional kwargs passed into this method are passed
onto the Globus BaseClient. If there exists an "authorizer" keyword
argument, that gets used to run the Flow operation. Otherwise, the
authorizer_callback defined for the FlowsClient will be used.
"""
authorizer = self._get_authorizer_for_flow(flow_id, flow_scope, kwargs)
ac = ActionClient.new_client(self.base_url, authorizer)
return ac.get_definition(flow_action_id)
def flow_action_resume(
self, flow_id: str, flow_scope: Optional[str], flow_action_id: str, **kwargs
) -> GlobusHTTPResponse:
"""
Resume a Flow Action which is in an INACTIVE state.
:param flow_id: The UUID identifying the Flow which triggered the Action
:param flow_scope: The scope associated with the Flow ``flow_id``. If
not provided, the SDK will attempt to perform an introspection on
the Flow to determine its scope automatically.
:param flow_action_id: The ID specifying the Action with an INACTIVE
status we want to resume.
:param kwargs: Any additional kwargs passed into this method are passed
onto the Globus BaseClient. If there exists an "authorizer" keyword
argument, that gets used to run the Flow operation. Otherwise, the
authorizer_callback defined for the FlowsClient will be used.
"""
authorizer = self._get_authorizer_for_flow(flow_id, flow_scope, kwargs)
flow_url = urljoin(self.base_url, f"/flows/{flow_id}")
ac = ActionClient.new_client(flow_url, authorizer)
return ac.resume(flow_action_id)
def flow_action_release(
self, flow_id: str, flow_scope: Optional[str], flow_action_id: str, **kwargs
) -> GlobusHTTPResponse:
"""
Remove the execution history for an Action that was launched by a Flow
:param flow_id: The UUID identifying the Flow which launched the Action
:param flow_scope: The scope associated with the Flow ``flow_id``. If
not provided, the SDK will attempt to perform an introspection on
the Flow to determine its scope automatically
:param flow_action_id: The ID specifying the Action to release
:param kwargs: Any additional kwargs passed into this method are passed
onto the Globus BaseClient. If there exists an "authorizer" keyword
argument, that gets used to run the Flow operation. Otherwise, the
authorizer_callback defined for the FlowsClient will be used.
"""
authorizer = self._get_authorizer_for_flow(flow_id, flow_scope, kwargs)
flow_url = urljoin(self.base_url, f"/flows/{flow_id}")
ac = ActionClient.new_client(flow_url, authorizer)
return ac.release(flow_action_id)
def flow_action_cancel(
self, flow_id: str, flow_scope: Optional[str], flow_action_id: str, **kwargs
) -> GlobusHTTPResponse:
"""
Cancel the execution of an Action that was launched by a Flow
:param flow_id: The UUID identifying the Flow which launched the Action
:param flow_scope: The scope associated with the Flow ``flow_id``. If
not provided, the SDK will attempt to perform an introspection on
the Flow to determine its scope automatically
:param flow_action_id: The ID specifying the Action we want to cancel
:param kwargs: Any additional kwargs passed into this method are passed
onto the Globus BaseClient. If there exists an "authorizer" keyword
argument, that gets used to run the Flow operation. Otherwise, the
authorizer_callback defined for the FlowsClient will be used.
"""
authorizer = self._get_authorizer_for_flow(flow_id, flow_scope, kwargs)
flow_url = urljoin(self.base_url, f"/flows/{flow_id}")
ac = ActionClient.new_client(flow_url, authorizer)
return ac.cancel(flow_action_id)
def enumerate_runs(
self,
roles: Optional[Iterable[str]] = None,
statuses: Optional[Iterable[str]] = None,
marker: Optional[str] = None,
per_page: Optional[int] = None,
filters: Optional[dict] = None,
orderings: Optional[dict] = None,
role: Optional[str] = None,
**kwargs,
) -> GlobusHTTPResponse:
"""
Retrieve a listing of Runs the caller has access to. This operation
requires the supplied Authorizer to have the RUN_STATUS_SCOPE.
:param statuses: A list of statuses used to filter the Actions that are
returned by the listing. Returned Actions are guaranteed to have one
of the specified ``statuses``. Valid values are:
- SUCCEEDED
- FAILED
- ACTIVE
- INACTIVE
:param roles:
.. deprecated:: 0.12
Use ``role`` instead
See description for ``role`` parameter. Providing multiple roles behaves as
if only a single ``role`` value is provided and displays the equivalent of
the most permissive role.
:param role: A role value specifying the minimum role-level permission on the
runs which will be returned based on the following precedence of role values:
- run_monitor
- run_manager
- run_owner
Thus, if, for example, ``run_manager`` is specified, runs for which the
user has the ``run_manager``, or ``run_owner`` roles
will be returned.
:param marker: A pagination_token indicating the page of results to
return and how many entries to return. This is created by the Flows
service and returned by operations that support pagination.
:param per_page: The number of results to return per page. If a
pagination_token is supplied, this parameter has no effect.
:param filters: A filtering criteria to apply to the resulting Action
listing. The keys indicate the filter, the values indicate the
pattern to match. The returned data will be the result of a logical
AND between the filters. Patterns may be comma separated to produce
the result of a logical OR.
:param orderings: An ordering criteria to apply to the resulting
Action listing. The keys indicate the field to order on, and
the value can be either ASC, for ascending order, or DESC, for
descending order. The first ordering criterion will be used to sort
the data; subsequent criteria will be applied to break ties.
Note: To ensure orderings are applied in the correct order, use an
OrderedDict if trying to apply multiple orderings.
"""
params = {}
# *role* takes precedence over *roles* (plural).
if role:
params["filter_role"] = role
elif roles:
params["filter_roles"] = ",".join(roles)
# *marker* takes precedence over *per_page*.
if marker:
params["pagination_token"] = marker
elif per_page:
params["per_page"] = str(per_page)
if statuses:
params["filter_status"] = ",".join(statuses)
if orderings:
params["orderby"] = ",".join(
f"{field} {value}" for field, value in orderings.items()
)
if filters:
# Prevent *filters* from overwriting reserved keys.
filters.pop("filter_role", None)
filters.pop("filter_roles", None)
filters.pop("filter_status", None)
filters.pop("orderby", None)
filters.pop("pagination_token", None)
filters.pop("per_page", None)
params.update(filters)
authorizer = self._get_authorizer_for_flow("", RUN_STATUS_SCOPE, kwargs)
with self.use_temporary_authorizer(authorizer):
return self.get("/runs", query_params=params, **kwargs)
def enumerate_actions(
self,
**kwargs,
) -> GlobusHTTPResponse:
"""
An alias for ``enumerate_runs``
"""
return self.enumerate_runs(**kwargs)
def list_flow_actions(
self,
**kwargs,
) -> GlobusHTTPResponse:
"""
An alias for ``list_flow_runs``
"""
return self.list_flow_runs(**kwargs)
def list_flow_runs(
self,
flow_id: Optional[str] = None,
flow_scope: Optional[str] = None,
statuses: Optional[Iterable[str]] = None,
roles: Optional[Iterable[str]] = None,
marker: Optional[str] = None,
per_page: Optional[int] = None,
filters: Optional[dict] = None,
orderings: Optional[dict] = None,
role: Optional[str] = None,
**kwargs,
) -> GlobusHTTPResponse:
"""List all Runs for a particular Flow.
If no flow_id is provided, all runs for all Flows will be returned.
:param flow_id: The UUID identifying the Flow which launched the Run.
If not provided, all runs will be returned regardless of which Flow was
used to start the Run (equivalent to ``enumerate_runs``).
:param flow_scope: The scope associated with the Flow ``flow_id``. If
not provided, the SDK will attempt to perform an introspection on
            the Flow to determine its scope automatically.
:param statuses: The same as in ``enumerate_runs``.
:param roles:
.. deprecated:: 0.12
Use ``role`` instead
The same as in ``enumerate_runs``.
:param marker: The same as in ``enumerate_runs``.
:param per_page: The same as in ``enumerate_runs``.
:param filters: The same as in ``enumerate_runs``.
:param orderings: The same as in ``enumerate_runs``.
:param role: The same as in ``enumerate_runs``.
:param kwargs: Any additional kwargs passed into this method are passed
onto the Globus BaseClient. If there exists an "authorizer" keyword
argument, that gets used to run the Flow operation. Otherwise, the
authorizer_callback defined for the FlowsClient will be used.
"""
if flow_id is None:
return self.enumerate_runs(
filters=filters,
marker=marker,
orderings=orderings,
per_page=per_page,
role=role,
roles=roles,
statuses=statuses,
**kwargs,
)
params = {}
# *role* takes precedence over *roles* (plural).
if role:
params["filter_role"] = role
elif roles:
params["filter_roles"] = ",".join(roles)
# *marker* takes precedence over *per_page*.
if marker:
params["pagination_token"] = marker
elif per_page:
params["per_page"] = str(per_page)
if statuses:
params["filter_status"] = ",".join(statuses)
if orderings:
params["orderby"] = ",".join(
f"{field} {value}" for field, value in orderings.items()
)
if filters:
# Prevent *filters* from overwriting reserved keys.
filters.pop("filter_role", None)
filters.pop("filter_roles", None)
filters.pop("filter_status", None)
filters.pop("orderby", None)
filters.pop("pagination_token", None)
filters.pop("per_page", None)
params.update(filters)
authorizer = self._get_authorizer_for_flow(flow_id, flow_scope, kwargs)
with self.use_temporary_authorizer(authorizer):
return self.get(f"/flows/{flow_id}/runs", query_params=params, **kwargs)
def flow_action_update(
self,
action_id: str,
run_managers: Optional[Sequence[str]] = None,
run_monitors: Optional[Sequence[str]] = None,
tags: Optional[Sequence[str]] = None,
label: Optional[str] = None,
**kwargs,
) -> GlobusHTTPResponse:
"""
Update a Flow Action.
:param action_id: The UUID identifying the Action to update
:param run_managers: A list of Globus Auth URNs which will have the
ability to alter the execution of the Action. Supplying an empty
list will remove all existing managers.
:param run_monitors: A list of Globus Auth URNs which will have the
ability to view the execution of the Action. Supplying an empty list
will remove all existing monitors.
:param kwargs: Any additional kwargs passed into this method are passed
onto the Globus BaseClient. If there exists an "authorizer" keyword
argument, that gets used to run the Flow operation. Otherwise, the
authorizer_callback defined for the FlowsClient will be used.
:param tags:
A list of tags to apply to the Run.
:param label:
A label to apply to the Run.
"""
payload = {}
if run_managers is not None:
payload["run_managers"] = run_managers
if run_monitors is not None:
payload["run_monitors"] = run_monitors
if tags is not None:
payload["tags"] = tags
if label is not None:
payload["label"] = label
authorizer = self._get_authorizer_for_flow("", RUN_MANAGE_SCOPE, kwargs)
with self.use_temporary_authorizer(authorizer):
return self.put(f"/runs/{action_id}", data=payload, **kwargs)
def update_runs(
self,
# Filters
run_ids: Iterable[str],
# Tags
add_tags: Optional[Iterable[str]] = None,
remove_tags: Optional[Iterable[str]] = None,
set_tags: Optional[Iterable[str]] = None,
# Run managers
add_run_managers: Optional[Iterable[str]] = None,
remove_run_managers: Optional[Iterable[str]] = None,
set_run_managers: Optional[Iterable[str]] = None,
# Run monitors
add_run_monitors: Optional[Iterable[str]] = None,
remove_run_monitors: Optional[Iterable[str]] = None,
set_run_monitors: Optional[Iterable[str]] = None,
# Status
status: Optional[str] = None,
**kwargs,
) -> GlobusHTTPResponse:
"""
        Update one or more Runs in a single batch operation.
:param run_ids:
            A list of Run IDs to query.
:param set_tags:
            A list of tags to set on the specified Run IDs.
            If the list is empty, all tags will be deleted from the specified Run IDs.
.. note::
The ``set_tags``, ``add_tags``, and ``remove_tags`` arguments
are mutually exclusive.
:param add_tags:
            A list of tags to add to each of the specified Run IDs.
.. note::
The ``set_tags``, ``add_tags``, and ``remove_tags`` arguments
are mutually exclusive.
:param remove_tags:
            A list of tags to remove from each of the specified Run IDs.
.. note::
The ``set_tags``, ``add_tags``, and ``remove_tags`` arguments
are mutually exclusive.
:param set_run_managers:
            A list of Globus Auth URNs to set on the specified Run IDs.
            If the list is empty, all Run managers will be deleted
            from the specified Run IDs.
.. note::
The ``set_run_managers``, ``add_run_managers``, and
``remove_run_managers`` arguments are mutually exclusive.
:param add_run_managers:
            A list of Globus Auth URNs to add to each of the specified Run IDs.
.. note::
The ``set_run_managers``, ``add_run_managers``, and
``remove_run_managers`` arguments are mutually exclusive.
:param remove_run_managers:
            A list of Globus Auth URNs to remove from each of the specified Run IDs.
.. note::
The ``set_run_managers``, ``add_run_managers``, and
``remove_run_managers`` arguments are mutually exclusive.
:param set_run_monitors:
            A list of Globus Auth URNs to set on the specified Run IDs.
            If the list is empty, all Run monitors will be deleted
            from the specified Run IDs.
.. note::
The ``set_run_monitors``, ``add_run_monitors``, and
``remove_run_monitors`` arguments are mutually exclusive.
:param add_run_monitors:
            A list of Globus Auth URNs to add to each of the specified Run IDs.
.. note::
The ``set_run_monitors``, ``add_run_monitors``, and
``remove_run_monitors`` arguments are mutually exclusive.
:param remove_run_monitors:
            A list of Globus Auth URNs to remove from each of the specified Run IDs.
.. note::
The ``set_run_monitors``, ``add_run_monitors``, and
``remove_run_monitors`` arguments are mutually exclusive.
:param status:
            A status to set for all specified Run IDs.
:param kwargs:
Any additional keyword arguments passed to this method
are passed to the Globus BaseClient.
If an "authorizer" keyword argument is passed,
it will be used to authorize the Flow operation.
Otherwise, the authorizer_callback defined for the FlowsClient will be used.
:raises ValueError:
If more than one mutually-exclusive argument is provided.
For example, if ``set_tags`` and ``add_tags`` are both specified,
or if ``add_run_managers`` and ``remove_run_managers`` are both specified.
"""
multi_fields = ("tags", "run_managers", "run_monitors")
multi_ops = ("add", "remove", "set")
# Enforce mutual exclusivity of arguments.
for field in multi_fields:
values = (
locals()[f"set_{field}"],
locals()[f"add_{field}"],
locals()[f"remove_{field}"],
)
if sum(1 for value in values if value is not None) > 1:
raise ValueError(
f"`set_{field}`, `add_{field}`, and `remove_{field}`"
" are mutually exclusive. Only one can be used."
)
# Populate the JSON document to submit.
data: dict = {
"filters": {
"run_ids": list(run_ids),
},
"set": {},
"add": {},
"remove": {},
}
for field in multi_fields:
if locals()[f"add_{field}"] is not None:
data["add"][field] = locals()[f"add_{field}"]
if locals()[f"remove_{field}"] is not None:
data["remove"][field] = locals()[f"remove_{field}"]
if locals()[f"set_{field}"] is not None:
data["set"][field] = locals()[f"set_{field}"]
if status is not None:
data["set"]["status"] = status
authorizer = self._get_authorizer_for_flow("", RUN_MANAGE_SCOPE, kwargs)
with self.use_temporary_authorizer(authorizer):
return self.post("/batch/runs", data=data, **kwargs)
def flow_action_log(
self,
flow_id: str,
flow_scope: str,
flow_action_id: str,
limit: int = 10,
reverse_order: bool = False,
marker: Optional[str] = None,
per_page: Optional[int] = None,
**kwargs,
) -> GlobusHTTPResponse:
"""
Retrieve an Action's execution log history for an Action that was launched by a
specific Flow.
:param flow_id: The UUID identifying the Flow which launched the Action
:param flow_scope: The scope associated with the Flow ``flow_id``. If
not provided, the SDK will attempt to perform an introspection on
            the Flow to determine its scope automatically.
:param flow_action_id: The ID specifying which Action's history to query
:param limit: An integer specifying the maximum number of records for
the Action's execution history to return.
:param reverse_order: An indicator for whether to retrieve the records
in reverse-chronological order.
:param marker: A pagination_token indicating the page of results to
return and how many entries to return. This is created by the Flows
service and returned by operations that support pagination.
        :param per_page: The number of results to return per page. If a
            pagination_token is supplied, this parameter has no effect.
:param kwargs: Any additional kwargs passed into this method are passed
onto the Globus BaseClient. If there exists an "authorizer" keyword
argument, that gets used to run the Flow operation. Otherwise, the
authorizer_callback defined for the FlowsClient will be used.
"""
authorizer = self._get_authorizer_for_flow(flow_id, flow_scope, kwargs)
flow_url = urljoin(self.base_url, f"/flows/{flow_id}")
ac = ActionClient.new_client(flow_url, authorizer)
return ac.log(flow_action_id, limit, reverse_order, marker, per_page)
@classmethod
def new_client(
cls: Type[_FlowsClient],
client_id: str,
authorizer_callback: AuthorizerCallbackType,
authorizer: Optional[GlobusAuthorizer],
base_url: Optional[str] = None,
http_timeout: int = 10,
) -> _FlowsClient:
"""
        Classmethod to simplify creating a FlowsClient. Use this method when
        attempting to create a FlowsClient with pre-existing credentials or
        authorizers. This method is useful when creating a FlowsClient for
        interacting with a Flow without wanting to launch an interactive login
        process.
:param client_id: The client_id to associate with this FlowsClient.
:param authorizer_callback: A callable which is capable of returning an
authorizer for a particular Flow. The callback should accept three
keyword-args: flow_url, flow_scope, client_id. Using some, all, or
none of these args, the callback should return a GlobusAuthorizer
            which provides access to the targeted Flow.
:param authorizer: The authorizer to use for validating requests to the
Flows service. This authorizer is used when interacting with the
            Flows service; it is not used for interacting with a particular
flow. Therefore, this authorizer should grant consent to the
MANAGE_FLOWS_SCOPE. For interacting with a particular flow, set the
authorizer_callback parameter.
        :param base_url: The url at which the Flows service is located.
        :param http_timeout: The amount of time to wait for connections to
            the Flows service to be made.
**Examples**
>>> def cli_authorizer_callback(**kwargs):
flow_url = kwargs["flow_url"]
flow_scope = kwargs["flow_scope"]
client_id = kwargs["client_id"]
return get_cli_authorizer(flow_url, flow_scope, client_id)
>>> action_url = "https://actions.globus.org/hello_world"
>>> client_id = "00000000-0000-0000-0000-000000000000"
>>> auth = ...
>>> fc = FlowsClient.new_client(client_id, cli_authorizer_callback, auth)
>>> print(fc.list_flows())
"""
if base_url is None:
base_url = _get_flows_base_url_for_environment()
verify_ssl = urlparse(base_url).hostname not in {"localhost", "127.0.0.1"}
return cls(
client_id,
authorizer_callback,
app_name="Globus Automate SDK FlowsClient",
base_url=base_url,
authorizer=authorizer,
transport_params={
"http_timeout": http_timeout,
"verify_ssl": verify_ssl,
},
)
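# ---------------------------------------------------------------------------
# Usage sketch added for illustration; it is not part of the original module.
# `fc` stands for an already-constructed Flows client such as the one returned
# by new_client() above. The ordering field name "start_time" and the tag value
# are assumptions chosen for the example, not documented service constants.
def _example_flows_client_usage(fc, run_ids):
    # Ask for the ten most recent ACTIVE runs the caller owns, newest first.
    active_runs = fc.enumerate_runs(
        statuses=["ACTIVE"],
        role="run_owner",
        orderings={"start_time": "DESC"},  # hypothetical ordering field
        per_page=10,
    )
    # Batch-tag a set of existing runs; `run_ids` is supplied by the caller.
    fc.update_runs(run_ids=run_ids, add_tags=["example-tag"])
    return active_runs.data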
| 51,219 | 37.80303 | 88 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/helpers.py
|
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
def merge_keywords(
base: Optional[Iterable[str]],
kwargs: Dict[str, Union[str, Iterable[str]]],
*keywords: str,
) -> Optional[List[str]]:
"""Merge all given keyword parameter aliases and deduplicate the values.
.. warning::
This function has a side-effect. It deliberately modifies *kwargs* in-place.
Any keyword alias that exists in *kwargs* will be removed from *kwargs*.
If an alias key exists in *kwargs* and has a value other than None
then it will be included in the final result that is returned.
If *base* is None and all found aliases have a value of None,
then None will be returned.
For example, given a function with the following call signature:
.. code-block:: python
def example(names=None, **kwargs):
pass
It is possible to quickly add support for additional parameter names
so that users can call the function with alternative keyword parameters:
.. code-block:: python
all_names = merge_keywords(names, kwargs, "pseudonyms", "nicknames")
"""
result: Optional[Set[str]] = None
if base is not None:
result = set(base)
for keyword in keywords:
# Consume the keyword alias.
# NOTE: .pop() is a destructive operation with a deliberate side-effect.
value = kwargs.pop(keyword, None)
if value is None:
continue
# Update the final result.
if result is None:
result = set()
if isinstance(value, str):
result.add(value)
else:
result |= set(value)
if result is None:
return None
return list(result)
def validate_aliases(
canonical_item: Tuple[str, Any],
*aliases: Tuple[str, Any],
) -> Any:
"""Validate and standardize canonical values and aliased values.
There are several places where names in the Flows service have changed.
This function helps regulate the deprecation lifecycle of these names.
* The canonical name cannot be combined with one of its aliases.
A canonical value that evaluates to True, and any alias value that is not None,
will be considered a violation of this requirement.
* Only one alias MAY have a value other than None.
:raises ValueError:
If one of the validation rules is broken.
:raises DeprecationWarning:
If an alias is used instead of the canonical name.
The DeprecationWarning is instantiated with arguments in this order:
* A deprecation message.
* The name of the alias that was used.
* The value of the alias.
This design allows the CLI code to send the warning to STDERR
and allows the API code to issue a true Python warning
that can be managed by the calling application as desired.
"""
canonical_name, canonical_value = canonical_item
arguments = {k: v for k, v in aliases if v is not None}
if canonical_value and arguments:
raise ValueError(f"{canonical_name} cannot be combined with an alias.")
if len(arguments) > 1:
# Construct a readable, comma-separated list of argument names.
alias_name_list = list(arguments)
alias_names = ", ".join(alias_name_list[:-1])
if len(arguments) >= 3:
alias_names += "," # Add an Oxford comma.
alias_names = " and ".join([alias_names, alias_name_list[-1]])
message = f"{alias_names} cannot be used together. Please use {canonical_name}."
raise ValueError(message)
if arguments:
alias_name, alias_value = arguments.popitem()
raise DeprecationWarning(
f"{alias_name} is deprecated. Please use {canonical_name}.",
alias_name,
alias_value,
)
return canonical_value
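# ---------------------------------------------------------------------------
# Illustrative sketch; not part of the original module. The parameter names
# ("nicknames", "pseudonyms", "visible_to") are hypothetical values chosen to
# demonstrate the two helpers above.
def _example_helpers_usage():
    kwargs = {"nicknames": ["Bob", "Rob"], "unrelated": 1}
    merged = merge_keywords(["Robert"], kwargs, "nicknames", "pseudonyms")
    # merged now contains "Robert", "Bob", and "Rob" (order not guaranteed);
    # "nicknames" has been popped from kwargs while "unrelated" is untouched.
    try:
        validate_aliases(("flow_viewer", None), ("visible_to", ["urn:example"]))
    except DeprecationWarning as warning:
        # The alias was used instead of the canonical name; the warning args
        # carry the message, the alias name, and the alias value.
        message, alias_name, alias_value = warning.args
    return merged, kwargs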
| 3,903 | 32.655172 | 88 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/queues_client.py
|
import uuid
from typing import Any, Dict, List, Optional
from globus_sdk import (
AccessTokenAuthorizer,
BaseClient,
ClientCredentialsAuthorizer,
GlobusHTTPResponse,
RefreshTokenAuthorizer,
)
from globus_automate_client.cli.auth import get_authorizer_for_scope
_prod_queues_base_url = "https://queues.api.globus.org/v1"
QUEUES_ADMIN_SCOPE = (
"https://auth.globus.org/scopes/3170bf0b-6789-4285-9aba-8b7875be7cbc/admin"
)
QUEUES_SEND_SCOPE = (
"https://auth.globus.org/scopes/3170bf0b-6789-4285-9aba-8b7875be7cbc/send"
)
QUEUES_RECEIVE_SCOPE = (
"https://auth.globus.org/scopes/3170bf0b-6789-4285-9aba-8b7875be7cbc/receive"
)
ALL_QUEUES_SCOPES = (QUEUES_ADMIN_SCOPE, QUEUES_SEND_SCOPE, QUEUES_RECEIVE_SCOPE)
class QueuesClient(BaseClient):
allowed_authorizer_types = (
AccessTokenAuthorizer,
RefreshTokenAuthorizer,
ClientCredentialsAuthorizer,
)
base_path: str = ""
service_name: str = "queues"
def __init__(self, client_id, **kwargs) -> None:
super().__init__(**kwargs)
self.client_id = client_id
def create_queue(
self,
label: str,
admins: List[str],
senders: List[str],
receivers: List[str],
delivery_timeout: int = 60,
**kwargs,
) -> GlobusHTTPResponse:
self.authorizer = get_authorizer_for_scope(QUEUES_ADMIN_SCOPE)
data = {
"data": {
"label": label,
"admins": admins,
"senders": senders,
"receivers": receivers,
"delivery_timeout": delivery_timeout,
}
}
return self.post("/queues", data=data, **kwargs)
def get_queue(self, queue_id: str) -> GlobusHTTPResponse:
return self.get(f"/queues/{queue_id}")
def list_queues(
self, roles: Optional[List[str]] = None, **kwargs
) -> GlobusHTTPResponse:
self.authorizer = get_authorizer_for_scope(QUEUES_ADMIN_SCOPE)
params = {}
if roles is not None and len(roles) > 0:
params.update(dict(roles=",".join(roles)))
return self.get("/queues", query_params=params, **kwargs)
def update_queue(
self,
queue_id: str,
label: Optional[str] = None,
admins: Optional[List[str]] = None,
senders: Optional[List[str]] = None,
receivers: Optional[List[str]] = None,
delivery_timeout: Optional[int] = None,
visibility_timeout: Optional[int] = None,
**kwargs,
) -> Optional[GlobusHTTPResponse]:
body = {
"id": queue_id,
"label": label,
"admins": admins,
"senders": senders,
"receivers": receivers,
"delivery_timeout": delivery_timeout,
"visibility_timeout": visibility_timeout,
}
# Remove the missing values from the update operation
body = {k: v for k, v in body.items() if v is not None}
if body:
return self.put(f"/queues/{queue_id}", data={"data": body}, **kwargs)
else:
return None
def delete_queue(self, queue_id: str) -> str:
try:
delete_op_resp = self.delete(f"/queues/{queue_id}")
return str(delete_op_resp)
except KeyError:
# Client lib seems to choke if there's no content-type on the response which
# queues doesn't seem to set on delete. Catch that as best we can as a
            # KeyError and return a somewhat useful string.
return f"Queue {queue_id} deleted."
def send_message(
self, queue_id: str, message_body: str, deduplication_id: Optional[str] = None
) -> GlobusHTTPResponse:
self.authorizer = get_authorizer_for_scope(QUEUES_SEND_SCOPE)
if deduplication_id is None:
deduplication_id = str(uuid.uuid4())
data = {
"data": [
{"deduplication_id": deduplication_id, "message_body": message_body}
],
}
return self.post(f"/queues/{queue_id}/messages", data=data)
def receive_messages(
self,
queue_id: str,
max_messages: int = 1,
receive_request_attempt_id: Optional[str] = None,
) -> GlobusHTTPResponse:
self.authorizer = get_authorizer_for_scope(QUEUES_RECEIVE_SCOPE)
params: Dict[str, Any] = {"max_messages": max_messages}
if receive_request_attempt_id is not None:
params["receive_request_attempt_id"] = receive_request_attempt_id
return self.get(f"/queues/{queue_id}/messages", query_params=params)
def delete_messages(
self, queue_id: str, receipt_handles: List[str]
) -> GlobusHTTPResponse:
self.authorizer = get_authorizer_for_scope(QUEUES_RECEIVE_SCOPE)
body = {"data": [{"receipt_handle": rh} for rh in receipt_handles]}
return self.request(
"DELETE",
f"/queues/{queue_id}/messages",
data=body,
)
def create_queues_client(
client_id: str, base_url: str = "https://queues.api.globus.org/v1"
) -> QueuesClient:
authorizer = get_authorizer_for_scope(QUEUES_ADMIN_SCOPE)
return QueuesClient(
client_id,
base_url=base_url,
app_name="queues_client",
authorizer=authorizer,
)
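# ---------------------------------------------------------------------------
# Usage sketch; not part of the original module. The client_id and identity
# URN are placeholders, and the response field names ("id", "data",
# "receipt_handle") are assumptions about the Queues API payloads. Each call
# may prompt for the relevant Queues consent if no cached token exists.
def _example_queue_round_trip():
    client = create_queues_client("00000000-0000-0000-0000-000000000000")
    me = "urn:globus:auth:identity:00000000-0000-0000-0000-000000000000"
    queue = client.create_queue(
        label="example-queue", admins=[me], senders=[me], receivers=[me]
    )
    queue_id = queue.data["id"]  # assumed response field
    client.send_message(queue_id, "hello from the usage sketch")
    received = client.receive_messages(queue_id, max_messages=1)
    handles = [m["receipt_handle"] for m in received.data.get("data", [])]
    if handles:
        client.delete_messages(queue_id, handles)
    return received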
| 5,346 | 32.41875 | 88 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/cli/auth.py
|
import copy
import json
import os
import pathlib
import platform
import sys
from json import JSONDecodeError
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Union, cast
import click
import typer
from globus_sdk import (
AuthAPIError,
AuthClient,
GlobusAPIError,
NativeAppAuthClient,
OAuthTokenResponse,
)
from globus_sdk.authorizers import (
AccessTokenAuthorizer,
GlobusAuthorizer,
RefreshTokenAuthorizer,
)
from globus_automate_client.action_client import ActionClient
CLIENT_ID = "e6c75d97-532a-4c88-b031-8584a319fa3e"
CLIENT_NAME = "Globus Automate Command Line Interface"
AUTH_SCOPES = [
"openid",
"email",
"profile",
]
DEFAULT_TOKEN_FILE = pathlib.Path.home() / pathlib.Path(".globus_automate_tokens.json")
def _get_base_scope(scope: str):
if "[" in scope:
return scope.split("[")[0]
return scope
class TokenSet(NamedTuple):
"""
    Might want to check this out as a replacement:
https://www.attrs.org/en/stable/why.html#namedtuples
"""
access_token: str
refresh_token: Optional[str]
expiration_time: Optional[int]
# Keep track of scopes associated with these tokens with the dependencies still
# included. If we need to get a token where tokens for the base scope exist but
# there isn't a matching dependent scope, that means we need to prompt for consent
# again. If there is a matching full-scope-string in `dependent_scopes`, then we're
# OK to use the token from looking up that base scope.
dependent_scopes: Set[str]
TokensInTokenCache = Dict[str, Union[TokenSet, Dict[str, TokenSet]]]
class TokenCache:
# A prefix we put in place in the token cache dict to create a key sub-dividing the
# cache based on a particular environment.
_environment_prefix = "__"
def __init__(self, token_store: Union[pathlib.Path, str]):
self.token_store = token_store
self.tokens: TokensInTokenCache = {}
self.modified = False
@property
def tokens_for_environment(self):
"""
We will sub-key the full token set for environments other than production
"""
environ = os.environ.get("GLOBUS_SDK_ENVIRONMENT")
if environ is None or environ in {"production", "prod", "default"}:
return self.tokens
environ_cache_key = TokenCache._environment_prefix + environ
if environ_cache_key not in self.tokens:
self.tokens[environ_cache_key]: Dict[str, TokenSet] = {}
return self.tokens[environ_cache_key]
def set_tokens(self, scope: str, tokens: TokenSet) -> TokenSet:
if scope in self.tokens_for_environment:
dependent_scopes = set(tokens.dependent_scopes).union(
set(self.tokens_for_environment[scope].dependent_scopes)
)
new_token_set = TokenSet(
access_token=tokens.access_token,
refresh_token=tokens.refresh_token,
expiration_time=tokens.expiration_time,
dependent_scopes=dependent_scopes,
)
self.tokens_for_environment[scope] = new_token_set
else:
self.tokens_for_environment[scope] = tokens
self.modified = True
return tokens
def get_tokens(self, scope: str) -> Optional[TokenSet]:
if "[" in scope:
# If the full scope string is in our mapping already, we can just return
# the tokens. If not, even if we have a token for the base scope, we
# shouldn't return that because it won't work for the new scope.
base_scope = scope.split("[")[0]
tokens = self.tokens_for_environment.get(base_scope)
if not tokens or scope not in getattr(tokens, "dependent_scopes", set()):
return None
else:
return tokens
return self.tokens_for_environment.get(scope)
@staticmethod
def _deserialize_from_file(file_tokens: Dict[str, Any]) -> TokensInTokenCache:
deserialized: TokensInTokenCache = {}
for k, v in file_tokens.items():
if k.startswith(TokenCache._environment_prefix):
v = TokenCache._deserialize_from_file(v)
else:
v = TokenSet(**v)
deserialized[k] = v
return deserialized
def load_tokens(self):
"""
May raise an EnvironmentError if the cache file exists but can't be read.
"""
try:
with open(self.token_store) as f:
contents = json.load(f)
self.tokens = TokenCache._deserialize_from_file(contents)
except FileNotFoundError:
pass
except JSONDecodeError:
raise EnvironmentError(
"Token cache is corrupted; please run `session revoke` or remove "
f"file {self.token_store} and try again"
)
@staticmethod
def _make_jsonable(tokens) -> Dict[str, Any]:
serialized: Dict[str, Any] = {}
for k, v in tokens.items():
if isinstance(v, TokenSet):
v = v._asdict()
elif isinstance(v, dict):
v = TokenCache._make_jsonable(v)
serialized[k] = v
return serialized
def save_tokens(self):
def default(x):
if isinstance(x, set):
return list(x)
return str(x)
if self.modified:
with open(self.token_store, "w") as f:
if isinstance(self.token_store, pathlib.Path):
self.token_store.chmod(0o600)
else:
os.chmod(self.token_store, 0o600)
jsonable = TokenCache._make_jsonable(self.tokens)
json.dump(
jsonable,
f,
indent=2,
sort_keys=True,
default=default,
)
self.modified = False
def clear_tokens(
self,
environment_aware: bool = True,
callback: Optional[Callable[[str, TokenSet], bool]] = None,
) -> None:
if environment_aware:
tokens = self.tokens_for_environment
else:
tokens = self.tokens
for scope, token_set in copy.copy(tokens).items():
if scope.startswith(TokenCache._environment_prefix):
continue
do_remove = True
if callback is not None:
do_remove = callback(scope, cast(TokenSet, token_set))
if do_remove:
tokens.pop(scope)
self.modified = True
def update_from_oauth_token_response(
self, token_response: OAuthTokenResponse, original_scopes: Set[str]
) -> Dict[str, TokenSet]:
by_scopes = token_response.by_scopes
token_sets: Dict[str, TokenSet] = {}
for scope in by_scopes:
token_info = by_scopes[scope]
dependent_scopes = {s for s in original_scopes if "[" in s}
# token_info must be cast()'ed because mypy detects that
# str and int types exist in the `token_info` dict, adds
# them to the union of possible types, then complains.
token_set = TokenSet(
access_token=cast(str, token_info.get("access_token")),
refresh_token=cast(Optional[str], token_info.get("refresh_token")),
expiration_time=cast(
Optional[int], token_info.get("expires_at_seconds")
),
dependent_scopes=dependent_scopes,
)
self.set_tokens(scope, token_set)
token_sets[scope] = token_set
self.save_tokens()
return token_sets
def _get_globus_sdk_native_client(
client_id: str = CLIENT_ID,
client_name: str = CLIENT_NAME,
):
return NativeAppAuthClient(client_id, app_name=client_name)
def safeprint(s, err: bool = False):
try:
typer.secho(s, err=err)
if err:
sys.stderr.flush()
else:
sys.stdout.flush()
except IOError:
pass
def _do_login_for_scopes(
native_client: NativeAppAuthClient, scopes: List[str]
) -> OAuthTokenResponse:
label = CLIENT_NAME
host = platform.node()
if host:
label = label + f" on {host}"
native_client.oauth2_start_flow(
requested_scopes=scopes,
refresh_tokens=True,
prefill_named_grant=label,
)
linkprompt = (
"Please log into Globus here:\n"
"----------------------------\n"
f"{native_client.oauth2_get_authorize_url()}\n"
"----------------------------\n"
)
safeprint(linkprompt, err=True)
auth_code = typer.prompt("Enter the resulting Authorization Code here", err=True)
return native_client.oauth2_exchange_code_for_tokens(auth_code)
# define this closure with a wrapping function so that 'scope' is properly bound to it
def _new_refresh_handler(token_cache: TokenCache, scope: str):
def refresh_handler(response: OAuthTokenResponse, *args, **kwargs):
token_cache.update_from_oauth_token_response(response, {scope})
return refresh_handler
def get_authorizers_for_scopes(
scopes: List[str],
token_store: Optional[Union[pathlib.Path, str]] = None,
client_id: str = CLIENT_ID,
client_name: str = CLIENT_NAME,
no_login: bool = False,
) -> Dict[str, GlobusAuthorizer]:
token_store = token_store or str(DEFAULT_TOKEN_FILE)
token_cache = TokenCache(token_store)
token_cache.load_tokens()
token_sets: Dict[str, TokenSet] = {}
needed_scopes: Set[str] = set()
native_client = _get_globus_sdk_native_client(client_id, client_name)
for scope in scopes:
token_set = token_cache.get_tokens(scope)
if token_set is not None:
token_sets[scope] = token_set
else:
needed_scopes.add(scope)
if len(needed_scopes) > 0 and not no_login:
token_response = _do_login_for_scopes(native_client, list(needed_scopes))
new_tokens = token_cache.update_from_oauth_token_response(
token_response, set(scopes)
)
token_sets.update(new_tokens)
authorizers: Dict[str, GlobusAuthorizer] = {}
for scope, token_set in token_sets.items():
if token_set is not None:
authorizer: Union[RefreshTokenAuthorizer, AccessTokenAuthorizer]
if token_set.refresh_token is not None:
authorizer = RefreshTokenAuthorizer(
token_set.refresh_token,
native_client,
access_token=token_set.access_token,
expires_at=token_set.expiration_time,
on_refresh=_new_refresh_handler(token_cache, scope),
)
else:
authorizer = AccessTokenAuthorizer(token_set.access_token)
authorizers[_get_base_scope(scope)] = authorizer
authorizers[scope] = authorizer
return authorizers
def get_authorizer_for_scope(
scope: str, client_id: str = CLIENT_ID
) -> Optional[GlobusAuthorizer]:
authorizers = get_authorizers_for_scopes([scope], client_id=client_id)
return authorizers.get(_get_base_scope(scope))
def get_access_token_for_scope(scope: str) -> Optional[str]:
authorizer = get_authorizers_for_scopes([scope]).get(_get_base_scope(scope))
if not authorizer:
click.echo(f"couldn't obtain authorizer for scope: {scope}", err=True)
return None
token = getattr(authorizer, "access_token", None)
if not token:
click.echo("authorizer failed to get token from Globus Auth")
return None
return token
def logout(token_store: Union[pathlib.Path, str] = DEFAULT_TOKEN_FILE) -> bool:
cache = TokenCache(token_store)
cache.load_tokens()
cache.clear_tokens()
cache.save_tokens()
return True
def revoke_login(token_store: Union[pathlib.Path, str] = DEFAULT_TOKEN_FILE) -> bool:
client = _get_globus_sdk_native_client(CLIENT_ID, CLIENT_NAME)
if not client:
click.echo("failed to get auth client", err=True)
return False
cache = TokenCache(token_store)
cache.load_tokens()
def revoker(scope: str, token_set: TokenSet) -> bool:
client.oauth2_revoke_token(token_set.access_token)
client.oauth2_revoke_token(token_set.refresh_token)
return True
cache.clear_tokens(callback=revoker)
cache.save_tokens()
return True
def get_current_user(
no_login: bool = False, token_store: Union[pathlib.Path, str] = DEFAULT_TOKEN_FILE
) -> Optional[Dict[str, Any]]:
"""
When `no_login` is set, returns `None` if not logged in.
"""
# We don't really care which scope from the AUTH_SCOPE list we use here since they
# all share the same resource server (Auth itself) and therefore an authorizer for
# any of them grants us access to the same resource server.
authorizers = get_authorizers_for_scopes(
AUTH_SCOPES, token_store=token_store, no_login=no_login
)
if not authorizers:
return None
auth_client = AuthClient(authorizer=authorizers.get("openid"))
try:
user_info = auth_client.oauth2_userinfo()
except AuthAPIError as e:
click.echo(
(
"Couldn't get user information from Auth service\n"
"(If you rescinded your consents in the Auth service, do `session"
" logout` and try again)\n"
f" Error details: {str(e)}"
),
err=True,
)
sys.exit(1)
return user_info.data
def get_cli_authorizer(
action_url: str,
action_scope: Optional[str],
client_id: str = CLIENT_ID,
) -> Optional[GlobusAuthorizer]:
if action_scope is None:
# We don't know the scope which makes it impossible to get a token,
        # but create a client anyway in case this Action Provider is publicly
# visible and we can introspect its scope
try:
action_scope = ActionClient.new_client(action_url, None).action_scope
except GlobusAPIError:
pass
if action_scope:
authorizer = get_authorizer_for_scope(action_scope, client_id)
else:
# Any attempts to use this authorizer will fail but there's nothing we
# can do without a scope.
authorizer = None
return authorizer
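# ---------------------------------------------------------------------------
# Usage sketch; not part of the original module. The scope shown is the Auth
# view_identities scope already used elsewhere in this package; any valid
# Globus Auth scope string works. The first call may start an interactive
# login and cache the resulting tokens in DEFAULT_TOKEN_FILE.
def _example_auth_usage():
    scope = "urn:globus:auth:scope:auth.globus.org:view_identities"
    authorizer = get_authorizer_for_scope(scope)
    token = get_access_token_for_scope(scope)
    user = get_current_user(no_login=True)  # None when not logged in
    return authorizer, token, user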
| 14,482 | 33.898795 | 87 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/cli/main.py
|
try:
from importlib.metadata import version as get_version # type: ignore
except ImportError:
# Python < 3.8
from importlib_metadata import version as get_version
import typer
import yaml
from globus_automate_client.cli import actions, flows, queues, session
from globus_automate_client.cli.auth import DEFAULT_TOKEN_FILE
# Monkey patching out the unsafe load capability
# Only use safe_load for our purposes
try:
del yaml.unsafe_load
except AttributeError:
pass
help = f"""
CLI for Globus Automate
By default, this CLI keeps all its config and cached tokens in
{DEFAULT_TOKEN_FILE.name} in the user's home directory.
"""
app = typer.Typer(help=help, short_help="Globus Automate CLI")
app.add_typer(actions.app, name="action")
app.add_typer(flows.app, name="flow")
app.add_typer(queues.app, name="queue")
app.add_typer(session.app, name="session")
def version_callback(display_version: bool):
if display_version:
typer.echo(f"globus-automate {get_version('globus-automate-client')}")
raise typer.Exit()
@app.callback()
def main(
version: bool = typer.Option(
None,
"--version",
"-V",
callback=version_callback,
help="Print CLI version number and exit",
is_eager=True,
),
):
pass
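# ---------------------------------------------------------------------------
# Minimal sketch; not part of the original module. The assembled typer app can
# be exercised in-process with typer's CliRunner, which is convenient for
# smoke-testing the wiring above without installing the console script.
def _example_invoke_version():
    from typer.testing import CliRunner

    runner = CliRunner()
    result = runner.invoke(app, ["--version"])
    return result.output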
if __name__ == "__main__":
app()
| 1,330 | 22.350877 | 78 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/cli/callbacks.py
|
import json
import os
import pathlib
import re
from errno import ENAMETOOLONG
from typing import AbstractSet, Callable, List, Optional, cast
from urllib.parse import urlparse
import typer
import yaml
from globus_sdk import AuthClient
from .auth import get_authorizer_for_scope
_uuid_regex = (
"([a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})"
)
_principal_urn_regex = f"^urn:globus:(auth:identity|groups:id):{_uuid_regex}$"
def url_validator_callback(url: str) -> str:
"""
    Validates that a user-provided string "looks" like a URL, i.e., contains at
    least a valid scheme and netloc (e.g. www.example.org).
Logic taken from https://stackoverflow.com/a/38020041
"""
if url is None:
return url
url = url.strip()
try:
result = urlparse(url)
if result.scheme and result.netloc:
return url
except Exception:
pass
raise typer.BadParameter("Please supply a valid url")
def _base_principal_validator(
principals: List[str], *, special_vals: AbstractSet[str] = frozenset()
) -> List[str]:
"""
This validator ensures the principal IDs are valid UUIDs prefixed with valid
Globus ID beginnings. It will optionally determine if a provided principal
exists in a set of "special" values.
"""
auth_beginning = "urn:globus:auth:identity:"
auth_client: Optional[AuthClient] = None
valid_principals = []
invalid_principals = []
for p in principals:
if special_vals and p in special_vals or re.match(_principal_urn_regex, p):
valid_principals.append(p)
else:
# Try to do a lookup of the identity
if auth_client is None:
auth = get_authorizer_for_scope(
"urn:globus:auth:scope:auth.globus.org:view_identities"
)
auth_client = AuthClient(authorizer=auth)
auth_resp = auth_client.get_identities(usernames=p)
identities = auth_resp.data.get("identities", [])
if len(identities) == 0:
invalid_principals.append(p)
for identity in identities:
valid_principals.append(auth_beginning + identity["id"])
if invalid_principals:
raise typer.BadParameter(
f"Invalid principal value {'; '.join(invalid_principals)}"
)
return valid_principals
def principal_validator(principals: List[str]) -> List[str]:
"""A principal ID needs to be a valid UUID."""
return _base_principal_validator(cast(List[str], principals))
def custom_principal_validator(special_values: AbstractSet[str]) -> Callable:
"""A principal ID needs to be a valid UUID."""
def wrapper(principals: List[str]) -> List[str]:
return _base_principal_validator(principals, special_vals=special_values)
return wrapper
def flows_endpoint_envvar_callback(default_value: str) -> str:
"""
This callback searches the caller's environment for an environment variable
defining the target Flow endpoint.
"""
return os.getenv("GLOBUS_AUTOMATE_FLOWS_ENDPOINT", default_value)
def input_validator(body: str) -> str:
"""
Checks if input is a file and loads its contents, otherwise returns the
supplied string. This validator will also attempt to parse the string as a
    dict, raising an error if the string cannot be parsed into one.
"""
# Callbacks are run regardless of whether an option was explicitly set.
# Handle the scenario where the default value for an option is empty
if not body:
return body
# Reading from a file was indicated by prepending the filename with the @
# symbol -- for backwards compatability check if the symbol is present and
# remove it
body = body.lstrip("@")
body_path = pathlib.Path(body)
try:
if body_path.exists() and body_path.is_dir():
raise typer.BadParameter("Expected file, received directory")
elif body_path.exists() and body_path.is_file():
with body_path.open() as f:
body = f.read()
except OSError as e:
if e.errno == ENAMETOOLONG:
# We cannot load the string to check if it exists, is a file, or
# is a directory, so we have to assume the string is JSON and
# continue
pass
try:
parsed_body = json.loads(body)
except json.JSONDecodeError:
try:
parsed_body = yaml.safe_load(body)
except yaml.YAMLError:
raise typer.BadParameter("Unable to load input as JSON or YAML")
if not isinstance(parsed_body, dict):
raise typer.BadParameter("Unable to load input as JSON or YAML")
return body
def flow_input_validator(body: str) -> str:
"""
    Flow inputs can be formatted as either YAML or JSON.
    YAML loading alone covers both cases,
    but we need a more generic error message than is provided
    by the other validators.
"""
# Callbacks are run regardless of whether an option was explicitly set.
# Handle the scenario where the default value for an option is empty
if not body:
return body
body = input_validator(body)
try:
yaml_body = yaml.safe_load(body)
except yaml.YAMLError as e:
raise typer.BadParameter(f"Invalid flow input: {e}")
try:
yaml_to_json = json.dumps(yaml_body)
except TypeError as e:
raise typer.BadParameter(f"Unable to translate flow input to JSON: {e}")
return yaml_to_json
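# ---------------------------------------------------------------------------
# Illustrative sketch; not part of the original module. It shows how the
# validators behave when called directly; typer normally invokes them as
# option callbacks.
def _example_validators():
    # A raw JSON string passes through unchanged once it parses as a dict.
    body = input_validator('{"echo_string": "hello"}')
    # Flow input may be YAML; the validator normalizes it to a JSON string.
    flow_input = flow_input_validator("echo_string: hello")
    # URLs must carry both a scheme and a network location.
    url = url_validator_callback("https://flows.globus.org")
    return body, flow_input, url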
| 5,537 | 32.161677 | 83 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/cli/rich_rendering.py
|
from rich.console import Group
from rich.live import Live
class Content:
"""
    This class represents a per-CLI-invocation "canvas" which holds the data
    that gets displayed on the CLI. All content updates happen by updating the
    value of the instance's render ``Group`` (the ``rg`` attribute).
"""
rg: Group = Group()
def __rich__(self) -> Group:
return self.rg
cli_content = Content()
live_content = Live(cli_content, refresh_per_second=20)
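# ---------------------------------------------------------------------------
# Minimal sketch; not part of the original module. Callers update the shared
# canvas by assigning a new Group to cli_content.rg while live_content is
# active; rich re-renders the canvas at the configured refresh rate.
def _example_live_update():
    with live_content:
        cli_content.rg = Group("waiting for results ...")
        cli_content.rg = Group("done")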
| 455 | 21.8 | 80 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/cli/constants.py
|
import enum
import typer
from globus_sdk import GlobusHTTPResponse
from globus_automate_client.graphviz_rendering import (
graphviz_format,
state_colors_for_log,
)
class FlowRole(str, enum.Enum):
flow_viewer = "flow_viewer"
flow_starter = "flow_starter"
flow_administrator = "flow_administrator"
flow_owner = "flow_owner"
class FlowRoleDeprecated(str, enum.Enum):
created_by = "created_by"
visible_to = "visible_to"
runnable_by = "runnable_by"
administered_by = "administered_by"
class FlowRoleAllNames(str, enum.Enum):
# Compile supported and deprecated names explicitly to satisfy mypy.
flow_viewer = "flow_viewer"
flow_starter = "flow_starter"
flow_administrator = "flow_administrator"
flow_owner = "flow_owner"
# Deprecated
created_by = "created_by"
visible_to = "visible_to"
runnable_by = "runnable_by"
administered_by = "administered_by"
class ActionRole(str, enum.Enum):
run_monitor = "run_monitor"
run_manager = "run_manager"
run_owner = "run_owner"
class ActionRoleDeprecated(str, enum.Enum):
created_by = "created_by"
monitor_by = "monitor_by"
manage_by = "manage_by"
class ActionRoleAllNames(str, enum.Enum):
# Compile supported and deprecated names explicitly to satisfy mypy.
run_monitor = "run_monitor"
run_manager = "run_manager"
run_owner = "run_owner"
# Deprecated
created_by = "created_by"
monitor_by = "monitor_by"
manage_by = "manage_by"
class ActionStatus(str, enum.Enum):
succeeded = "SUCCEEDED"
failed = "FAILED"
active = "ACTIVE"
inactive = "INACTIVE"
class OutputFormat(str, enum.Enum):
"""
This class defines the generally supported output formats
"""
json = "json"
yaml = "yaml"
class ListingOutputFormat(str, enum.Enum):
"""
This class represents the different output formats for lists of data
"""
json = "json"
yaml = "yaml"
table = "table"
class RunLogOutputFormat(str, enum.Enum):
"""
This class represents the different formats in which a Run's logs may be
displayed
"""
json = "json"
yaml = "yaml"
table = "table"
image = "image"
graphiz = "graphiz"
def visualize(self, flow_log: GlobusHTTPResponse, flow_def: GlobusHTTPResponse):
if self == "image":
self.graphviz_image(flow_log, flow_def)
elif self == "graphiz":
self.graphviz_text(flow_log, flow_def)
def graphviz_text(self, flow_log: GlobusHTTPResponse, flow_def: GlobusHTTPResponse):
graphviz_out = self._as_graphiz(flow_log, flow_def)
typer.echo(graphviz_out.source)
def graphviz_image(
self, flow_log: GlobusHTTPResponse, flow_def: GlobusHTTPResponse
):
graphviz_out = self._as_graphiz(flow_log, flow_def)
graphviz_out.render("flows-output/graph", view=True, cleanup=True)
def _as_graphiz(self, flow_log: GlobusHTTPResponse, flow_def: GlobusHTTPResponse):
definition = flow_def.data["definition"]
colors = state_colors_for_log(flow_log.data["entries"])
return graphviz_format(definition, colors)
class ImageOutputFormat(str, enum.Enum):
"""
This class represents the different ways of visualizing a Flow
"""
json = "json"
yaml = "yaml"
image = "image"
graphviz = "graphviz"
def visualize(self, flow_dict):
if self == "image":
self.graphviz_image(flow_dict)
elif self == "graphviz":
self.graphviz_text(flow_dict)
def graphviz_text(self, flow):
graphviz_out = graphviz_format(flow, None)
typer.echo(graphviz_out.source)
def graphviz_image(self, flow):
graphviz_out = graphviz_format(flow, None)
graphviz_out.render("flows-output/graph", view=True, cleanup=True)
def graphviz_text(flow_log: GlobusHTTPResponse, flow_def: GlobusHTTPResponse):
definition = flow_def.data["definition"]
colors = state_colors_for_log(flow_log.data["entries"])
graphviz_out = graphviz_format(definition, colors)
typer.echo(graphviz_out.source)
def graphviz_image(flow_log: GlobusHTTPResponse, flow_def: GlobusHTTPResponse):
definition = flow_def.data["definition"]
colors = state_colors_for_log(flow_log.data["entries"])
graphviz_out = graphviz_format(definition, colors)
graphviz_out.render("flows-output/graph", view=True, cleanup=True)
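# ---------------------------------------------------------------------------
# Minimal sketch; not part of the original module. Because these enums
# subclass str, members compare equal to their raw values, which is exactly
# how the visualize() methods above branch on the selected format.
def _example_output_format_selection():
    chosen = ListingOutputFormat("table")  # e.g. parsed from a CLI option
    assert chosen == "table"
    assert chosen is ListingOutputFormat.table
    return chosen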
| 4,445 | 26.614907 | 88 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/cli/rich_helpers.py
|
import abc
import collections
import functools
import json
from time import sleep
from typing import Any, Callable, Dict, List, Optional, Set, Type, Union, cast
import arrow
import typer
import yaml
from globus_sdk import AuthClient, BaseClient, GlobusAPIError, GlobusHTTPResponse
from requests import Response
from rich.console import Group, RenderableType
from rich.spinner import Spinner
from rich.table import Table
from rich.text import Text
from typing_extensions import Literal
from .auth import get_authorizers_for_scopes
from .constants import OutputFormat
from .helpers import get_http_details
from .rich_rendering import cli_content, live_content
_title_max_width = 25
_uuid_min_width = 36
def humanize_datetimes(dt: str) -> str:
return arrow.get(dt).humanize()
def humanize_auth_urn(urn: str) -> str:
id = urn.split(":")[-1]
if urn.startswith("urn:globus:auth:identity:"):
return f"User: {id}"
elif urn.startswith("urn:globus:groups:id:"):
return f"Group: {id}"
return urn
def identity_to_user(field: str, items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
    Given a list of dict entries, this function will attempt to replace each
    entry's ``field`` value (a Globus Auth identity URN) with the corresponding
    Globus username, when the caller is already logged in.
"""
# get_identities will fail if there's no data, so short circuit
if len(items) == 0:
return items
# Only do the conversion if the user is already logged in
authzs = get_authorizers_for_scopes(["openid"], no_login=True)
authorizer = authzs.get("openid")
if authorizer is None:
return items
# Collect IDs from the List data
creators: Dict[str, None] = collections.OrderedDict()
for item in items:
urn_id = item.get(field, "")
id = urn_id.split(":")[-1]
creators[id] = None
# Get id -> username mapping
ac = AuthClient(authorizer=authorizer)
resp = ac.get_identities(ids=creators.keys())
id_to_user = {i["id"]: i["username"] for i in resp.data["identities"]}
# Update items in list
for item in items:
urn_id = item.get(field, "")
id = urn_id.split(":")[-1]
if id in id_to_user:
item[field] = id_to_user[id]
return items
class Field:
"""
A generic class structure for transforming lists of data into a table. Each
instance of this class represents a single field to pull from a data item.
"""
def __init__(
self,
name: str,
default: str,
transformation: Optional[Callable[[str], str]] = None,
min_width: Optional[int] = None,
max_width: Optional[int] = None,
) -> None:
"""
name: The name of the field which contains the data for display
default: A placeholder to use if the field is not available
transformation: A callable that takes and returns a string. This is used
to format the data field in an item
"""
self.name = name
self.default = default
self.transformation = transformation
self.min_width = min_width
self.max_width = max_width
class DisplayFields(abc.ABC):
"""
An object representing how to parse lists of data into a table.
fields: A list of Fields that defines which fields get rendered into a table
and how they are presented
path_to_data_list: A key under which the data list is defined
"""
fields: List[Field]
path_to_data_list: str
prehook: Optional[Callable[[List[Dict[str, Any]]], List[Dict[str, Any]]]] = None
class RunListDisplayFields(DisplayFields):
"""
This object defines the fields and display style of a Run listing into a
table.
"""
fields = [
Field("action_id", "", min_width=_uuid_min_width),
Field("label", "<EMPTY>"),
Field("status", ""),
Field("start_time", "", humanize_datetimes),
Field("created_by", "", humanize_auth_urn),
Field("flow_id", "<DELETED>"),
]
path_to_data_list = "runs"
prehook = functools.partial(identity_to_user, "created_by")
class FlowListDisplayFields(DisplayFields):
"""
This object defines the fields and display style of a Flow listing into a
table.
"""
fields = [
Field("title", "", max_width=_title_max_width),
Field("id", "", min_width=_uuid_min_width),
Field("flow_owner", "", humanize_auth_urn),
# Field("created_at", "", humanize_datetimes),
# Field("updated_at", "", humanize_datetimes),
]
path_to_data_list = "flows"
prehook = functools.partial(identity_to_user, "flow_owner")
class RunLogDisplayFields(DisplayFields):
"""
This object defines the fields and display style of a Run log listing into a
table.
"""
fields = [
Field("code", ""),
Field("description", ""),
Field("time", "", humanize_datetimes),
]
path_to_data_list = "entries"
class CompletionDetector(abc.ABC):
"""
An object that can be used to determine if an operation is complete or if it
should continue polling.
"""
terminals: Set[str]
@classmethod
@abc.abstractmethod
def is_complete(cls, result: Union[GlobusHTTPResponse, GlobusAPIError]) -> bool:
"""
        Given either a GlobusHTTPResponse or GlobusAPIError, this method should
        return a boolean indicating whether the operation is complete and
        polling may stop.
"""
pass
class ActionCompletionDetector(CompletionDetector):
"""
This class determines when a Run has reached a completed state.
"""
terminals: Set[str] = {"SUCCEEDED", "FAILED"}
@classmethod
def is_complete(cls, result: Union[GlobusHTTPResponse, GlobusAPIError]) -> bool:
return (
isinstance(result, GlobusAPIError)
or result.data.get("status", None) in cls.terminals
)
class LogCompletionDetector(CompletionDetector):
"""
This class determines when a Run has reached a completed state from
inspecting its logs.
"""
terminals: Set[str] = {"FlowSucceeded", "FlowFailed", "FlowCanceled"}
@classmethod
def is_complete(cls, result: Union[GlobusHTTPResponse, GlobusAPIError]) -> bool:
return isinstance(result, GlobusAPIError) or any(
entry["code"] in cls.terminals for entry in result.data["entries"]
)
class Result:
"""
A class which wraps a Globus API response.
"""
def __init__(
self,
response: Union[GlobusHTTPResponse, GlobusAPIError, str],
detector: Type[CompletionDetector] = ActionCompletionDetector,
):
self.result = response
self.detector = detector
self.is_api_error = isinstance(response, GlobusAPIError)
self.data: Union[str, Dict[str, Any]]
if isinstance(response, str):
self.data = {"result": response}
elif isinstance(response, GlobusAPIError):
self.data = response.raw_json if response.raw_json else response.raw_text
else:
self.data = response.data
@property
def details(self) -> str:
return get_http_details(self.result)
@property
def completed(self) -> bool:
return isinstance(self.result, str) or self.detector.is_complete(self.result)
def as_json(self) -> str:
return json.dumps(self.data, indent=2).strip()
def as_yaml(self) -> str:
return yaml.dump(self.data, indent=2).strip()
class Renderer:
"""
A class which understands how to render itself as json, yaml or a table and
    under which circumstances to use rich rendering. It's important that not
every output be rendered using rich because JSON parsing tools cannot parse
rich objects.
"""
def __init__(
self,
result: Result,
*,
verbose: bool = False,
watching: bool = False,
format: Literal["json", "yaml", "table"] = "json",
fields: Optional[Type[DisplayFields]] = None,
run_once: bool = False,
):
self.result = result
self.verbose = verbose
self.watching = watching
self.run_once = run_once
self.format = format
self.fields = fields
self.table_style = "orange1"
self.detail_style = "cyan"
self.success_style = "green"
self.fail_style = "red"
self.spinner = Spinner("simpleDotsScrolling")
if format == "table":
assert fields is not None
@property
def will_update(self) -> bool:
return self.watching and not self.result.completed and not self.run_once
@property
def use_rich_rendering(self) -> bool:
return self.watching or self.format == "table"
@property
def result_style(self) -> str:
if self.result.is_api_error:
return self.fail_style
return self.success_style
@property
def details_as_text(self) -> Text:
return Text(self.result.details, style=self.detail_style)
@property
def details_as_str(self) -> str:
return self.details_as_text.plain
@property
def result_as_text(self) -> Text:
if self.result.is_api_error:
style = self.fail_style
else:
style = self.success_style
if self.format == "yaml":
return Text(self.result.as_yaml(), style)
return Text(self.result.as_json(), style)
@property
def result_as_renderable(self) -> RenderableType:
if self.format != "table" or self.result.is_api_error:
return self.result_as_text
assert self.fields is not None
table = Table()
for f in self.fields.fields:
table.add_column(
f.name,
style=self.table_style,
min_width=f.min_width,
max_width=f.max_width,
)
list_of_data: List[Dict[str, Any]] = cast(dict, self.result.data).get(
self.fields.path_to_data_list,
[],
)
if self.fields.prehook:
list_of_data = self.fields.prehook(list_of_data)
for d in list_of_data:
row_values = []
for f in self.fields.fields:
value = d.get(f.name, f.default)
if f.transformation:
value = f.transformation(value)
row_values.append(value)
table.add_row(*row_values)
return table
@property
def result_as_str(self) -> str:
return self.result_as_text.plain
def render(self):
"""
Determine how and what to render to the console. If not using rich
rendering, all output is handled by typer, allowing outside tools such
as JQ to parse the results.
"""
if self.use_rich_rendering:
renderables: List[RenderableType] = []
if self.verbose:
d = self.details_as_text
renderables.append(d)
r = self.result_as_renderable
renderables.append(r)
if self.will_update:
s = self.spinner
renderables.append(s)
cli_content.rg = Group(*renderables)
else:
if self.verbose:
typer.secho(self.details_as_str, fg=self.detail_style, err=True)
typer.secho(self.result_as_str, fg=self.result_style, nl=True)
class RequestRunner:
"""
A utility class for encapsulating logic around repeated requests, error
handling, and output formatting. In general, run_and_render should be the
    interface into instances. But for fine-grained control over execution and
rendering, the individual methods may be called.
"""
def __init__(
self,
callable: Callable[[], GlobusHTTPResponse],
*,
format: OutputFormat = OutputFormat.json,
verbose: bool = False,
watch: bool = False,
run_once: bool = False,
fields: Optional[Type[DisplayFields]] = None,
detector: Type[CompletionDetector] = ActionCompletionDetector,
):
self.callable = callable
self.format = format
self.verbose = verbose
self.watch = watch
self.fields = fields
self.run_once = run_once
self.detector = detector
def run(self) -> Result:
result: Union[GlobusHTTPResponse, GlobusAPIError]
try:
result = self.callable()
except GlobusAPIError as err:
result = err
return Result(result, detector=self.detector)
def render(self, result: Result):
Renderer(
result,
verbose=self.verbose,
watching=self.watch,
format=self.format,
fields=self.fields,
run_once=self.run_once,
).render()
def render_as_result(
self,
d: Dict[str, Any],
client: BaseClient,
status_code: int = 200,
):
resp = Response()
resp.status_code = status_code
resp._content = json.dumps(d).encode("utf-8")
resp.headers.update({"Content-Type": "application/json"})
globus_resp = GlobusHTTPResponse(resp, client=client)
self.render(Result(globus_resp))
def run_and_render(self) -> Result:
result = self.run()
        # It's assumed that no additional auth URLs will need to be written to STDOUT
# because of the successful call to `self.run()`, above.
with live_content:
while True:
self.render(result)
if not self.watch or self.run_once or result.completed:
return result
sleep(2)
result = self.run()
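# ---------------------------------------------------------------------------
# Usage sketch; not part of the original module. `flows_client` stands in for
# any client object whose method returns a GlobusHTTPResponse (for example a
# FlowsClient with an enumerate_runs method); the name is hypothetical.
def _example_render_run_listing(flows_client):
    runner = RequestRunner(
        functools.partial(flows_client.enumerate_runs, per_page=10),
        format=OutputFormat.json,
        verbose=False,
        watch=False,
        fields=RunListDisplayFields,  # only consulted for table output
    )
    return runner.run_and_render()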
| 13,647 | 29.600897 | 86 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/cli/actions.py
|
import functools
from typing import List
import typer
from globus_automate_client.cli.callbacks import (
input_validator,
principal_validator,
url_validator_callback,
)
from globus_automate_client.cli.constants import OutputFormat
from globus_automate_client.cli.helpers import (
output_format_option,
process_input,
verbosity_option,
)
from globus_automate_client.cli.rich_helpers import RequestRunner
from globus_automate_client.client_helpers import create_action_client
app = typer.Typer(short_help="Manage Globus Automate Actions")
@app.command("introspect")
def action_introspect(
action_url: str = typer.Option(
...,
help="The url at which the target Action Provider is located.",
prompt=True,
callback=url_validator_callback,
),
action_scope: str = typer.Option(
None,
help="The scope this Action Provider uses to authenticate requests.",
callback=url_validator_callback,
),
verbose: bool = verbosity_option,
output_format: OutputFormat = output_format_option,
):
"""
Introspect an Action Provider's schema.
"""
ac = create_action_client(action_url, action_scope)
RequestRunner(ac.introspect, format=output_format, verbose=verbose).run_and_render()
@app.command("run")
def action_run(
action_url: str = typer.Option(
...,
help="The url at which the target Action Provider is located.",
prompt=True,
callback=url_validator_callback,
),
action_scope: str = typer.Option(
None,
help="The scope this Action Provider uses to authenticate requests.",
callback=url_validator_callback,
),
body: str = typer.Option(
...,
"--body",
"-b",
help=(
"The body to supply to the Action Provider. Can be a filename or raw "
"JSON string."
),
prompt=True,
callback=input_validator,
),
request_id: str = typer.Option(
None,
help="An identifier to associate with this Action invocation request",
),
manage_by: List[str] = typer.Option(
None,
help="A principal which may change the execution of the Action. The principal "
"is the user's or group's UUID prefixed with either 'urn:globus:groups:id:' "
"or 'urn:globus:auth:identity:' [repeatable]",
callback=principal_validator,
),
monitor_by: List[str] = typer.Option(
None,
help="A principal which may view the state of the Action. The principal "
"is the user's or group's UUID prefixed with either 'urn:globus:groups:id:' "
"or 'urn:globus:auth:identity:' [repeatable]",
callback=principal_validator,
),
verbose: bool = verbosity_option,
output_format: OutputFormat = output_format_option,
label: str = typer.Option(
None,
"--label",
"-l",
help="Optional label to mark this execution of the action.",
),
watch: bool = typer.Option(
False,
"--watch",
"-w",
help="Continuously poll this Action until it reaches a completed state.",
show_default=True,
),
):
"""
Launch an Action.
"""
parsed_body = process_input(body)
ac = create_action_client(action_url, action_scope)
method = functools.partial(
ac.run, parsed_body, request_id, manage_by, monitor_by, label=label
)
result = RequestRunner(
method,
format=output_format,
verbose=verbose,
watch=watch,
run_once=True,
).run_and_render()
if not result.is_api_error and watch:
action_id = result.data.get("action_id")
method = functools.partial(ac.status, action_id)
RequestRunner(
method,
format=output_format,
verbose=verbose,
watch=watch,
run_once=False,
).run_and_render()
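# Illustrative invocation (the console-script name, action URL, and body are
# assumptions, not taken from this module):
#
#     globus-automate action run \
#         --action-url https://example.org/my-action-provider \
#         --body '{"input": "value"}' \
#         --label demo --watch
#
# --body accepts either raw JSON or a filename, per the help text above.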
@app.command("status")
def action_status(
action_url: str = typer.Option(
...,
help="The url at which the target Action Provider is located.",
prompt=True,
callback=url_validator_callback,
),
action_scope: str = typer.Option(
None,
help="The scope this Action Provider uses to authenticate requests.",
callback=url_validator_callback,
),
action_id: str = typer.Argument(...),
verbose: bool = verbosity_option,
output_format: OutputFormat = output_format_option,
watch: bool = typer.Option(
False,
"--watch",
"-w",
help="Continuously poll this Action until it reaches a completed state. ",
show_default=True,
),
):
"""
Query an Action's status by its ACTION_ID.
"""
ac = create_action_client(action_url, action_scope)
method = functools.partial(ac.status, action_id)
RequestRunner(
method, format=output_format, verbose=verbose, watch=watch, run_once=False
).run_and_render()
@app.command("resume")
def action_resume(
action_url: str = typer.Option(
...,
help="The url at which the target Action Provider is located.",
prompt=True,
callback=url_validator_callback,
),
action_scope: str = typer.Option(
None,
help="The scope this Action Provider uses to authenticate requests.",
callback=url_validator_callback,
),
query_for_inactive_reason: bool = typer.Option(
True,
help=(
"Should the Action first be queried to determine the reason for the "
"resume, and prompt for additional consent if needed."
),
),
action_id: str = typer.Argument(...),
verbose: bool = verbosity_option,
output_format: OutputFormat = output_format_option,
watch: bool = typer.Option(
False,
"--watch",
"-w",
help="Continuously poll this Action until it reaches a completed state. ",
show_default=True,
),
):
"""
Resume an inactive Action by its ACTION_ID.
"""
ac = create_action_client(action_url, action_scope=action_scope)
if query_for_inactive_reason:
result = RequestRunner(
functools.partial(ac.status, action_id),
format=output_format,
verbose=verbose,
watch=watch,
run_once=True,
).run()
if not result.is_api_error:
body = result.data
status = body.get("status")
details = body.get("details", {})
code = details.get("code")
if status == "INACTIVE" and code == "ConsentRequired":
required_scope = details.get("required_scope")
if required_scope is not None:
ac = create_action_client(action_url, action_scope=required_scope)
result = RequestRunner(
functools.partial(ac.resume, action_id),
format=output_format,
verbose=verbose,
watch=watch,
run_once=True,
).run_and_render()
if not result.is_api_error and watch:
RequestRunner(
functools.partial(ac.status, action_id),
format=output_format,
verbose=verbose,
watch=watch,
).run_and_render()
@app.command("cancel")
def action_cancel(
action_url: str = typer.Option(
...,
help="The url at which the target Action Provider is located.",
prompt=True,
callback=url_validator_callback,
),
action_scope: str = typer.Option(
None,
help="The scope this Action Provider uses to authenticate requests.",
callback=url_validator_callback,
),
action_id: str = typer.Argument(...),
verbose: bool = verbosity_option,
output_format: OutputFormat = output_format_option,
):
"""
Terminate a running Action by its ACTION_ID.
"""
ac = create_action_client(action_url, action_scope)
method = functools.partial(ac.cancel, action_id)
RequestRunner(
method, format=output_format, verbose=verbose, watch=False, run_once=True
).run_and_render()
@app.command("release")
def action_release(
action_url: str = typer.Option(
...,
help="The url at which the target Action Provider is located.",
prompt=True,
callback=url_validator_callback,
),
action_scope: str = typer.Option(
None,
help="The scope this Action Provider uses to authenticate requests.",
callback=url_validator_callback,
),
action_id: str = typer.Argument(...),
verbose: bool = verbosity_option,
output_format: OutputFormat = output_format_option,
):
"""
Remove an Action's execution history by its ACTION_ID.
"""
ac = create_action_client(action_url, action_scope)
method = functools.partial(ac.release, action_id)
RequestRunner(
method, format=output_format, verbose=verbose, watch=False, run_once=True
).run_and_render()
if __name__ == "__main__":
app()
| 9,011 | 29.969072 | 88 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/cli/session.py
|
import json
import typer
from globus_automate_client.cli.auth import get_current_user, logout, revoke_login
from globus_automate_client.cli.helpers import verbosity_option
app = typer.Typer(short_help="Manage your session with the Automate Command Line")
@app.command("whoami")
def session_whoami(verbose: bool = verbosity_option):
"""
Determine the username for the identity logged in to Globus Auth.
If run with increased verbosity, the caller's full user information is
displayed.
"""
user = get_current_user()
if verbose:
output = json.dumps(user, indent=2)
else:
output = user["preferred_username"]
typer.secho(output, fg=typer.colors.GREEN)
@app.command("logout")
def session_logout():
"""
Remove all locally cached Globus Automate authentication information.
"""
logout()
typer.secho("Logged Out", fg=typer.colors.GREEN)
@app.command("revoke")
def session_revoke():
"""
Remove all locally cached Globus Automate authentication information and
invalidate all locally cached access or refresh tokens. These tokens can no
longer be used elsewhere.
"""
revoke_login()
typer.secho("All stored consents have been revoked", fg=typer.colors.GREEN)
| 1,256 | 27.568182 | 82 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/cli/queues.py
|
import functools
from enum import Enum
from typing import List
import typer
from globus_automate_client.cli.auth import CLIENT_ID
from globus_automate_client.cli.callbacks import input_validator, principal_validator
from globus_automate_client.cli.constants import OutputFormat
from globus_automate_client.cli.helpers import output_format_option, verbosity_option
from globus_automate_client.cli.rich_helpers import RequestRunner
from globus_automate_client.queues_client import create_queues_client
class QueueRole(str, Enum):
admin = "admin"
send = "sender"
receive = "receiver"
app = typer.Typer(short_help="Manage Globus Automate Queues")
@app.command("list")
def queue_list(
roles: List[QueueRole] = typer.Option(
[QueueRole.admin],
"--role",
"-r",
help="Display Queues where you have the selected role. [repeatable]",
case_sensitive=False,
show_default=True,
),
output_format: OutputFormat = output_format_option,
verbose: bool = verbosity_option,
):
"""
List Queues for which you have access.
"""
qc = create_queues_client(CLIENT_ID)
method = functools.partial(qc.list_queues, roles=[r.value for r in roles])
RequestRunner(
method, format=output_format, verbose=verbose, watch=False
).run_and_render()
@app.command("create")
def queue_create(
label: str = typer.Option(..., help="A convenient name to identify the new Queue."),
admins: List[str] = typer.Option(
...,
"--admin",
help="The Principal URNs allowed to administer the Queue. [repeatable]",
callback=principal_validator,
),
senders: List[str] = typer.Option(
...,
"--sender",
help="The Principal URNs allowed to send to the Queue. [repeatable]",
callback=principal_validator,
),
receivers: List[str] = typer.Option(
...,
"--receiver",
help="The Principal URNs allowed to receive from the Queue. [repeatable]",
callback=principal_validator,
),
delivery_timeout: int = typer.Option(
60, # TODO Update this default timeout once Queue's default is updated
help=(
"The minimum amount of time (in seconds) that the Queue Service should "
"wait for a message-delete request after delivering a message before "
"making the message visible for receiving by other consumers once "
"again. If used in conjunction with 'receiver_url' this value "
"represents the minimum amount of time (in seconds) that the Queue "
"Service should attempt to retry delivery of messages to the "
"'receiver_url' if delivery is not initially successful"
),
min=1,
max=1209600,
show_default=True,
),
output_format: OutputFormat = output_format_option,
verbose: bool = verbosity_option,
):
"""
Create a new Queue.
"""
qc = create_queues_client(CLIENT_ID)
method = functools.partial(
qc.create_queue, label, admins, senders, receivers, delivery_timeout
)
RequestRunner(method, format=output_format, verbose=verbose).run_and_render()
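# Illustrative invocation (hypothetical principals; the console-script name and
# the `queue` mount point are assumptions, not taken from this module):
#
#     globus-automate queue create --label demo \
#         --admin    urn:globus:auth:identity:00000000-0000-0000-0000-000000000000 \
#         --sender   urn:globus:auth:identity:00000000-0000-0000-0000-000000000000 \
#         --receiver urn:globus:groups:id:00000000-0000-0000-0000-000000000000
#
# The two URN prefixes shown are the user and group forms described in the
# principal help text used elsewhere in this CLI.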
@app.command("update")
def queue_update(
queue_id: str = typer.Argument(...),
label: str = typer.Option(..., help="A convenient name to identify the new Queue."),
admins: List[str] = typer.Option(
...,
"--admin",
help="The Principal URNs allowed to administer the Queue. [repeatable]",
callback=principal_validator,
),
senders: List[str] = typer.Option(
...,
"--sender",
help="The Principal URNs allowed to send to the Queue. [repeatable]",
callback=principal_validator,
),
receivers: List[str] = typer.Option(
...,
"--receiver",
help="The Principal URNs allowed to receive from the Queue. [repeatable]",
callback=principal_validator,
),
delivery_timeout: int = typer.Option(
...,
help=(
"The minimum amount of time (in seconds) that the Queue Service should "
"wait for a message-delete request after delivering a message before "
"making the message visible for receiving by other consumers once "
"again. If used in conjunction with 'receiver_url' this value "
"represents the minimum amount of time (in seconds) that the Queue "
"Service should attempt to retry delivery of messages to the "
"'receiver_url' if delivery is not initially successful"
),
min=1,
max=1209600,
),
visibility_timeout: int = typer.Option(
30,
min=1,
max=43200,
),
output_format: OutputFormat = output_format_option,
verbose: bool = verbosity_option,
):
"""
Update a Queue's properties. Requires the admin role on the Queue.
"""
qc = create_queues_client(CLIENT_ID)
method = functools.partial(
qc.update_queue,
queue_id,
label,
admins,
senders,
receivers,
delivery_timeout,
visibility_timeout,
)
RequestRunner(method, format=output_format, verbose=verbose).run_and_render()
@app.command("display")
def queue_display(
queue_id: str = typer.Argument(...),
output_format: OutputFormat = output_format_option,
verbose: bool = verbosity_option,
):
"""
Display the description of a Queue based on its id.
"""
qc = create_queues_client(CLIENT_ID)
RequestRunner(
functools.partial(qc.get_queue, queue_id), format=output_format, verbose=verbose
).run_and_render()
@app.command("delete")
def queue_delete(
queue_id: str = typer.Argument(...),
output_format: OutputFormat = output_format_option,
verbose: bool = verbosity_option,
):
"""
Delete a Queue based on its id. You must have either
created the Queue or have a role defined on the Queue.
"""
qc = create_queues_client(CLIENT_ID)
RequestRunner(
functools.partial(qc.delete_queue, queue_id),
format=output_format,
verbose=verbose,
).run_and_render()
@app.command("message-receive")
def queue_receive(
queue_id: str = typer.Argument(...),
max_messages: int = typer.Option(
None, help="The maximum number of messages to retrieve from the Queue", min=0
),
output_format: OutputFormat = output_format_option,
verbose: bool = verbosity_option,
):
"""
Receive a message from a Queue. You must have the
"receiver" role on the Queue to perform this action.
"""
qc = create_queues_client(CLIENT_ID)
RequestRunner(
functools.partial(qc.receive_messages, queue_id, max_messages=max_messages),
format=output_format,
verbose=verbose,
).run_and_render()
@app.command("message-send")
def queue_send(
queue_id: str = typer.Argument(...),
message: str = typer.Option(
...,
"--message",
"-m",
help="Text of the message to send. Files may also be referenced.",
prompt=True,
callback=input_validator,
),
output_format: OutputFormat = output_format_option,
verbose: bool = verbosity_option,
):
"""
Send a message to a Queue. You must have the "sender" role
on the Queue to perform this action.
"""
qc = create_queues_client(CLIENT_ID)
RequestRunner(
functools.partial(qc.send_message, queue_id, message),
format=output_format,
verbose=verbose,
).run_and_render()
@app.command("message-delete")
def queue_delete_message(
queue_id: str = typer.Argument(...),
receipt_handle: List[str] = typer.Option(
...,
help=(
"A receipt_handle value returned by a previous call to "
"receive message. [repeatable]"
),
),
output_format: OutputFormat = output_format_option,
verbose: bool = verbosity_option,
):
"""
Notify a Queue that a message has been processed.
"""
qc = create_queues_client(CLIENT_ID)
RequestRunner(
functools.partial(qc.delete_messages, queue_id, receipt_handle),
format=output_format,
verbose=verbose,
).run_and_render()
| 8,256 | 30.880309 | 88 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/cli/helpers.py
|
import json
from typing import Any, Callable, Dict, List, Mapping, Optional, Union
import requests
import typer
import yaml
from globus_sdk import GlobusAPIError, GlobusHTTPResponse
from globus_automate_client.cli.callbacks import flows_endpoint_envvar_callback
from globus_automate_client.cli.constants import (
ActionRoleAllNames,
FlowRoleAllNames,
OutputFormat,
)
GlobusCallable = Callable[[], GlobusHTTPResponse]
GlobusAPIResponse = Union[GlobusAPIError, GlobusHTTPResponse]
verbosity_option = typer.Option(
False, "--verbose", "-v", help="Run with increased verbosity", show_default=False
)
flows_env_var_option = typer.Option(
None,
hidden=True,
callback=flows_endpoint_envvar_callback,
)
output_format_option: OutputFormat = typer.Option(
OutputFormat.json,
"--format",
"-f",
help="Output display format.",
case_sensitive=False,
show_default=True,
)
def get_http_details(result: Union[GlobusHTTPResponse, GlobusAPIError]) -> str:
if isinstance(result, GlobusHTTPResponse):
if isinstance(result._response, requests.Response):
base_request = result._response.request
elif result._wrapped and isinstance(
result._wrapped._response, requests.Response
):
base_request = result._wrapped._response.request
else:
return "HTTP details are unavailable"
response_status_code = result.http_status
else: # isinstance(result, GlobusAPIError)
base_request = result._underlying_response.request
response_status_code = result._underlying_response.status_code
formatted_headers = "\n".join(
f" {k}: {v}" for k, v in base_request.headers.items()
)
http_details = (
f"Request: {base_request.method} {base_request.url}\n"
f"Headers:\n{formatted_headers}\n"
f"Response: {response_status_code}"
)
return http_details
def process_input(input_arg: Optional[str]) -> Optional[Mapping[str, Any]]:
"""
Turn input strings into dicts
"""
if input_arg is None:
return None
try:
input_dict = json.loads(input_arg)
except json.JSONDecodeError:
try:
input_dict = yaml.safe_load(input_arg)
except yaml.YAMLError:
raise typer.BadParameter("Unable to load input as JSON or YAML")
return input_dict
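# Behavior sketch (illustrative values, not part of the original module):
#
#     process_input('{"a": 1}')    -> {"a": 1}   # parsed as JSON first
#     process_input("a: 1")        -> {"a": 1}   # falls back to YAML
#     process_input("{not valid")  -> raises typer.BadParameter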
def parse_query_options(queries: Optional[List[str]]) -> Dict[str, str]:
result: Dict[str, str] = {}
if queries is None:
return result
for q in queries:
try:
field, pattern = q.split("=")
except ValueError:
raise typer.BadParameter(
f"Issue parsing '{q}'. Options should be of the form 'field=pattern'."
)
if pattern == "":
raise typer.BadParameter(f"Issue parsing '{q}'. Missing pattern.")
result[field] = pattern
return result
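# Behavior sketch (illustrative values, not part of the original module):
#
#     parse_query_options(["status=ACTIVE", "label=nightly"])
#         -> {"status": "ACTIVE", "label": "nightly"}
#     parse_query_options(["status="])   -> raises typer.BadParameter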
def make_role_param(
roles_list: Optional[Union[List[FlowRoleAllNames], List[ActionRoleAllNames]]]
) -> Mapping[str, Any]:
if roles_list is None or len(roles_list) == 0:
return {"role": None}
elif len(roles_list) == 1:
return {"role": roles_list[0].value}
else:
typer.secho(
"Warning: Use of multiple --role options is deprecated",
err=True,
fg=typer.colors.YELLOW,
)
return {"roles": [r.value for r in roles_list]}
| 3,446 | 28.715517 | 86 |
py
|
globus-automate-client
|
globus-automate-client-main/globus_automate_client/cli/flows.py
|
import functools
import textwrap
import uuid
import warnings
from typing import Any, List, Optional, Tuple
import typer
from globus_automate_client.cli.auth import CLIENT_ID
from globus_automate_client.cli.callbacks import (
custom_principal_validator,
flow_input_validator,
input_validator,
principal_validator,
url_validator_callback,
)
from globus_automate_client.cli.constants import (
ActionRole,
ActionRoleAllNames,
ActionStatus,
FlowRole,
FlowRoleAllNames,
ImageOutputFormat,
ListingOutputFormat,
OutputFormat,
RunLogOutputFormat,
)
from globus_automate_client.cli.helpers import (
flows_env_var_option,
make_role_param,
output_format_option,
parse_query_options,
process_input,
verbosity_option,
)
from globus_automate_client.cli.rich_helpers import (
FlowListDisplayFields,
LogCompletionDetector,
RequestRunner,
RunListDisplayFields,
RunLogDisplayFields,
)
from globus_automate_client.client_helpers import create_flows_client
from globus_automate_client.flows_client import (
RUN_STATUS_SCOPE,
FlowValidationError,
validate_flow_definition,
)
from globus_automate_client.helpers import validate_aliases
app = typer.Typer(short_help="Manage Globus Automate Flows")
_principal_description = (
"The principal value is the user's Globus Auth username or their identity "
"UUID in the form urn:globus:auth:identity:<UUID>. A Globus Group may also be "
"used using the form urn:globus:groups:id:<GROUP_UUID>."
)
def dedent(text: str) -> str:
"""Dedent help text, so it wraps neatly on the command line."""
return textwrap.dedent(text).strip()
def handle_aliases(canonical_item: Tuple[str, Any], *aliases: Tuple[str, Any]) -> Any:
"""Validate aliases, and handle exceptions in a CLI context."""
try:
return validate_aliases(canonical_item, *aliases)
except ValueError as error:
typer.secho(error.args[0], err=True)
raise typer.Abort()
except DeprecationWarning as warning:
typer.secho(warning.args[0], err=True)
return warning.args[2]
@app.callback()
def flows():
"""
Manage Globus Automate Flows
To target a different Flows service endpoint, export the
GLOBUS_AUTOMATE_FLOWS_ENDPOINT environment variable.
"""
@app.command("deploy")
def flow_deploy(
title: str = typer.Option(..., help="The Flow's title.", prompt=True),
definition: str = typer.Option(
...,
help=(
"JSON or YAML representation of the Flow to deploy. "
"May be provided as a filename or a raw string "
"representing a JSON object or YAML definition."
),
prompt=True,
callback=input_validator,
),
subtitle: str = typer.Option(
None,
help="A subtitle for the Flow providing additional, brief description.",
),
description: str = typer.Option(
None, help="A long form description of the Flow's purpose or usage."
),
input_schema: str = typer.Option(
None,
help=(
"A JSON or YAML representation of a JSON Schema which will be used to "
"validate the input to the deployed Flow when it is run. "
"If not provided, no validation will be performed on Flow input. "
"May be provided as a filename or a raw string."
),
callback=input_validator,
),
keywords: List[str] = typer.Option(
None,
"--keyword",
help="A keyword which may categorize or help discover the Flow. [repeatable]",
),
flow_viewer: List[str] = typer.Option(
None,
help=(
"A principal which may view this Flow. "
+ _principal_description
+ " The special value of 'public' may be used to "
"indicate that any user can view this Flow. [repeatable]"
),
callback=custom_principal_validator({"public"}),
hidden=False,
),
# viewer and visible_to are aliases for flow_viewer.
# Both are deprecated.
viewer: List[str] = typer.Option(
None,
callback=custom_principal_validator({"public"}),
hidden=True,
),
visible_to: List[str] = typer.Option(
None,
callback=custom_principal_validator({"public"}),
hidden=True,
),
flow_starter: List[str] = typer.Option(
None,
help=(
"A principal which may run an instance of the deployed Flow. "
+ _principal_description
+ "The special value of "
"'all_authenticated_users' may be used to indicate that "
"any authenticated user can invoke this flow. [repeatable]"
),
callback=custom_principal_validator({"all_authenticated_users"}),
),
# starter and runnable_by are aliases for flow_starter.
# Both are deprecated.
starter: List[str] = typer.Option(
None,
callback=custom_principal_validator({"all_authenticated_users"}),
hidden=True,
),
runnable_by: List[str] = typer.Option(
None,
callback=custom_principal_validator({"all_authenticated_users"}),
hidden=True,
),
flow_administrator: List[str] = typer.Option(
None,
help=(
"A principal which may update the deployed Flow. "
+ _principal_description
+ "[repeatable]"
),
callback=principal_validator,
),
# administrator and administered_by are aliases for flow_administrator.
# Both are deprecated.
administrator: List[str] = typer.Option(
None, callback=principal_validator, hidden=True
),
administered_by: List[str] = typer.Option(
None, callback=principal_validator, hidden=True
),
subscription_id: Optional[str] = typer.Option(
None,
help="The ID of the Globus Subscription which will manage the Flow.",
),
validate: bool = typer.Option(
True,
help="(EXPERIMENTAL) Perform rudimentary validation of the flow definition.",
case_sensitive=False,
show_default=True,
),
flows_endpoint: str = flows_env_var_option,
verbose: bool = verbosity_option,
output_format: OutputFormat = output_format_option,
dry_run: bool = typer.Option(
False,
"--dry-run",
help=(
"Do a dry run of deploying the flow to test your definition without"
" actually making changes."
),
),
):
"""
Deploy a new Flow.
"""
flow_viewer = handle_aliases(
("--flow-viewer", flow_viewer),
("--viewer", viewer or None),
("--visible-to", visible_to or None),
)
flow_starter = handle_aliases(
("--flow-starter", flow_starter),
("--starter", starter or None),
("--runnable-by", runnable_by or None),
)
flow_administrator = handle_aliases(
("--flow-administrator", flow_administrator),
("--administrator", administrator or None),
("--administered-by", administered_by or None),
)
fc = create_flows_client(CLIENT_ID, flows_endpoint)
flow_dict = process_input(definition)
input_schema_dict = process_input(input_schema)
if input_schema_dict is None:
# If no input schema is provided, default to a no-op schema.
input_schema_dict = {}
method = functools.partial(
fc.deploy_flow,
flow_dict,
title,
subtitle,
description,
keywords,
flow_viewer,
flow_starter,
flow_administrator,
subscription_id,
input_schema_dict,
validate_definition=validate,
dry_run=dry_run,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
RequestRunner(method, format=output_format, verbose=verbose).run_and_render()
@app.command("get")
def flow_get(
flow_id: uuid.UUID = typer.Argument(..., help="A deployed Flow's ID"),
output_format: OutputFormat = output_format_option,
verbose: bool = verbosity_option,
flows_endpoint: str = flows_env_var_option,
):
"""
Get a Flow's definition as it exists on the Flows service.
"""
fc = create_flows_client(CLIENT_ID, flows_endpoint)
method = functools.partial(fc.get_flow, str(flow_id))
RequestRunner(method, format=output_format, verbose=verbose).run_and_render()
@app.command("update")
def flow_update(
flow_id: str = typer.Argument(...),
title: str = typer.Option(None, help="The Flow's title."),
definition: str = typer.Option(
None,
help=(
"JSON or YAML representation of the Flow to update. "
"May be provided as a filename or a raw string."
),
callback=input_validator,
),
subtitle: str = typer.Option(
None,
help="A subtitle for the Flow providing additional, brief description.",
),
description: str = typer.Option(
None, help="A long form description of the Flow's purpose or usage."
),
input_schema: str = typer.Option(
None,
help=(
"A JSON or YAML representation of a JSON Schema which will be used to "
"validate the input to the deployed Flow when it is run. "
"If not provided, no validation will be performed on Flow input. "
"May be provided as a filename or a raw string."
),
callback=input_validator,
),
keywords: List[str] = typer.Option(
None,
"--keyword",
help="A keyword which may categorize or help discover the Flow. [repeatable]",
),
flow_viewer: Optional[List[str]] = typer.Option(
None,
help=(
"A principal which may view this flow. "
f"{_principal_description} "
"\n\nThe special value of 'public' may be used to "
"indicate that any user can view this flow. "
"\n\nThis option can be used multiple times."
"\n\nTo erase any existing viewer permissions, "
'use the empty string "" once.'
),
callback=custom_principal_validator({"public", ""}),
),
viewer: List[str] = typer.Option(
None, callback=custom_principal_validator({"public", ""}), hidden=True
),
visible_to: List[str] = typer.Option(
None, callback=custom_principal_validator({"public", ""}), hidden=True
),
flow_starter: Optional[List[str]] = typer.Option(
None,
help=(
"A principal which may start an instance of the flow. "
f"{_principal_description}"
"\n\nThe special value of 'all_authenticated_users' may be used "
"to indicate that any authenticated user can invoke this flow. "
"\n\nThis option can be used multiple times."
"\n\nTo erase any existing starter permissions, "
'use the empty string "" once.'
),
callback=custom_principal_validator({"all_authenticated_users", ""}),
),
starter: List[str] = typer.Option(
None,
callback=custom_principal_validator({"all_authenticated_users", ""}),
hidden=True,
),
runnable_by: List[str] = typer.Option(
None,
callback=custom_principal_validator({"all_authenticated_users", ""}),
hidden=True,
),
flow_administrator: Optional[List[str]] = typer.Option(
None,
help=(
"A principal which may update the deployed Flow. "
f"{_principal_description} "
"\n\nThis option can be used multiple times."
"\n\nTo erase any existing administrator permissions, "
'use the empty string "" once.'
),
callback=custom_principal_validator({""}),
),
administrator: List[str] = typer.Option(
None, callback=custom_principal_validator({""}), hidden=True
),
administered_by: List[str] = typer.Option(
None, callback=custom_principal_validator({""}), hidden=True
),
subscription_id: Optional[str] = typer.Option(
None,
help="The Globus Subscription which will be used to make this flow managed.",
),
validate: bool = typer.Option(
True,
help="(EXPERIMENTAL) Perform rudimentary validation of the flow definition.",
case_sensitive=False,
show_default=True,
),
flows_endpoint: str = flows_env_var_option,
verbose: bool = verbosity_option,
output_format: OutputFormat = output_format_option,
):
"""
Update a Flow.
"""
flow_viewer = handle_aliases(
("--flow-viewer", flow_viewer),
("--viewer", viewer or None),
("--visible-to", visible_to or None),
)
flow_starter = handle_aliases(
("--flow-starter", flow_starter),
("--starter", starter or None),
("--runnable-by", runnable_by or None),
)
flow_administrator = handle_aliases(
("--flow-administrator", flow_administrator),
("--administrator", administrator or None),
("--administered-by", administered_by or None),
)
# Special cases:
# * If the user specifies a single empty string, replace [""] with []
# so all values currently set on the flow will be erased.
# * If the user specifies nothing, replace the default empty list with None
# to prevent erasure of the values currently set on the flow.
flow_viewer = [] if flow_viewer == [""] else (flow_viewer or None)
flow_starter = [] if flow_starter == [""] else (flow_starter or None)
flow_administrator = (
[] if flow_administrator == [""] else (flow_administrator or None)
)
fc = create_flows_client(CLIENT_ID, flows_endpoint)
flow_dict = process_input(definition)
input_schema_dict = process_input(input_schema)
method = functools.partial(
fc.update_flow,
flow_id,
flow_dict,
title,
subtitle,
description,
keywords,
flow_viewer,
flow_starter,
flow_administrator,
subscription_id,
input_schema_dict,
validate_definition=validate,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
RequestRunner(method, format=output_format, verbose=verbose).run_and_render()
@app.command("lint")
def flow_lint(
definition: str = typer.Option(
...,
help=(
"JSON or YAML representation of the Flow to deploy. "
"May be provided as a filename or a raw string."
),
prompt=True,
callback=input_validator,
),
):
"""
Parse and validate a Flow definition by providing visual output.
"""
flow_dict = process_input(definition)
try:
validate_flow_definition(flow_dict)
except FlowValidationError as fve:
typer.secho(str(fve), fg=typer.colors.RED)
raise typer.Exit(code=1)
typer.secho("No issues found in the Flow definition.", fg=typer.colors.GREEN)
@app.command("list")
def flow_list(
roles: List[FlowRoleAllNames] = typer.Option(
[FlowRole.flow_owner],
"--role",
"-r",
help=(
"Display Flows where you have at least the selected role. "
"Precedence of roles is: flow_viewer, flow_starter, flow_administrator, "
"flow_owner. Thus, by specifying, for example, flow_starter, all flows "
"for which you have flow_starter, flow_administrator, or flow_owner roles "
"will be displayed. Values visible_to, runnable_by, administered_by and "
"created_by are deprecated. [repeatable use deprecated as the lowest "
"precedence value provided will determine the flows displayed.]"
),
case_sensitive=False,
show_default=True,
),
marker: str = typer.Option(
None,
"--marker",
"-m",
help="A pagination token for iterating through returned data.",
),
per_page: int = typer.Option(
None,
"--per-page",
"-p",
help=(
"The page size to return. Only valid when used without providing a marker."
),
min=1,
max=50,
),
flows_endpoint: str = flows_env_var_option,
filters: Optional[List[str]] = typer.Option(
None,
"--filter",
help="A filtering criteria in the form 'key=value' to apply to the "
"resulting Flow listing. The key indicates the filter, the value "
"indicates the pattern to match. Multiple patterns for a single key may "
"be specified as a comma separated string, the results for which will "
"represent a logical OR. If multiple filters are applied, the returned "
"data will be the result of a logical AND between them. [repeatable]",
),
orderings: Optional[List[str]] = typer.Option(
None,
"--orderby",
help="An ordering criteria in the form 'key=value' to apply to the resulting "
"Flow listing. The key indicates the field to order on, and the value is "
"either ASC, for ascending order, or DESC, for descending order. The first "
"ordering criteria will be used to sort the data, subsequent ordering criteria "
"will further sort ties. [repeatable]",
),
verbose: bool = verbosity_option,
output_format: ListingOutputFormat = typer.Option(
ListingOutputFormat.table,
"--format",
"-f",
help="Output display format.",
case_sensitive=False,
show_default=True,
),
watch: bool = typer.Option(
False,
"--watch",
"-w",
help="Continuously poll for new Flows.",
show_default=True,
),
):
"""
List Flows for which you have access.
"""
parsed_filters = parse_query_options(filters)
parsed_orderings = parse_query_options(orderings)
role_param = make_role_param(roles)
fc = create_flows_client(CLIENT_ID, flows_endpoint)
method = functools.partial(
fc.list_flows,
marker=marker,
per_page=per_page,
filters=parsed_filters,
orderings=parsed_orderings,
**role_param,
)
RequestRunner(
method,
format=output_format,
verbose=verbose,
watch=watch,
fields=FlowListDisplayFields,
).run_and_render()
@app.command("display")
def flow_display(
flow_id: str = typer.Argument("", show_default=False),
flow_definition: str = typer.Option(
"",
help=(
"JSON or YAML representation of the Flow to display. "
"May be provided as a filename or a raw string "
"representing a JSON object or YAML definition."
),
callback=input_validator,
show_default=False,
),
output_format: ImageOutputFormat = typer.Option(
ImageOutputFormat.json,
"--format",
"-f",
help="Output display format.",
case_sensitive=False,
show_default=True,
),
flows_endpoint: str = flows_env_var_option,
):
"""
    Visualize a local or deployed Flow definition. If providing a Flow's ID, you
must have either created the Flow or be present in the Flow's "flow_viewers"
list to view it.
"""
if not flow_definition and not flow_id:
raise typer.BadParameter("Either FLOW_ID or --flow_definition should be set.")
if flow_definition and flow_id:
raise typer.BadParameter(
"Only one of FLOW_ID or --flow_definition should be set."
)
fc = create_flows_client(CLIENT_ID, flows_endpoint)
rr = RequestRunner(
functools.partial(fc.get_flow, flow_id),
format=output_format,
verbose=False,
watch=False,
)
if flow_id:
result = rr.run()
if result.is_api_error:
rr.format = (
output_format
if output_format in {ImageOutputFormat.json, ImageOutputFormat.yaml}
else ImageOutputFormat.json
)
rr.render(result)
raise typer.Exit(1)
else:
flow_dict = result.data["definition"]
else:
flow_dict = process_input(flow_definition)
    if output_format in {ImageOutputFormat.json, ImageOutputFormat.yaml}:
        # An HTTP status is only available when the definition was fetched from
        # the Flows service; fall back to 200 for a locally supplied definition.
        status_code = result.result.http_status if flow_id else 200
        rr.render_as_result(flow_dict, client=fc, status_code=status_code)
else:
output_format.visualize(flow_dict)
@app.command("delete")
def flow_delete(
flow_id: str = typer.Argument(...),
output_format: OutputFormat = output_format_option,
flows_endpoint: str = flows_env_var_option,
verbose: bool = verbosity_option,
):
"""
Delete a Flow. You must be in the Flow's "flow_administrators" list.
"""
fc = create_flows_client(CLIENT_ID, flows_endpoint)
method = functools.partial(fc.delete_flow, flow_id)
RequestRunner(
method,
format=output_format,
verbose=verbose,
).run_and_render()
@app.command("run")
def flow_run(
flow_id: str = typer.Argument(...),
flow_input: str = typer.Option(
...,
help=(
"JSON or YAML formatted input to the Flow. May be provided as a filename "
"or a raw string."
),
callback=flow_input_validator,
),
flow_scope: str = typer.Option(
None,
help="The scope this Flow uses to authenticate requests.",
callback=url_validator_callback,
),
run_manager: List[str] = typer.Option(
None,
help="A principal which may change the execution of the Flow instance. "
+ _principal_description
+ " [repeatable]",
callback=principal_validator,
),
manage_by: List[str] = typer.Option(
None, callback=principal_validator, hidden=True
),
run_monitor: List[str] = typer.Option(
None,
help="A principal which may monitor the execution of the Flow instance. "
+ _principal_description
+ " [repeatable]",
callback=principal_validator,
),
monitor_by: List[str] = typer.Option(
None, callback=principal_validator, hidden=True
),
flows_endpoint: str = flows_env_var_option,
verbose: bool = verbosity_option,
output_format: ListingOutputFormat = typer.Option(
None,
"--format",
"-f",
help=(
"Output display format."
" If --watch is enabled then the default is 'table',"
" otherwise 'json' is the default."
),
case_sensitive=False,
show_default=False,
),
label: str = typer.Option(
...,
"--label",
"-l",
help="Label to mark this run.",
),
tags: Optional[List[str]] = typer.Option(
None,
"--tag",
help=dedent(
"""
A tag to associate with this Run.
This option can be used multiple times.
            The full collection of tags will be associated with the Run.
"""
),
),
watch: bool = typer.Option(
False,
"--watch",
"-w",
help=(
"Continuously poll this Action until it reaches a completed state."
" If enabled the default output format is 'table'."
),
show_default=True,
),
dry_run: bool = typer.Option(
False,
"--dry-run",
help=(
"Do a dry run with your input to this flow to test the input without"
" actually running anything."
),
),
):
"""
Run an instance of a Flow. The argument provides the initial state of the Flow.
You must be in the Flow's "flow_starters" list.
"""
if not output_format:
# Default to JSON if the user did not specify an output format.
# However, if watch is enabled, default to tabular output.
output_format = ListingOutputFormat.json
if watch:
output_format = ListingOutputFormat.table
fc = create_flows_client(CLIENT_ID, flows_endpoint)
flow_input_dict = process_input(flow_input)
method = functools.partial(
fc.run_flow,
flow_id,
flow_scope,
flow_input_dict,
run_monitors=run_monitor,
run_managers=run_manager,
label=label,
dry_run=dry_run,
monitor_by=monitor_by,
manage_by=manage_by,
tags=tags,
)
result = RequestRunner(
method,
format=output_format,
verbose=verbose,
watch=watch,
fields=RunLogDisplayFields,
run_once=True,
).run_and_render()
if not result.is_api_error and watch:
action_id = result.data.get("action_id")
return flow_action_log(
action_id=action_id,
flow_id=flow_id,
flow_scope=flow_scope,
reverse=False,
limit=100,
marker=None,
per_page=50,
output_format=output_format,
watch=watch,
flows_endpoint=flows_endpoint,
verbose=verbose,
)
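# Illustrative invocation (hypothetical flow ID and input; the console-script
# name and the `flow` mount point are assumptions, not taken from this module):
#
#     globus-automate flow run 00000000-0000-0000-0000-000000000000 \
#         --flow-input '{"echo_string": "hello"}' --label demo --watch
#
# With --watch, run_flow is called once and the command then hands off to
# flow_action_log (defined later in this module), which defaults to tabular
# output while polling.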
@app.command("action-list")
@app.command("run-list")
def flow_actions_list(
flow_id: Optional[str] = typer.Option(
None,
help="The ID for the Flow which triggered the Action. If not present runs "
"from all Flows will be displayed.",
),
flow_scope: str = typer.Option(
None,
help="The scope this Flow uses to authenticate requests.",
callback=url_validator_callback,
),
roles: List[ActionRoleAllNames] = typer.Option(
None,
"--role",
help=(
"Display Actions/Runs where you have at least the selected role. "
"Precedence of roles is: run_monitor, run_manager, "
"run_owner. Thus, by specifying, for example, run_manager, all runs "
"for which you have run_manager or run_owner roles "
"will be displayed. [repeatable use deprecated as the lowest precedence "
"value provided will determine the flows displayed.]"
),
),
statuses: List[ActionStatus] = typer.Option(
[],
"--status",
help="Display Actions with the selected status. [repeatable]",
),
marker: str = typer.Option(
None,
"--marker",
"-m",
help="A pagination token for iterating through returned data.",
),
per_page: int = typer.Option(
None,
"--per-page",
"-p",
help=(
"The page size to return. Only valid when used without providing a marker."
),
min=1,
max=50,
),
filters: Optional[List[str]] = typer.Option(
None,
"--filter",
help="A filtering criteria in the form 'key=value' to apply to the "
"resulting Action listing. The key indicates the filter, the value "
"indicates the pattern to match. Multiple patterns for a single key may "
"be specified as a comma separated string, the results for which will "
"represent a logical OR. If multiple filters are applied, the returned "
"data will be the result of a logical AND between them. [repeatable]",
),
orderings: Optional[List[str]] = typer.Option(
None,
"--orderby",
help="An ordering criteria in the form 'key=value' to apply to the resulting "
"Flow listing. The key indicates the field to order on, and the value is "
"either ASC, for ascending order, or DESC, for descending order. The first "
"ordering criteria will be used to sort the data, subsequent ordering criteria "
"will further sort ties. [repeatable]",
),
flows_endpoint: str = flows_env_var_option,
verbose: bool = verbosity_option,
watch: bool = typer.Option(
False,
"--watch",
"-w",
help="Continuously poll for new Actions.",
show_default=True,
),
output_format: ListingOutputFormat = typer.Option(
ListingOutputFormat.table,
"--format",
"-f",
help="Output display format.",
case_sensitive=False,
show_default=True,
),
):
"""
List a Flow definition's discrete invocations.
"""
parsed_filters = parse_query_options(filters)
parsed_orderings = parse_query_options(orderings)
statuses_str = [s.value for s in statuses]
role_param = make_role_param(roles)
fc = create_flows_client(CLIENT_ID, flows_endpoint)
method = functools.partial(
fc.list_flow_actions,
flow_id=flow_id,
flow_scope=flow_scope,
statuses=statuses_str,
marker=marker,
per_page=per_page,
filters=parsed_filters,
orderings=parsed_orderings,
**role_param,
)
RequestRunner(
method,
format=output_format,
verbose=verbose,
watch=watch,
fields=RunListDisplayFields,
).run_and_render()
@app.command("action-status")
@app.command("run-status")
def flow_action_status(
action_id: str = typer.Argument(...),
flow_id: uuid.UUID = typer.Option(
...,
help="The ID for the Flow which triggered the Action.",
),
flow_scope: str = typer.Option(
None,
help="The scope this Flow uses to authenticate requests.",
callback=url_validator_callback,
),
flows_endpoint: str = flows_env_var_option,
watch: bool = typer.Option(
False,
"--watch",
"-w",
help="Continuously poll this Action until it reaches a completed state.",
show_default=True,
),
output_format: OutputFormat = output_format_option,
verbose: bool = verbosity_option,
):
"""
Display the status for a Flow definition's particular invocation.
"""
fc = create_flows_client(CLIENT_ID, flows_endpoint)
method = functools.partial(fc.flow_action_status, flow_id, flow_scope, action_id)
RequestRunner(
method, format=output_format, verbose=verbose, watch=watch
).run_and_render()
@app.command("run-definition")
def get_flow_definition_for_run(
run_id: str = typer.Argument(...),
flow_id: uuid.UUID = typer.Option(
...,
help="The ID for the Flow which triggered the Action.",
),
flow_scope: str = typer.Option(
None,
help="The scope this Flow uses to authenticate requests.",
callback=url_validator_callback,
),
flows_endpoint: str = flows_env_var_option,
output_format: OutputFormat = output_format_option,
verbose: bool = verbosity_option,
):
"""
Get the flow definition and input schema used to start this run.
"""
fc = create_flows_client(CLIENT_ID, flows_endpoint)
method = functools.partial(
fc.get_flow_definition_for_run, flow_id, flow_scope, run_id
)
RequestRunner(method, format=output_format, verbose=verbose).run_and_render()
@app.command("action-resume")
@app.command("run-resume")
def flow_action_resume(
action_id: str = typer.Argument(...),
flow_id: str = typer.Option(
...,
help="The ID for the Flow which triggered the Action.",
prompt=True,
),
flow_scope: str = typer.Option(
None,
help="The scope this Flow uses to authenticate requests.",
callback=url_validator_callback,
),
query_for_inactive_reason: bool = typer.Option(
True,
help=(
"Should the Action first be queried to determine the reason for the "
"resume, and prompt for additional consent if needed."
),
),
flows_endpoint: str = flows_env_var_option,
output_format: OutputFormat = output_format_option,
watch: bool = typer.Option(
False,
"--watch",
"-w",
help="Continuously poll this Action until it reaches a completed state.",
show_default=True,
),
verbose: bool = verbosity_option,
):
"""Resume a Flow in the INACTIVE state. If query-for-inactive-reason is set,
and the Flow Action is in an INACTIVE state due to requiring additional Consent,
the required Consent will be determined, and you may be prompted to allow Consent
using the Globus Auth web interface.
"""
fc = create_flows_client(CLIENT_ID, flows_endpoint)
if query_for_inactive_reason:
result = RequestRunner(
functools.partial(fc.flow_action_status, flow_id, flow_scope, action_id),
format=output_format,
verbose=verbose,
watch=watch,
run_once=True,
).run_and_render()
if not result.is_api_error:
body = result.data
status = body.get("status")
details = body.get("details", {})
code = details.get("code")
if status == "INACTIVE" and code == "ConsentRequired":
flow_scope = details.get("required_scope")
result = RequestRunner(
functools.partial(fc.flow_action_resume, flow_id, flow_scope, action_id),
format=output_format,
verbose=verbose,
watch=watch,
run_once=True,
).run_and_render()
if not result.is_api_error and watch:
RequestRunner(
functools.partial(fc.flow_action_status, flow_id, flow_scope, action_id),
format=output_format,
verbose=verbose,
watch=watch,
).run_and_render()
@app.command("action-release")
@app.command("run-release")
def flow_action_release(
action_id: str = typer.Argument(...),
flow_id: str = typer.Option(
...,
help="The ID for the Flow which triggered the Action.",
prompt=True,
),
flow_scope: str = typer.Option(
None,
help="The scope this Flow uses to authenticate requests.",
callback=url_validator_callback,
),
output_format: OutputFormat = output_format_option,
flows_endpoint: str = flows_env_var_option,
verbose: bool = verbosity_option,
):
"""
Remove execution history for a particular Flow definition's invocation.
After this, no further information about the run can be accessed.
"""
fc = create_flows_client(CLIENT_ID, flows_endpoint)
method = functools.partial(fc.flow_action_release, flow_id, flow_scope, action_id)
RequestRunner(
method, format=output_format, verbose=verbose, watch=False
).run_and_render()
@app.command("action-cancel")
@app.command("run-cancel")
def flow_action_cancel(
action_id: str = typer.Argument(...),
flow_id: str = typer.Option(
...,
help="The ID for the Flow which triggered the Action.",
prompt=True,
),
flow_scope: str = typer.Option(
None,
help="The scope this Flow uses to authenticate requests.",
callback=url_validator_callback,
),
output_format: OutputFormat = output_format_option,
flows_endpoint: str = flows_env_var_option,
verbose: bool = verbosity_option,
):
"""
Cancel an active execution for a particular Flow definition's invocation.
"""
fc = create_flows_client(CLIENT_ID, flows_endpoint)
method = functools.partial(fc.flow_action_cancel, flow_id, flow_scope, action_id)
RequestRunner(
method, format=output_format, verbose=verbose, watch=False
).run_and_render()
@app.command("action-log")
@app.command("run-log")
def flow_action_log(
action_id: str = typer.Argument(...),
flow_id: str = typer.Option(
...,
help="The ID for the Flow which triggered the Action.",
prompt=True,
),
flow_scope: str = typer.Option(
None,
help="The scope this Flow uses to authenticate requests.",
callback=url_validator_callback,
),
reverse: bool = typer.Option(
False,
"--reverse",
help="Display logs reverse chronological order (most recent first).",
show_default=True,
),
limit: int = typer.Option(
None,
help="Set a maximum number of events from the log to return.",
min=1,
max=100,
),
marker: Optional[str] = typer.Option(
None,
"--marker",
"-m",
help="A pagination token for iterating through returned data.",
),
per_page: int = typer.Option(
None,
"--per-page",
"-p",
help=(
"The page size to return. Only valid when used without providing a marker."
),
min=1,
max=50,
),
output_format: RunLogOutputFormat = typer.Option(
RunLogOutputFormat.table,
"--format",
"-f",
help="Output display format.",
case_sensitive=False,
show_default=True,
),
watch: bool = typer.Option(
False,
"--watch",
"-w",
help=(
"Continuously poll this Action until it reaches a completed state."
" Using this option will report only the latest state available."
),
show_default=True,
),
flows_endpoint: str = flows_env_var_option,
verbose: bool = verbosity_option,
):
"""
Get a log of the steps executed by a Flow definition's invocation.
"""
fc = create_flows_client(CLIENT_ID, flows_endpoint)
method = functools.partial(
fc.flow_action_log,
flow_id,
flow_scope,
action_id,
limit,
reverse,
marker,
per_page,
)
rr = RequestRunner(
method,
format=output_format,
verbose=verbose,
watch=watch,
fields=RunLogDisplayFields,
detector=LogCompletionDetector,
)
if output_format in {
RunLogOutputFormat.json,
RunLogOutputFormat.yaml,
RunLogOutputFormat.table,
}:
rr.run_and_render()
else:
result = rr.run()
if not result.is_api_error:
flow_def = fc.get_flow(flow_id)
output_format.visualize(result.result, flow_def)
else:
rr.format = RunLogOutputFormat.json
rr.render(result)
@app.command("action-enumerate")
@app.command("run-enumerate")
def flow_action_enumerate(
roles: List[ActionRoleAllNames] = typer.Option(
[ActionRole.run_owner],
"--role",
help="Display Actions/Runs where you have at least the selected role. "
"Precedence of roles is: run_monitor, run_manager, run_owner. "
"Thus, by specifying, for example, run_manager, all flows "
"for which you have run_manager or run_owner roles "
"will be displayed. Values monitored_by, managed_by and created_by "
"are deprecated. [repeatable use deprecated as the lowest "
"precedence value provided will determine the Actions/Runs displayed.]",
),
statuses: List[ActionStatus] = typer.Option(
[],
"--status",
help="Display Actions with the selected status. [repeatable]",
),
marker: str = typer.Option(
None,
"--marker",
"-m",
help="A pagination token for iterating through returned data.",
),
per_page: int = typer.Option(
None,
"--per-page",
"-p",
help=(
"The page size to return. Only valid when used without providing a marker."
),
min=1,
max=50,
),
filters: Optional[List[str]] = typer.Option(
None,
"--filter",
help="A filtering criteria in the form 'key=value' to apply to the "
"resulting Action listing. The key indicates the filter, the value "
"indicates the pattern to match. Multiple patterns for a single key may "
"be specified as a comma separated string, the results for which will "
"represent a logical OR. If multiple filters are applied, the returned "
"data will be the result of a logical AND between them. [repeatable]",
),
orderings: Optional[List[str]] = typer.Option(
None,
"--orderby",
help="An ordering criteria in the form 'key=value' to apply to the resulting "
"Flow listing. The key indicates the field to order on, and the value is "
"either ASC, for ascending order, or DESC, for descending order. The first "
"ordering criteria will be used to sort the data, subsequent ordering criteria "
"will further sort ties. [repeatable]",
),
watch: bool = typer.Option(
False,
"--watch",
"-w",
help="Continuously poll for new Actions.",
show_default=True,
),
output_format: ListingOutputFormat = typer.Option(
ListingOutputFormat.table,
"--format",
"-f",
help="Output display format.",
case_sensitive=False,
show_default=True,
),
flows_endpoint: str = flows_env_var_option,
verbose: bool = verbosity_option,
):
"""
Retrieve all Flow Runs you have access to view.
"""
parsed_filters = parse_query_options(filters)
parsed_orderings = parse_query_options(orderings)
statuses_str = [s.value for s in statuses]
role_param = make_role_param(roles)
fc = create_flows_client(CLIENT_ID, flows_endpoint, RUN_STATUS_SCOPE)
method = functools.partial(
fc.enumerate_actions,
statuses=statuses_str,
marker=marker,
per_page=per_page,
filters=parsed_filters,
orderings=parsed_orderings,
**role_param,
)
RequestRunner(
method,
format=output_format,
verbose=verbose,
watch=watch,
fields=RunListDisplayFields,
).run_and_render()
@app.command("action-update")
@app.command("run-update")
def update_run(
run_id: str = typer.Argument(...),
run_managers: Optional[List[str]] = typer.Option(
None,
"--run-manager",
help="A principal which may change the execution of the Run."
+ _principal_description
+ " Specify an empty string once to erase all Run managers."
+ " [repeatable]",
callback=custom_principal_validator({""}),
),
run_monitors: Optional[List[str]] = typer.Option(
None,
"--run-monitor",
help="A principal which may monitor the execution of the Run."
+ _principal_description
+ " [repeatable]",
callback=custom_principal_validator({""}),
),
tags: Optional[List[str]] = typer.Option(
None,
"--tag",
help=(
"A tag to associate with the Run."
" If specified, the existing tags on the Run will be replaced"
" with the list of tags specified here."
" Specify an empty string once to erase all tags."
" [repeatable]"
),
),
label: Optional[str] = typer.Option(
None,
help="A label to associate with the Run.",
),
flows_endpoint: str = flows_env_var_option,
verbose: bool = verbosity_option,
output_format: OutputFormat = output_format_option,
):
"""
Update a Run on the Flows service.
"""
# Special cases:
# * If the user specifies a single empty string, replace [""] with []
# so all values currently set on the Run will be erased.
# * If the user specifies nothing, replace the default empty list with None
# to prevent erasure of the values currently set on the Run.
run_managers = [] if run_managers == [""] else (run_managers or None)
run_monitors = [] if run_monitors == [""] else (run_monitors or None)
tags = [] if tags and list(tags) == [""] else (tags or None)
fc = create_flows_client(CLIENT_ID, flows_endpoint, RUN_STATUS_SCOPE)
RequestRunner(
functools.partial(
fc.flow_action_update,
run_id,
label=label,
run_managers=run_managers,
run_monitors=run_monitors,
tags=tags,
),
format=output_format,
verbose=verbose,
).run_and_render()
@app.command("batch-run-update")
def update_runs(
run_ids: List[str] = typer.Argument(...),
#
# Run manager parameters
set_run_managers: Optional[List[str]] = typer.Option(
None,
"--set-run-manager",
help="Set a principal on affected Runs that can change the Run execution.",
callback=custom_principal_validator({""}),
),
add_run_managers: Optional[List[str]] = typer.Option(
None,
"--add-run-manager",
help="Add a principal to affected Runs that can change the Run execution.",
callback=custom_principal_validator({""}),
),
remove_run_managers: Optional[List[str]] = typer.Option(
None,
"--remove-run-manager",
help="Remove a principal from affected Runs that can change the Run execution.",
callback=custom_principal_validator({""}),
),
#
# Run monitor parameters
set_run_monitors: Optional[List[str]] = typer.Option(
None,
"--set-run-monitor",
help="Set a principal on affected Runs that can monitor Run execution.",
callback=custom_principal_validator({""}),
),
add_run_monitors: Optional[List[str]] = typer.Option(
None,
"--add-run-monitor",
help="Add a principal to affected Runs that can monitor Run execution.",
callback=custom_principal_validator({""}),
),
remove_run_monitors: Optional[List[str]] = typer.Option(
None,
"--remove-run-monitor",
help="Remove a principal from affected Runs that can monitor Run execution.",
callback=custom_principal_validator({""}),
),
#
# Tag parameters
set_tags: Optional[List[str]] = typer.Option(
None,
"--set-tag",
help="A tag to set on the specified Runs.",
),
add_tags: Optional[List[str]] = typer.Option(
None,
"--add-tag",
help="A tag to add to the affected Runs.",
),
remove_tags: Optional[List[str]] = typer.Option(
None,
"--remove-tag",
help="A tag to remove from the affected Runs.",
),
status: Optional[str] = typer.Option(
None,
help=dedent(
"""
Set the status of the affected Runs.
Currently, "cancel" is the only valid value.
"""
),
),
flows_endpoint: str = flows_env_var_option,
verbose: bool = verbosity_option,
output_format: OutputFormat = output_format_option,
):
"""
Update metadata and permissions on one or more Runs.
\b
Modifying lists of values
=========================
Most options support set, add, and remove operations.
The "add" option variants will add the specified value
to whatever is set on each affected Run.
For example, if one Run has a "star" tag and another has a "circle" tag,
`--add-tag square` will result in a Run with "star" and "square" tags,
and the other Run will have "circle" and "square" tags.
The "remove" option variants will remove the specified value
from whatever is set on each affected Run.
There will not be an error if the value is not set on a Run.
For example, if one Run has a "star" tag and another has a "circle" tag,
`--remove-tag star` will result in a Run with no tags
while the other still has a "circle" tag.
The "set" option variants will overwrite the metadata and permissions
currently set on all affected Runs.
For example, `--set-tag example` will standardize all affected Runs
so that they have just one tag: "example".
To remove all values on all affected Runs,
use the "set" variant of an option with an empty string.
    For example, to erase all Run monitors, use `--set-run-monitor ""`.
All options with "set", "add", and "remove" variants can be used multiple times.
However, only one variation of an option can be specified at a time.
For example, `--set-tag` and `--add-tag` cannot be combined in the same command,
and `--set-run-manager` and `--add-run-manager` cannot be combined.
It is fine to combine `--add-tag` and `--remove-run-manager`.
\b
Modifying roles
===============
Run managers and monitors must be specified in one of these forms:
\b
* A user's Globus Auth username
* A user's identity UUID in the form urn:globus:auth:identity:<UUID>
* A group's identity UUID in the form urn:globus:groups:id:<GROUP_UUID>
"""
# Until typing.Literal is available on all supported Python versions,
# `status` must be checked in-code.
if status is not None and status != "cancel":
raise ValueError("'cancel' is the only valid --status value.")
# Special cases:
# * If the user specifies a single empty string, replace [""] with []
# so all values currently set on the Run will be erased.
# * If the user specifies nothing, replace the default empty list with None
# to prevent erasure of the values currently set on the Run.
set_run_managers = [] if set_run_managers == [""] else (set_run_managers or None)
set_run_monitors = [] if set_run_monitors == [""] else (set_run_monitors or None)
set_tags = [] if set_tags and list(set_tags) == [""] else (set_tags or None)
fc = create_flows_client(CLIENT_ID, flows_endpoint, RUN_STATUS_SCOPE)
RequestRunner(
functools.partial(
fc.update_runs,
run_ids=run_ids,
# Run managers
add_run_managers=add_run_managers or None,
remove_run_managers=remove_run_managers or None,
set_run_managers=set_run_managers,
# Run monitors
add_run_monitors=add_run_monitors or None,
remove_run_monitors=remove_run_monitors or None,
set_run_monitors=set_run_monitors,
# Tags
add_tags=add_tags or None,
remove_tags=remove_tags or None,
set_tags=set_tags,
# Status
status=status,
),
format=output_format,
verbose=verbose,
).run_and_render()
if __name__ == "__main__":
app()
| 50,154 | 32.237243 | 88 |
py
|
simanneal
|
simanneal-master/setup.py
|
#!/usr/bin/env/python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
LONG_DESCRIPTION = """`simanneal` is a python implementation of the
[simulated annealing optimization](http://en.wikipedia.org/wiki/Simulated_annealing) technique.
Simulated annealing is used to find a close-to-optimal solution among an
extremely large (but finite) set of potential solutions. It is particularly
useful for [combinatorial optimization](http://en.wikipedia.org/wiki/Combinatorial_optimization)
problems defined by complex objective functions that rely on external data.
"""
# Parse the version from the simanneal module.
with open('simanneal/__init__.py') as f:
for line in f:
if line.find("__version__") >= 0:
version = line.split("=")[1].strip()
version = version.strip('"')
version = version.strip("'")
break
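# (The loop above expects simanneal/__init__.py to contain a line of the form
#  __version__ = "0.5.0".)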
setup(
name='simanneal',
version=version,
description='Simulated Annealing in Python',
license='BSD',
author='Matthew Perry',
author_email='[email protected]',
url='https://github.com/perrygeo/simanneal',
long_description=LONG_DESCRIPTION,
packages=['simanneal'],
install_requires=[])
| 1,231 | 33.222222 | 96 |
py
|
simanneal
|
simanneal-master/examples/salesman.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import math
import random
from collections import defaultdict
from simanneal import Annealer
def distance(a, b):
"""Calculates distance between two latitude-longitude coordinates."""
R = 3963 # radius of Earth (miles)
lat1, lon1 = math.radians(a[0]), math.radians(a[1])
lat2, lon2 = math.radians(b[0]), math.radians(b[1])
return math.acos(math.sin(lat1) * math.sin(lat2) +
math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R
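# For example, distance((40.72, 74.00), (34.05, 118.25)) -- New York City to
# Los Angeles -- comes out at roughly 2,450 miles.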
class TravellingSalesmanProblem(Annealer):
"""Test annealer with a travelling salesman problem.
"""
# pass extra data (the distance matrix) into the constructor
def __init__(self, state, distance_matrix):
self.distance_matrix = distance_matrix
super(TravellingSalesmanProblem, self).__init__(state) # important!
def move(self):
"""Swaps two cities in the route."""
# no efficiency gain, just proof of concept
# demonstrates returning the delta energy (optional)
initial_energy = self.energy()
a = random.randint(0, len(self.state) - 1)
b = random.randint(0, len(self.state) - 1)
self.state[a], self.state[b] = self.state[b], self.state[a]
return self.energy() - initial_energy
def energy(self):
"""Calculates the length of the route."""
e = 0
for i in range(len(self.state)):
e += self.distance_matrix[self.state[i-1]][self.state[i]]
return e
if __name__ == '__main__':
# latitude and longitude for the twenty largest U.S. cities
cities = {
'New York City': (40.72, 74.00),
'Los Angeles': (34.05, 118.25),
'Chicago': (41.88, 87.63),
'Houston': (29.77, 95.38),
'Phoenix': (33.45, 112.07),
'Philadelphia': (39.95, 75.17),
'San Antonio': (29.53, 98.47),
'Dallas': (32.78, 96.80),
'San Diego': (32.78, 117.15),
'San Jose': (37.30, 121.87),
'Detroit': (42.33, 83.05),
'San Francisco': (37.78, 122.42),
'Jacksonville': (30.32, 81.70),
'Indianapolis': (39.78, 86.15),
'Austin': (30.27, 97.77),
'Columbus': (39.98, 82.98),
'Fort Worth': (32.75, 97.33),
'Charlotte': (35.23, 80.85),
'Memphis': (35.12, 89.97),
'Baltimore': (39.28, 76.62)
}
# initial state, a randomly-ordered itinerary
init_state = list(cities)
random.shuffle(init_state)
# create a distance matrix
distance_matrix = defaultdict(dict)
for ka, va in cities.items():
for kb, vb in cities.items():
distance_matrix[ka][kb] = 0.0 if kb == ka else distance(va, vb)
tsp = TravellingSalesmanProblem(init_state, distance_matrix)
tsp.set_schedule(tsp.auto(minutes=0.2))
# since our state is just a list, slice is the fastest way to copy
tsp.copy_strategy = "slice"
state, e = tsp.anneal()
while state[0] != 'New York City':
state = state[1:] + state[:1] # rotate NYC to start
print()
print("%i mile route:" % e)
print(" ➞ ".join(state))
| 3,152 | 31.84375 | 81 |
py
|
simanneal
|
simanneal-master/examples/watershed/shapefile.py
|
"""
shapefile.py
Provides read and write support for ESRI Shapefiles.
author: jlawhead<at>geospatialpython.com
date: 20110927
version: 1.1.4
Compatible with Python versions 2.4-3.x
"""
from struct import pack, unpack, calcsize, error
import os
import sys
import time
import array
#
# Constants for shape types
NULL = 0
POINT = 1
POLYLINE = 3
POLYGON = 5
MULTIPOINT = 8
POINTZ = 11
POLYLINEZ = 13
POLYGONZ = 15
MULTIPOINTZ = 18
POINTM = 21
POLYLINEM = 23
POLYGONM = 25
MULTIPOINTM = 28
MULTIPATCH = 31
PYTHON3 = sys.version_info[0] == 3
def b(v):
if PYTHON3:
if isinstance(v, str):
# For python 3 encode str to bytes.
return v.encode('utf-8')
elif isinstance(v, bytes):
# Already bytes.
return v
else:
# Error.
raise Exception('Unknown input type')
else:
# For python 2 assume str passed in and return str.
return v
def u(v):
if PYTHON3:
if isinstance(v, bytes):
# For python 3 decode bytes to str.
return v.decode('utf-8')
elif isinstance(v, str):
# Already str.
return v
else:
# Error.
raise Exception('Unknown input type')
else:
# For python 2 assume str passed in and return str.
return v
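# For example, on Python 3: b("shp") == b'shp' and u(b'shp') == 'shp';
# on Python 2 both helpers return the given str unchanged.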
def is_string(v):
if PYTHON3:
return isinstance(v, str)
else:
return isinstance(v, basestring)
class _Array(array.array):
"""Converts python tuples to lits of the appropritate type.
Used to unpack different shapefile header parts."""
def __repr__(self):
return str(self.tolist())
class _Shape:
def __init__(self, shapeType=None):
"""Stores the geometry of the different shape types
specified in the Shapefile spec. Shape types are
usually point, polyline, or polygons. Every shape type
except the "Null" type contains points at some level for
example verticies in a polygon. If a shape type has
multiple shapes containing points within a single
geometry record then those shapes are called parts. Parts
are designated by their starting index in geometry record's
list of shapes."""
self.shapeType = shapeType
self.points = []
class _ShapeRecord:
"""A shape object of any type."""
def __init__(self, shape=None, record=None):
self.shape = shape
self.record = record
class ShapefileException(Exception):
"""An exception to handle shapefile specific problems."""
pass
class Reader:
"""Reads the three files of a shapefile as a unit or
separately. If one of the three files (.shp, .shx,
.dbf) is missing no exception is thrown until you try
to call a method that depends on that particular file.
The .shx index file is used if available for efficiency
but is not required to read the geometry from the .shp
file. The "shapefile" argument in the constructor is the
name of the file you want to open.
You can instantiate a Reader without specifying a shapefile
and then specify one later with the load() method.
Only the shapefile headers are read upon loading. Content
within each file is only accessed when required and as
efficiently as possible. Shapefiles are usually not large
but they can be.
"""
def __init__(self, *args, **kwargs):
self.shp = None
self.shx = None
self.dbf = None
self.shapeName = "Not specified"
self._offsets = []
self.shpLength = None
self.numRecords = None
self.fields = []
self.__dbfHdrLength = 0
# See if a shapefile name was passed as an argument
if len(args) > 0:
if type(args[0]) is type("stringTest"):
self.load(args[0])
return
if "shp" in kwargs.keys():
if hasattr(kwargs["shp"], "read"):
self.shp = kwargs["shp"]
if hasattr(self.shp, "seek"):
self.shp.seek(0)
if "shx" in kwargs.keys():
if hasattr(kwargs["shx"], "read"):
self.shx = kwargs["shx"]
if hasattr(self.shx, "seek"):
self.shx.seek(0)
if "dbf" in kwargs.keys():
if hasattr(kwargs["dbf"], "read"):
self.dbf = kwargs["dbf"]
if hasattr(self.dbf, "seek"):
self.dbf.seek(0)
if self.shp or self.dbf:
self.load()
else:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
def load(self, shapefile=None):
"""Opens a shapefile from a filename or file-like
object. Normally this method would be called by the
constructor with the file object or file name as an
argument."""
if shapefile:
(shapeName, ext) = os.path.splitext(shapefile)
self.shapeName = shapeName
try:
self.shp = open("%s.shp" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shp" % shapeName)
try:
self.shx = open("%s.shx" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shx" % shapeName)
try:
self.dbf = open("%s.dbf" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.dbf" % shapeName)
if self.shp:
self.__shpHeader()
if self.dbf:
self.__dbfHeader()
def __getFileObj(self, f):
"""Checks to see if the requested shapefile file object is
available. If not a ShapefileException is raised."""
if not f:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
if self.shp and self.shpLength is None:
self.load()
if self.dbf and len(self.fields) == 0:
self.load()
return f
def __restrictIndex(self, i):
"""Provides list-like handling of a record index with a clearer
error message if the index is out of bounds."""
if self.numRecords:
rmax = self.numRecords - 1
if abs(i) > rmax:
raise IndexError("Shape or Record index out of range.")
if i < 0: i = range(self.numRecords)[i]
return i
def __shpHeader(self):
"""Reads the header information from a .shp or .shx file."""
if not self.shp:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no shp file found")
shp = self.shp
# File length (16-bit word * 2 = bytes)
shp.seek(24)
self.shpLength = unpack(">i", shp.read(4))[0] * 2
# Shape type
shp.seek(32)
self.shapeType= unpack("<i", shp.read(4))[0]
# The shapefile's bounding box (lower left, upper right)
self.bbox = _Array('d', unpack("<4d", shp.read(32)))
# Elevation
self.elevation = _Array('d', unpack("<2d", shp.read(16)))
# Measure
self.measure = _Array('d', unpack("<2d", shp.read(16)))
def __shape(self):
"""Returns the header info and geometry for a single shape."""
f = self.__getFileObj(self.shp)
record = _Shape()
nParts = nPoints = zmin = zmax = mmin = mmax = None
(recNum, recLength) = unpack(">2i", f.read(8))
shapeType = unpack("<i", f.read(4))[0]
record.shapeType = shapeType
# For Null shapes create an empty points list for consistency
if shapeType == 0:
record.points = []
# All shape types capable of having a bounding box
elif shapeType in (3,5,8,13,15,18,23,25,28,31):
record.bbox = _Array('d', unpack("<4d", f.read(32)))
# Shape types with parts
if shapeType in (3,5,13,15,23,25,31):
nParts = unpack("<i", f.read(4))[0]
# Shape types with points
if shapeType in (3,5,8,13,15,23,25,31):
nPoints = unpack("<i", f.read(4))[0]
# Read parts
if nParts:
record.parts = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read part types for Multipatch - 31
if shapeType == 31:
record.partTypes = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read points - produces a list of [x,y] values
if nPoints:
record.points = [_Array('d', unpack("<2d", f.read(16))) for p in range(nPoints)]
# Read z extremes and values
if shapeType in (13,15,18,31):
(zmin, zmax) = unpack("<2d", f.read(16))
record.z = _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8)))
# Read m extremes and values
if shapeType in (13,15,18,23,25,28,31):
(mmin, mmax) = unpack("<2d", f.read(16))
# Measure values less than -10e38 are nodata values according to the spec
record.m = []
for m in _Array('d', unpack("%sd" % nPoints, f.read(nPoints * 8))):
if m > -10e38:
record.m.append(m)
else:
record.m.append(None)
# Read a single point
if shapeType in (1,11,21):
record.points = [_Array('d', unpack("<2d", f.read(16)))]
# Read a single Z value
if shapeType == 11:
record.z = unpack("<d", f.read(8))
# Read a single M value
if shapeType in (11,21):
record.m = unpack("<d", f.read(8))
return record
def __shapeIndex(self, i=None):
"""Returns the offset in a .shp file for a shape based on information
in the .shx index file."""
shx = self.shx
if not shx:
return None
if not self._offsets:
# File length (16-bit word * 2 = bytes) - header length
shx.seek(24)
shxRecordLength = (unpack(">i", shx.read(4))[0] * 2) - 100
numRecords = shxRecordLength // 8
# Jump to the first record.
shx.seek(100)
for r in range(numRecords):
# Offsets are 16-bit words just like the file length
self._offsets.append(unpack(">i", shx.read(4))[0] * 2)
shx.seek(shx.tell() + 4)
if not i == None:
return self._offsets[i]
def shape(self, i=0):
"""Returns a shape object for a shape in the the geometry
record file."""
shp = self.__getFileObj(self.shp)
i = self.__restrictIndex(i)
offset = self.__shapeIndex(i)
if not offset:
# Shx index not available so use the full list.
shapes = self.shapes()
return shapes[i]
shp.seek(offset)
return self.__shape()
def shapes(self):
"""Returns all shapes in a shapefile."""
shp = self.__getFileObj(self.shp)
shp.seek(100)
shapes = []
while shp.tell() < self.shpLength:
shapes.append(self.__shape())
return shapes
def __dbfHeaderLength(self):
"""Retrieves the header length of a dbf file header."""
if not self.__dbfHdrLength:
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
(self.numRecords, self.__dbfHdrLength) = \
unpack("<xxxxLH22x", dbf.read(32))
return self.__dbfHdrLength
def __dbfHeader(self):
"""Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger"""
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
headerLength = self.__dbfHeaderLength()
numFields = (headerLength - 33) // 32
for field in range(numFields):
fieldDesc = list(unpack("<11sc4xBB14x", dbf.read(32)))
name = 0
idx = 0
if b("\x00") in fieldDesc[name]:
idx = fieldDesc[name].index(b("\x00"))
else:
idx = len(fieldDesc[name]) - 1
fieldDesc[name] = fieldDesc[name][:idx]
fieldDesc[name] = u(fieldDesc[name])
fieldDesc[name] = fieldDesc[name].lstrip()
fieldDesc[1] = u(fieldDesc[1])
self.fields.append(fieldDesc)
terminator = dbf.read(1)
assert terminator == b("\r")
self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))
def __recordFmt(self):
"""Calculates the size of a .shp geometry record."""
if not self.numRecords:
self.__dbfHeader()
fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in self.fields])
fmtSize = calcsize(fmt)
return (fmt, fmtSize)
def __record(self):
"""Reads and returns a dbf record row as a list of values."""
f = self.__getFileObj(self.dbf)
recFmt = self.__recordFmt()
recordContents = unpack(recFmt[0], f.read(recFmt[1]))
if recordContents[0] != b(' '):
# deleted record
return None
record = []
for (name, typ, size, deci), value in zip(self.fields,
recordContents):
if name == 'DeletionFlag':
continue
elif not value.strip():
record.append(value)
continue
elif typ == "N":
value = value.replace(b('\0'), b('')).strip()
if value == b(''):
value = 0
elif deci:
try:
value = float(value)
except ValueError:
value = 0
else:
value = int(value)
elif typ == b('D'):
try:
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
value = [y, m, d]
except:
value = value.strip()
elif typ == b('L'):
value = (value in b('YyTt') and b('T')) or \
(value in b('NnFf') and b('F')) or b('?')
else:
value = u(value)
value = value.strip()
record.append(value)
return record
def record(self, i=0):
"""Returns a specific dbf record based on the supplied index."""
f = self.__getFileObj(self.dbf)
if not self.numRecords:
self.__dbfHeader()
i = self.__restrictIndex(i)
recSize = self.__recordFmt()[1]
f.seek(0)
f.seek(self.__dbfHeaderLength() + (i * recSize))
return self.__record()
def records(self):
"""Returns all records in a dbf file."""
if not self.numRecords:
self.__dbfHeader()
records = []
f = self.__getFileObj(self.dbf)
f.seek(self.__dbfHeaderLength())
for i in range(self.numRecords):
r = self.__record()
if r:
records.append(r)
return records
def shapeRecord(self, i=0):
"""Returns a combination geometry and attribute record for the
supplied record index."""
i = self.__restrictIndex(i)
return _ShapeRecord(shape=self.shape(i),
record=self.record(i))
def shapeRecords(self):
"""Returns a list of combination geometry/attribute records for
all records in a shapefile."""
shapeRecords = []
return [_ShapeRecord(shape=rec[0], record=rec[1]) \
for rec in zip(self.shapes(), self.records())]
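# A minimal, illustrative Reader usage sketch; "example" is a hypothetical
# shapefile base name (example.shp/.shx/.dbf must exist side by side).
def _example_read(basename="example"):
    """Print the shape type and attribute record of every feature."""
    r = Reader(basename)
    for sr in r.shapeRecords():
        print("%s %s" % (sr.shape.shapeType, sr.record))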
class Writer:
"""Provides write support for ESRI Shapefiles."""
def __init__(self, shapeType=None):
self._shapes = []
self.fields = []
self.records = []
self.shapeType = shapeType
self.shp = None
self.shx = None
self.dbf = None
# Geometry record offsets and lengths for writing shx file.
self._offsets = []
self._lengths = []
# Use deletion flags in dbf? Default is false (0).
self.deletionFlag = 0
def __getFileObj(self, f):
"""Safety handler to verify file-like objects"""
if not f:
raise ShapefileException("No file-like object available.")
elif hasattr(f, "write"):
return f
else:
pth = os.path.split(f)[0]
if pth and not os.path.exists(pth):
os.makedirs(pth)
return open(f, "wb")
def __shpFileLength(self):
"""Calculates the file length of the shp file."""
# Start with header length
size = 100
# Calculate size of all shapes
for s in self._shapes:
# Add in record header and shape type fields
size += 12
# nParts and nPoints do not apply to all shapes
#if self.shapeType not in (0,1):
# nParts = len(s.parts)
# nPoints = len(s.points)
if hasattr(s,'parts'):
nParts = len(s.parts)
if hasattr(s,'points'):
nPoints = len(s.points)
# All shape types capable of having a bounding box
if self.shapeType in (3,5,8,13,15,18,23,25,28,31):
size += 32
# Shape types with parts
if self.shapeType in (3,5,13,15,23,25,31):
# Parts count
size += 4
# Parts index array
size += nParts * 4
# Shape types with points
if self.shapeType in (3,5,8,13,15,23,25,31):
# Points count
size += 4
# Points array
size += 16 * nPoints
# Calc size of part types for Multipatch (31)
if self.shapeType == 31:
size += nParts * 4
# Calc z extremes and values
if self.shapeType in (13,15,18,31):
# z extremes
size += 16
# z array
size += 8 * nPoints
# Calc m extremes and values
if self.shapeType in (23,25,31):
# m extremes
size += 16
# m array
size += 8 * nPoints
# Calc a single point
if self.shapeType in (1,11,21):
size += 16
# Calc a single Z value
if self.shapeType == 11:
size += 8
# Calc a single M value
if self.shapeType in (11,21):
size += 8
# Calculate size as 16-bit words
size //= 2
return size
def __bbox(self, shapes, shapeTypes=[]):
x = []
y = []
for s in shapes:
shapeType = self.shapeType
if shapeTypes:
shapeType = shapeTypes[shapes.index(s)]
px, py = list(zip(*s.points))[:2]
x.extend(px)
y.extend(py)
return [min(x), min(y), max(x), max(y)]
def __zbox(self, shapes, shapeTypes=[]):
z = []
for s in shapes:
try:
for p in s.points:
z.append(p[2])
except IndexError:
pass
if not z: z.append(0)
return [min(z), max(z)]
def __mbox(self, shapes, shapeTypes=[]):
m = [0]
for s in shapes:
try:
for p in s.points:
m.append(p[3])
except IndexError:
pass
return [min(m), max(m)]
def bbox(self):
"""Returns the current bounding box for the shapefile which is
the lower-left and upper-right corners. It does not contain the
elevation or measure extremes."""
return self.__bbox(self._shapes)
def zbox(self):
"""Returns the current z extremes for the shapefile."""
return self.__zbox(self._shapes)
def mbox(self):
"""Returns the current m extremes for the shapefile."""
return self.__mbox(self._shapes)
def __shapefileHeader(self, fileObj, headerType='shp'):
"""Writes the specified header type to the specified file-like object.
Several of the shapefile formats are so similar that a single generic
method to read or write them is warranted."""
f = self.__getFileObj(fileObj)
f.seek(0)
# File code, Unused bytes
f.write(pack(">6i", 9994,0,0,0,0,0))
# File length (Bytes / 2 = 16-bit words)
if headerType == 'shp':
f.write(pack(">i", self.__shpFileLength()))
elif headerType == 'shx':
f.write(pack('>i', ((100 + (len(self._shapes) * 8)) // 2)))
# Version, Shape type
f.write(pack("<2i", 1000, self.shapeType))
# The shapefile's bounding box (lower left, upper right)
if self.shapeType != 0:
try:
f.write(pack("<4d", *self.bbox()))
except error:
raise ShapefileException("Failed to write shapefile bounding box. Floats required.")
else:
f.write(pack("<4d", 0,0,0,0))
# Elevation
z = self.zbox()
# Measure
m = self.mbox()
try:
f.write(pack("<4d", z[0], z[1], m[0], m[1]))
except error:
raise ShapefileException("Failed to write shapefile elevation and measure values. Floats required.")
def __dbfHeader(self):
"""Writes the dbf header and field descriptors."""
f = self.__getFileObj(self.dbf)
f.seek(0)
version = 3
year, month, day = time.localtime()[:3]
year -= 1900
# Remove deletion flag placeholder from fields
for field in self.fields:
if field[0].startswith("Deletion"):
self.fields.remove(field)
numRecs = len(self.records)
numFields = len(self.fields)
headerLength = numFields * 32 + 33
recordLength = sum([int(field[2]) for field in self.fields]) + 1
header = pack('<BBBBLHH20x', version, year, month, day, numRecs,
headerLength, recordLength)
f.write(header)
# Field descriptors
for field in self.fields:
name, fieldType, size, decimal = field
name = b(name)
name = name.replace(b(' '), b('_'))
name = name.ljust(11).replace(b(' '), b('\x00'))
fieldType = b(fieldType)
size = int(size)
fld = pack('<11sc4xBB14x', name, fieldType, size, decimal)
f.write(fld)
# Terminator
f.write(b('\r'))
def __shpRecords(self):
"""Write the shp records"""
f = self.__getFileObj(self.shp)
f.seek(100)
recNum = 1
for s in self._shapes:
self._offsets.append(f.tell())
# Record number, Content length place holder
f.write(pack(">2i", recNum, 0))
recNum += 1
start = f.tell()
# Shape Type
f.write(pack("<i", s.shapeType))
# All shape types capable of having a bounding box
if s.shapeType in (3,5,8,13,15,18,23,25,28,31):
try:
f.write(pack("<4d", *self.__bbox([s])))
except error:
raise ShapefileException("Falied to write bounding box for record %s. Expected floats." % recNum)
# Shape types with parts
if s.shapeType in (3,5,13,15,23,25,31):
# Number of parts
f.write(pack("<i", len(s.parts)))
# Shape types with multiple points per record
if s.shapeType in (3,5,8,13,15,23,25,31):
# Number of points
f.write(pack("<i", len(s.points)))
# Write part indexes
if s.shapeType in (3,5,13,15,23,25,31):
for p in s.parts:
f.write(pack("<i", p))
# Part types for Multipatch (31)
if s.shapeType == 31:
for pt in s.partTypes:
f.write(pack("<i", pt))
# Write points for multiple-point records
if s.shapeType in (3,5,8,13,15,23,25,31):
try:
[f.write(pack("<2d", *p[:2])) for p in s.points]
except error:
raise ShapefileException("Failed to write points for record %s. Expected floats." % recNum)
# Write z extremes and values
if s.shapeType in (13,15,18,31):
try:
f.write(pack("<2d", *self.__zbox([s])))
except error:
raise ShapefileException("Failed to write elevation extremes for record %s. Expected floats." % recNum)
try:
[f.write(pack("<d", p[2])) for p in s.points]
except error:
raise ShapefileException("Failed to write elevation values for record %s. Expected floats." % recNum)
# Write m extremes and values
if s.shapeType in (23,25,31):
try:
f.write(pack("<2d", *self.__mbox([s])))
except error:
raise ShapefileException("Failed to write measure extremes for record %s. Expected floats" % recNum)
try:
[f.write(pack("<d", p[3])) for p in s.points]
except error:
raise ShapefileException("Failed to write measure values for record %s. Expected floats" % recNum)
# Write a single point
if s.shapeType in (1,11,21):
try:
f.write(pack("<2d", s.points[0][0], s.points[0][1]))
except error:
raise ShapefileException("Failed to write point for record %s. Expected floats." % recNum)
# Write a single Z value
if s.shapeType == 11:
try:
f.write(pack("<1d", s.points[0][2]))
except error:
raise ShapefileException("Failed to write elevation value for record %s. Expected floats." % recNum)
# Write a single M value
if s.shapeType in (11,21):
try:
f.write(pack("<1d", s.points[0][3]))
except error:
raise ShapefileException("Failed to write measure value for record %s. Expected floats." % recNum)
# Finalize record length as 16-bit words
finish = f.tell()
length = (finish - start) // 2
self._lengths.append(length)
# start - 4 bytes is the content length field
f.seek(start-4)
f.write(pack(">i", length))
f.seek(finish)
def __shxRecords(self):
"""Writes the shx records."""
f = self.__getFileObj(self.shx)
f.seek(100)
for i in range(len(self._shapes)):
f.write(pack(">i", self._offsets[i] // 2))
f.write(pack(">i", self._lengths[i]))
def __dbfRecords(self):
"""Writes the dbf records."""
f = self.__getFileObj(self.dbf)
for record in self.records:
if not self.fields[0][0].startswith("Deletion"):
f.write(b(' ')) # deletion flag
for (fieldName, fieldType, size, dec), value in zip(self.fields, record):
fieldType = fieldType.upper()
size = int(size)
if fieldType.upper() == "N":
value = str(value).rjust(size)
elif fieldType == 'L':
value = str(value)[0].upper()
else:
value = str(value)[:size].ljust(size)
assert len(value) == size
value = b(value)
f.write(value)
def null(self):
"""Creates a null shape."""
self._shapes.append(_Shape(NULL))
def point(self, x, y, z=0, m=0):
"""Creates a point shape."""
pointShape = _Shape(self.shapeType)
pointShape.points.append([x, y, z, m])
self._shapes.append(pointShape)
def line(self, parts=[], shapeType=POLYLINE):
"""Creates a line shape. This method is just a convienience method
which wraps 'poly()'.
"""
self.poly(parts, shapeType, [])
def poly(self, parts=[], shapeType=POLYGON, partTypes=[]):
"""Creates a shape that has multiple collections of points (parts)
including lines, polygons, and even multipoint shapes. If no shape type
is specified it defaults to 'polygon'. If no part types are specified
(which they normally won't be) then all parts default to the shape type.
"""
polyShape = _Shape(shapeType)
polyShape.parts = []
polyShape.points = []
for part in parts:
polyShape.parts.append(len(polyShape.points))
for point in part:
# Ensure point is list
if not isinstance(point, list):
point = list(point)
# Make sure point has z and m values
while len(point) < 4:
point.append(0)
polyShape.points.append(point)
if polyShape.shapeType == 31:
if not partTypes:
for part in parts:
partTypes.append(polyShape.shapeType)
polyShape.partTypes = partTypes
self._shapes.append(polyShape)
def field(self, name, fieldType="C", size="50", decimal=0):
"""Adds a dbf field descriptor to the shapefile."""
self.fields.append((name, fieldType, size, decimal))
def record(self, *recordList, **recordDict):
"""Creates a dbf attribute record. You can submit either a sequence of
field values or keyword arguments of field names and values. Before
adding records you must add fields for the record values using the
fields() method. If the record values exceed the number of fields the
extra ones won't be added. In the case of using keyword arguments to specify
field/value pairs only fields matching the already registered fields
will be added."""
record = []
fieldCount = len(self.fields)
# Compensate for deletion flag
if self.fields[0][0].startswith("Deletion"): fieldCount -= 1
if recordList:
[record.append(recordList[i]) for i in range(fieldCount)]
elif recordDict:
for field in self.fields:
if field[0] in recordDict:
val = recordDict[field[0]]
if val:
record.append(val)
else:
record.append("")
if record:
self.records.append(record)
def shape(self, i):
return self._shapes[i]
def shapes(self):
"""Return the current list of shapes."""
return self._shapes
def saveShp(self, target):
"""Save an shp file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shp'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shp = self.__getFileObj(target)
self.__shapefileHeader(self.shp, headerType='shp')
self.__shpRecords()
def saveShx(self, target):
"""Save an shx file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shx'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shx = self.__getFileObj(target)
self.__shapefileHeader(self.shx, headerType='shx')
self.__shxRecords()
def saveDbf(self, target):
"""Save a dbf file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.dbf'
self.dbf = self.__getFileObj(target)
self.__dbfHeader()
self.__dbfRecords()
def save(self, target=None, shp=None, shx=None, dbf=None):
"""Save the shapefile data to three files or
three file-like objects. SHP and DBF files can also
be written exclusively using saveShp, saveShx, and saveDbf respectively."""
# TODO: Create a unique filename for target if None.
if shp:
self.saveShp(shp)
if shx:
self.saveShx(shx)
if dbf:
self.saveDbf(dbf)
elif target:
self.saveShp(target)
self.shp.close()
self.saveShx(target)
self.shx.close()
self.saveDbf(target)
self.dbf.close()
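# A minimal, illustrative Writer usage sketch; "out" is a hypothetical base
# filename (the call writes out.shp, out.shx and out.dbf).
def _example_write(basename="out"):
    """Write a single point feature with one attribute field."""
    w = Writer(POINT)
    w.field("NAME", "C", "40")
    w.point(-122.42, 37.78)
    w.record("San Francisco")
    w.save(basename)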
class Editor(Writer):
def __init__(self, shapefile=None, shapeType=POINT, autoBalance=1):
self.autoBalance = autoBalance
if not shapefile:
Writer.__init__(self, shapeType)
elif is_string(shapefile):
base = os.path.splitext(shapefile)[0]
if os.path.isfile("%s.shp" % base):
r = Reader(base)
Writer.__init__(self, r.shapeType)
self._shapes = r.shapes()
self.fields = r.fields
self.records = r.records()
def select(self, expr):
"""Select one or more shapes (to be implemented)"""
# TODO: Implement expressions to select shapes.
pass
def delete(self, shape=None, part=None, point=None):
"""Deletes the specified part of any shape by specifying a shape
number, part number, or point number."""
# shape, part, point
if shape and part and point:
del self._shapes[shape][part][point]
# shape, part
elif shape and part and not point:
del self._shapes[shape][part]
# shape
elif shape and not part and not point:
del self._shapes[shape]
# point
elif not shape and not part and point:
for s in self._shapes:
if s.shapeType == 1:
del self._shapes[point]
else:
for part in s.parts:
del s[part][point]
# part, point
elif not shape and part and point:
for s in self._shapes:
del s[part][point]
# part
elif not shape and part and not point:
for s in self._shapes:
del s[part]
def point(self, x=None, y=None, z=None, m=None, shape=None, part=None, point=None, addr=None):
"""Creates/updates a point shape. The arguments allows
you to update a specific point by shape, part, point of any
shape type."""
# shape, part, point
if shape and part and point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
try: self._shapes[shape][part][point]
except IndexError: self._shapes[shape][part].append([])
p = self._shapes[shape][part][point]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][point] = p
# shape, part
elif shape and part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
points = self._shapes[shape][part]
for i in range(len(points)):
p = points[i]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][i] = p
# shape
elif shape and not part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
# point
# part
if addr:
shape, part, point = addr
self._shapes[shape][part][point] = [x, y, z, m]
else:
Writer.point(self, x, y, z, m)
if self.autoBalance:
self.balance()
def validate(self):
"""An optional method to try and validate the shapefile
as much as possible before writing it (not implemented)."""
#TODO: Implement validation method
pass
def balance(self):
"""Adds a corresponding empty attribute or null geometry record depending
on which type of record was created to make sure all three files
are in synch."""
if len(self.records) > len(self._shapes):
self.null()
elif len(self.records) < len(self._shapes):
self.record()
def __fieldNorm(self, fieldName):
"""Normalizes a dbf field name to fit within the spec and the
expectations of certain ESRI software."""
if len(fieldName) > 11: fieldName = fieldName[:11]
fieldName = fieldName.upper()
        fieldName = fieldName.replace(' ', '_')
        return fieldName
# Begin Testing
def test():
import doctest
doctest.NORMALIZE_WHITESPACE = 1
doctest.testfile("README.txt", verbose=1)
if __name__ == "__main__":
"""
Doctests are contained in the module 'pyshp_usage.py'. This library was developed
using Python 2.3. Python 2.4 and above have some excellent improvements in the built-in
testing libraries but for now unit testing is done using what's available in
2.3.
"""
test()
| 38,106 | 36.993021 | 136 |
py
|
simanneal
|
simanneal-master/examples/watershed/watershed_condition.py
|
import math
import sys
import os
sys.path.append(os.path.abspath('../'))
from anneal import Annealer
import shapefile
import random
#-----------------------------------------------#
#-------------- Configuration ------------------#
#-----------------------------------------------#
shp = './data/huc6_4326.shp'
species = ['StlHd_m', 'Coho_m', 'Chnk_m']
targets = {
'StlHd_m': 511000,
'Coho_m': 521000,
'Chnk_m': 551000
}
# Note that these values will be added to the watershed cost
# if the solution fails to meet its targets;
# the total cost of a solution thus depends on the watershed cost
# plus all penalties.
penalties = {
'StlHd_m': 500,
'Coho_m': 400,
'Chnk_m': 300
}
costs = ['pcp80bdfmm', ]
uidfield = 'OBJECTID'
NUMREPS = 20
NUMITER = 30000
# Uncomment to manually define temperature schedule
# Otherwise, optimal schedule will be calculated
#
# SCHEDULE = {'tmin': 10, 'tmax': 6500, 'steps': 1}
#-----------------------------------------------#
watersheds = {}
def field_by_num(fieldname, fields):
fnames = [x[0] for x in fields]
return fnames.index(fieldname) - 1
print "Loading data from shapefile..."
sf = shapefile.Reader(shp)
fields = sf.fields
for rec in sf.records():
skip = False
vals = {}
for s in species:
vals[s] = rec[field_by_num(s, fields)]
# precalc costs
watershed_cost = 0
for c in costs:
fnum = field_by_num(c, fields)
watershed_cost += rec[fnum]
vals['watershed_cost'] = watershed_cost
if watershed_cost < 0.00001:
skip = True
if not skip:
watersheds[int(rec[field_by_num(uidfield, fields)])] = vals
# At this point, the `watersheds` variable should be a dictionary of watersheds
# where each watershed value is a dictionary of species and costs, e.g.
# {171003030703: {'Chnk_m': 11223.5, 'StlHd_m': 12263.7, 'Coho_m': 11359.1, 'watershed_cost': 1234},
hucs = watersheds.keys()
num_hucs = len(hucs)
def run(schedule=None):
state = []
def reserve_move(state):
"""
Select random watershed
then
Add watershed (if not already in state) OR remove it.
* This is the Marxan technique as well
"""
huc = hucs[int(random.random() * num_hucs)]
#huc = hucs[random.randint(0,num_hucs-1)]
if huc in state:
state.remove(huc)
else:
state.append(huc)
def reserve_energy(state):
"""
The "Objective Function"...
Calculates the 'energy' of the reserve.
Should incorporate costs of reserve and penalties
for not meeting species targets.
Note: This example is extremely simplistic compared to
the Marxan objective function (see Appendix B in Marxan manual)
but at least we have access to it!
"""
# Initialize variables
energy = 0
totals = {}
for fish in species:
totals[fish] = 0
# Get total cost and habitat in current state
for huc in state:
watershed = watersheds[huc]
# Sum up total habitat for each fish
for fish in species:
if energy == 0:
# reset for new calcs ie first watershed
totals[fish] = watershed[fish]
else:
# add for additional watersheds
totals[fish] += watershed[fish]
# Incorporate Cost of including watershed
energy += watershed['watershed_cost']
# incorporate penalties for missing species targets
for fish in species:
pct = totals[fish] / targets[fish]
if pct < 1.0: # if missed target, ie total < target
if pct < 0.1:
# Avoid zerodivision errors
# Limits the final to 10x specified penalty
pct = 0.1
penalty = int(penalties[fish] / pct)
energy += penalty
return energy
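    # For example, with the penalties configured above, a solution that reaches
    # only 50% of the StlHd_m target adds int(500 / 0.5) = 1000 to its energy;
    # shortfalls below 10% of a target are capped at 10x the base penalty.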
annealer = Annealer(reserve_energy, reserve_move)
if schedule is None:
print '----\nAutomatically determining optimal temperature schedule'
schedule = annealer.auto(state, minutes=6)
try:
schedule['steps'] = NUMITER
except:
pass # just keep the auto one
print '---\nAnnealing from %.2f to %.2f over %i steps:' % (schedule['tmax'],
schedule['tmin'], schedule['steps'])
state, e = annealer.anneal(state, schedule['tmax'], schedule['tmin'],
schedule['steps'], updates=6)
print "Reserve cost = %r" % reserve_energy(state)
state.sort()
for watershed in state:
print "\t", watershed, watersheds[watershed]
return state, reserve_energy(state), schedule
if __name__ == "__main__":
freq = {}
states = []
try:
schedule = SCHEDULE
except:
schedule = None
for i in range(NUMREPS):
state, energy, schedule = run(schedule)
states.append((state, energy))
for w in state:
if freq.has_key(w):
freq[w] += 1
else:
freq[w] = 1
print
print "States"
for s in states:
print s
print
print "Frequency of hit (max of %s reps)..." % NUMREPS
ks = freq.keys()
ks.sort()
for k in ks:
v = freq[k]
print k, "#"*int(v), v
| 5,505 | 27.677083 | 105 |
py
|
simanneal
|
simanneal-master/tests/test_anneal.py
|
import random
import sys
import time
from helper import distance, cities, distance_matrix
from simanneal import Annealer
if sys.version_info.major >= 3: # pragma: no cover
from io import StringIO
else:
from StringIO import StringIO
class TravellingSalesmanProblem(Annealer):
"""Test annealer with a travelling salesman problem.
"""
# pass extra data (the distance matrix) into the constructor
def __init__(self, distance_matrix, initial_state=None, load_state=None):
self.distance_matrix = distance_matrix
super(TravellingSalesmanProblem, self).__init__(
initial_state=initial_state, load_state=load_state)
def move(self):
"""Swaps two cities in the route."""
a = random.randint(0, len(self.state) - 1)
b = random.randint(0, len(self.state) - 1)
self.state[a], self.state[b] = self.state[b], self.state[a]
def energy(self):
"""Calculates the length of the route."""
e = 0
for i in range(len(self.state)):
e += self.distance_matrix[self.state[i - 1]][self.state[i]]
return e
def test_tsp_example():
# initial state, a randomly-ordered itinerary
init_state = list(cities.keys())
random.shuffle(init_state)
tsp = TravellingSalesmanProblem(distance_matrix, initial_state=init_state)
# since our state is just a list, slice is the fastest way to copy
tsp.copy_strategy = "slice"
tsp.steps = 50000
state, e = tsp.anneal()
while state[0] != 'New York City':
state = state[1:] + state[:1] # rotate NYC to start
assert len(state) == len(cities)
def test_auto():
# initial state, a randomly-ordered itinerary
init_state = list(cities.keys())
random.shuffle(init_state)
tsp = TravellingSalesmanProblem(distance_matrix, initial_state=init_state)
# since our state is just a list, slice is the fastest way to copy
tsp.copy_strategy = "slice"
auto_schedule = tsp.auto(minutes=0.05)
tsp.set_schedule(auto_schedule)
assert tsp.Tmax == auto_schedule['tmax']
assert tsp.Tmin == auto_schedule['tmin']
assert tsp.steps == auto_schedule['steps']
assert tsp.updates == auto_schedule['updates']
def test_save_load_state(tmpdir):
# initial state, a randomly-ordered itinerary
init_state = list(cities.keys())
random.shuffle(init_state)
tsp = TravellingSalesmanProblem(distance_matrix, initial_state=init_state)
tsp.copy_strategy = "slice"
statefile = str(tmpdir.join("state.pickle"))
tsp.save_state(fname=statefile)
init_state2 = init_state[1:] + init_state[:1]
tsp2 = TravellingSalesmanProblem(distance_matrix,
initial_state=init_state2)
tsp2.load_state(fname=statefile)
assert tsp.state == tsp2.state
def test_load_state_init(tmpdir):
# initial state, a randomly-ordered itinerary
init_state = list(cities.keys())
random.shuffle(init_state)
tsp = TravellingSalesmanProblem(distance_matrix, initial_state=init_state)
tsp.copy_strategy = "slice"
statefile = str(tmpdir.join("state.pickle"))
tsp.save_state(fname=statefile)
tsp2 = TravellingSalesmanProblem(distance_matrix, load_state=statefile)
assert tsp.state == tsp2.state
def test_default_update_formatting():
init_state = list(cities.keys())
tsp = TravellingSalesmanProblem(distance_matrix, initial_state=init_state)
# fix the start time and patch time.time() to give predictable Elapsed and Remaining times
tsp.start = 1.0
time.time = lambda: 9.0
# for step=0, the output should be column headers followed by partial data
sys.stderr = StringIO()
tsp.default_update(0, 1, 2, 3, 4)
output = sys.stderr.getvalue().split('\n')
assert 3 == len(output)
assert ' Temperature Energy Accept Improve Elapsed Remaining' == output[1]
assert '\r 1.00000 2.00 0:00:08 ' == output[2]
# when step>0, default_update should use \r to overwrite the previous data
sys.stderr = StringIO()
tsp.default_update(10, 1, 2, 3, 4)
output = sys.stderr.getvalue().split('\n')
assert 1 == len(output)
assert '\r 1.00000 2.00 300.00% 400.00% 0:00:08 11:06:32' == output[0]
| 4,323 | 32.007634 | 98 |
py
|
simanneal
|
simanneal-master/tests/helper.py
|
import math
def distance(a, b):
"""Calculates distance between two latitude-longitude coordinates."""
R = 3963 # radius of Earth (miles)
lat1, lon1 = math.radians(a[0]), math.radians(a[1])
lat2, lon2 = math.radians(b[0]), math.radians(b[1])
return math.acos(math.sin(lat1) * math.sin(lat2) +
math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R
cities = {
'New York City': (40.72, 74.00),
'Los Angeles': (34.05, 118.25),
'Chicago': (41.88, 87.63),
'Houston': (29.77, 95.38),
'Phoenix': (33.45, 112.07),
'Philadelphia': (39.95, 75.17),
'San Antonio': (29.53, 98.47),
'Dallas': (32.78, 96.80),
'San Diego': (32.78, 117.15),
'San Jose': (37.30, 121.87),
'Detroit': (42.33, 83.05),
'San Francisco': (37.78, 122.42),
'Jacksonville': (30.32, 81.70),
'Indianapolis': (39.78, 86.15),
'Austin': (30.27, 97.77),
'Columbus': (39.98, 82.98),
'Fort Worth': (32.75, 97.33),
'Charlotte': (35.23, 80.85),
'Memphis': (35.12, 89.97),
'Baltimore': (39.28, 76.62)
}
# create a distance matrix
distance_matrix = {}
for ka, va in cities.items():
distance_matrix[ka] = {}
for kb, vb in cities.items():
if kb == ka:
distance_matrix[ka][kb] = 0.0
else:
distance_matrix[ka][kb] = distance(va, vb)
| 1,354 | 29.111111 | 81 |
py
|
simanneal
|
simanneal-master/simanneal/__init__.py
|
from __future__ import absolute_import
from .anneal import Annealer
__all__ = ['Annealer']
__version__ = "0.5.0"
| 114 | 18.166667 | 38 |
py
|
simanneal
|
simanneal-master/simanneal/anneal.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import copy
import datetime
import math
import pickle
import random
import signal
import sys
import time
def round_figures(x, n):
"""Returns x rounded to n significant figures."""
return round(x, int(n - math.ceil(math.log10(abs(x)))))
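# For example: round_figures(1234, 2) -> 1200 and round_figures(0.01234, 2) -> 0.012.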
def time_string(seconds):
"""Returns time in seconds as a string formatted HHHH:MM:SS."""
s = int(round(seconds)) # round to nearest second
h, s = divmod(s, 3600) # get hours and remainder
m, s = divmod(s, 60) # split remainder into minutes and seconds
return '%4i:%02i:%02i' % (h, m, s)
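# For example: time_string(3661) -> '   1:01:01' (one hour, one minute, one second).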
class Annealer(object):
"""Performs simulated annealing by calling functions to calculate
energy and make moves on a state. The temperature schedule for
annealing may be provided manually or estimated automatically.
"""
__metaclass__ = abc.ABCMeta
# defaults
Tmax = 25000.0
Tmin = 2.5
steps = 50000
updates = 100
copy_strategy = 'deepcopy'
user_exit = False
save_state_on_exit = False
# placeholders
best_state = None
best_energy = None
start = None
def __init__(self, initial_state=None, load_state=None):
if initial_state is not None:
self.state = self.copy_state(initial_state)
elif load_state:
self.load_state(load_state)
else:
            raise ValueError('No valid values supplied for either '
                             'initial_state or load_state')
signal.signal(signal.SIGINT, self.set_user_exit)
def save_state(self, fname=None):
"""Saves state to pickle"""
if not fname:
date = datetime.datetime.now().strftime("%Y-%m-%dT%Hh%Mm%Ss")
fname = date + "_energy_" + str(self.energy()) + ".state"
with open(fname, "wb") as fh:
pickle.dump(self.state, fh)
def load_state(self, fname=None):
"""Loads state from pickle"""
with open(fname, 'rb') as fh:
self.state = pickle.load(fh)
@abc.abstractmethod
def move(self):
"""Create a state change"""
pass
@abc.abstractmethod
def energy(self):
"""Calculate state's energy"""
pass
def set_user_exit(self, signum, frame):
"""Raises the user_exit flag, further iterations are stopped
"""
self.user_exit = True
def set_schedule(self, schedule):
"""Takes the output from `auto` and sets the attributes
"""
self.Tmax = schedule['tmax']
self.Tmin = schedule['tmin']
self.steps = int(schedule['steps'])
self.updates = int(schedule['updates'])
def copy_state(self, state):
"""Returns an exact copy of the provided state
Implemented according to self.copy_strategy, one of
* deepcopy: use copy.deepcopy (slow but reliable)
* slice: use list slices (faster but only works if state is list-like)
* method: use the state's copy() method
"""
if self.copy_strategy == 'deepcopy':
return copy.deepcopy(state)
elif self.copy_strategy == 'slice':
return state[:]
elif self.copy_strategy == 'method':
return state.copy()
else:
raise RuntimeError('No implementation found for ' +
'the self.copy_strategy "%s"' %
self.copy_strategy)
def update(self, *args, **kwargs):
"""Wrapper for internal update.
If you override the self.update method,
        you can choose to call the self.default_update method
from your own Annealer.
"""
self.default_update(*args, **kwargs)
def default_update(self, step, T, E, acceptance, improvement):
"""Default update, outputs to stderr.
Prints the current temperature, energy, acceptance rate,
improvement rate, elapsed time, and remaining time.
The acceptance rate indicates the percentage of moves since the last
update that were accepted by the Metropolis algorithm. It includes
moves that decreased the energy, moves that left the energy
unchanged, and moves that increased the energy yet were reached by
thermal excitation.
The improvement rate indicates the percentage of moves since the
last update that strictly decreased the energy. At high
temperatures it will include both moves that improved the overall
state and moves that simply undid previously accepted moves that
        increased the energy by thermal excitation. At low temperatures
it will tend toward zero as the moves that can decrease the energy
are exhausted and moves that would increase the energy are no longer
thermally accessible."""
elapsed = time.time() - self.start
if step == 0:
print('\n Temperature Energy Accept Improve Elapsed Remaining',
file=sys.stderr)
print('\r{Temp:12.5f} {Energy:12.2f} {Elapsed:s} '
.format(Temp=T,
Energy=E,
Elapsed=time_string(elapsed)),
file=sys.stderr, end="")
sys.stderr.flush()
else:
remain = (self.steps - step) * (elapsed / step)
print('\r{Temp:12.5f} {Energy:12.2f} {Accept:7.2%} {Improve:7.2%} {Elapsed:s} {Remaining:s}'
.format(Temp=T,
Energy=E,
Accept=acceptance,
Improve=improvement,
Elapsed=time_string(elapsed),
Remaining=time_string(remain)),
file=sys.stderr, end="")
sys.stderr.flush()
def anneal(self):
"""Minimizes the energy of a system by simulated annealing.
        Operates on self.state, which is set through the constructor's
        initial_state argument or through load_state.
Returns
(state, energy): the best state and energy found.
"""
step = 0
self.start = time.time()
# Precompute factor for exponential cooling from Tmax to Tmin
if self.Tmin <= 0.0:
            raise Exception('Exponential cooling requires a minimum '
                            'temperature greater than zero.')
Tfactor = -math.log(self.Tmax / self.Tmin)
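        # With this factor the schedule is
        #   T(step) = Tmax * exp(Tfactor * step / steps)
        #           = Tmax * (Tmin / Tmax) ** (step / steps),
        # so it starts at Tmax (step 0) and ends at Tmin (step == steps).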
# Note initial state
T = self.Tmax
E = self.energy()
prevState = self.copy_state(self.state)
prevEnergy = E
self.best_state = self.copy_state(self.state)
self.best_energy = E
trials = accepts = improves = 0
if self.updates > 0:
updateWavelength = self.steps / self.updates
self.update(step, T, E, None, None)
# Attempt moves to new states
while step < self.steps and not self.user_exit:
step += 1
T = self.Tmax * math.exp(Tfactor * step / self.steps)
dE = self.move()
if dE is None:
E = self.energy()
dE = E - prevEnergy
else:
E += dE
trials += 1
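            # Metropolis criterion: downhill (or equal-energy) moves are always
            # accepted; uphill moves are accepted with probability exp(-dE / T).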
if dE > 0.0 and math.exp(-dE / T) < random.random():
# Restore previous state
self.state = self.copy_state(prevState)
E = prevEnergy
else:
# Accept new state and compare to best state
accepts += 1
if dE < 0.0:
improves += 1
prevState = self.copy_state(self.state)
prevEnergy = E
if E < self.best_energy:
self.best_state = self.copy_state(self.state)
self.best_energy = E
if self.updates > 1:
if (step // updateWavelength) > ((step - 1) // updateWavelength):
self.update(
step, T, E, accepts / trials, improves / trials)
trials = accepts = improves = 0
self.state = self.copy_state(self.best_state)
if self.save_state_on_exit:
self.save_state()
# Return best state and energy
return self.best_state, self.best_energy
def auto(self, minutes, steps=2000):
"""Explores the annealing landscape and
estimates optimal temperature settings.
Returns a dictionary suitable for the `set_schedule` method.
"""
def run(T, steps):
"""Anneals a system at constant temperature and returns the state,
energy, rate of acceptance, and rate of improvement."""
E = self.energy()
prevState = self.copy_state(self.state)
prevEnergy = E
accepts, improves = 0, 0
for _ in range(steps):
dE = self.move()
if dE is None:
E = self.energy()
dE = E - prevEnergy
else:
E = prevEnergy + dE
if dE > 0.0 and math.exp(-dE / T) < random.random():
self.state = self.copy_state(prevState)
E = prevEnergy
else:
accepts += 1
if dE < 0.0:
improves += 1
prevState = self.copy_state(self.state)
prevEnergy = E
return E, float(accepts) / steps, float(improves) / steps
step = 0
self.start = time.time()
# Attempting automatic simulated anneal...
# Find an initial guess for temperature
T = 0.0
E = self.energy()
self.update(step, T, E, None, None)
while T == 0.0:
step += 1
dE = self.move()
if dE is None:
dE = self.energy() - E
T = abs(dE)
# Search for Tmax - a temperature that gives 98% acceptance
E, acceptance, improvement = run(T, steps)
step += steps
while acceptance > 0.98:
T = round_figures(T / 1.5, 2)
E, acceptance, improvement = run(T, steps)
step += steps
self.update(step, T, E, acceptance, improvement)
while acceptance < 0.98:
T = round_figures(T * 1.5, 2)
E, acceptance, improvement = run(T, steps)
step += steps
self.update(step, T, E, acceptance, improvement)
Tmax = T
# Search for Tmin - a temperature that gives 0% improvement
while improvement > 0.0:
T = round_figures(T / 1.5, 2)
E, acceptance, improvement = run(T, steps)
step += steps
self.update(step, T, E, acceptance, improvement)
Tmin = T
# Calculate anneal duration
elapsed = time.time() - self.start
duration = round_figures(int(60.0 * minutes * step / elapsed), 2)
# Don't perform anneal, just return params
return {'tmax': Tmax, 'tmin': Tmin, 'steps': duration, 'updates': self.updates}
| 11,213 | 34.6 | 111 |
py
|
EASIER
|
EASIER-master/easier-dataAnalyst/src/myfunction.py
|
#myfunction.py
import numpy as np
from math import pow, sqrt
from operator import itemgetter
def norm2perfq(min_perfq, max_perfq, value, min, max):
return min_perfq + (max_perfq - min_perfq) * (value - min) / (max - min)
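# For example, norm2perfq(0.0, 1.0, 5.0, 0.0, 10.0) -> 0.5 (linearly rescales 5
# from the [0, 10] range onto the [0, 1] range).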
def getpoint(data, index):
try:
return data[np.where(data[:, 0] == index)[0][0], 1:4]
except IndexError:
print(index)
return None
def distance(a, b):
return sqrt(pow((a[0] - b[0]), 2) + pow((a[1] - b[1]), 2) + pow((a[2] - b[2]), 2))
def distance_from_perfq(a, b):
return abs(a[1] - b[1])
def remove_pareto_solutions(data, pareto):
return {sol[0]: min([distance(sol[1:4], getpoint(data, par)) for par in pareto]) for sol in data}
def calculate_distance_from_pareto(data, pareto):
return {sol[0]: min([distance(sol[1:4], getpoint(data, par)) for par in pareto]) for sol in data}
def scatter_near_solution(ax, data, pareto, m, c, label='', delta=5):
distance_from_pareto = calculate_distance_from_pareto(data, pareto)
neighbours = {k: v for k, v in distance_from_pareto.items() if v != 0 and v <= delta}
matrix = np.matrix([getpoint(data, sol) for sol in neighbours])
if matrix.size == 0:
print('No close solutions in delta = '+str(delta))
else:
ax.scatter(matrix[:, 0], matrix[:, 1], matrix[:, 2], c=c, marker=m, label=label)
def plot_pareto(ax, data, pareto, m, c, l):
# ax.plot(pareto[:, 0], pareto[:, 1], pareto[:, 2], c=color, marker=marker)
matrix = get_sorted_matrix(data, pareto)
ax.scatter(matrix[:, 0], matrix[:, 1], matrix[:, 2], c=c, marker=m)
ax.plot(matrix[:, 0].A1, matrix[:, 1].A1, matrix[:, 2].A1, c=c, label=l)
def get_sorted_matrix(data, sol):
return np.matrix(sorted([getpoint(data, s) for s in sol], key=itemgetter(0)))
| 1,787 | 30.928571 | 101 |
py
|
EASIER
|
EASIER-master/easier-dataAnalyst/src/easier-lengths.py
|
# 0 - solution number,
# 1 - PA,
# 2 - PerfQ,
# 3 - Dist
import numpy as np
import myfunction
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
base_dir = '../data/'
# Near-Pareto frontier from a chromosome of length 4
pareto_folder_l4 = base_dir + 'FTA/length_4/pareto/'
file_l4 = pareto_folder_l4 + 'P_128_E_720_X_0.8_M_0.2_solutions.csv'
pareto_l4 = [1155, 1211, 1240, 208, 484, 938]
data_l4 = np.loadtxt(open(file_l4, "rb"), delimiter=";") # , skiprows=1)
min_pa_l4 = min(data_l4[:, 1])
max_pa_l4 = max(data_l4[:, 1])
min_perfq_l4 = min(data_l4[:, 2])
max_perfq_l4 = max(data_l4[:, 2])
min_dist_l4 = min(data_l4[:, 3])
max_dist_l4 = max(data_l4[:, 3])
# Near-Pareto frontier from a chromosome of length 6
pareto_folder_l6 = base_dir + 'FTA/length_6/pareto/'
file_l6 = pareto_folder_l6 + 'P_128_E_720_X_0.8_M_0.2_solutions.csv'
pareto_l6 = [1100, 1174, 1254, 298, 457, 613, 935, 961]
data_l6 = np.loadtxt(open(file_l6, "rb"), delimiter=";") # , skiprows=1)
min_pa_l6 = min(data_l6[:, 1])
max_pa_l6 = max(data_l6[:, 1])
min_perfq_l6 = min(data_l6[:, 2])
max_perfq_l6 = max(data_l6[:, 2])
min_dist_l6 = min(data_l6[:, 3])
max_dist_l6 = max(data_l6[:, 3])
# Near-Pareto frontier from a chromosome of length 8
pareto_folder_l8 = base_dir + 'FTA/length_8/pareto/'
file_l8 = pareto_folder_l8 + 'P_128_E_720_X_0.8_M_0.2_solutions.csv'
pareto_l8 = [1056, 1113, 1163, 584]
data_l8 = np.loadtxt(open(file_l8, "rb"), delimiter=";") # , skiprows=1)
min_pa_l8 = min(data_l8[:, 1])
max_pa_l8 = max(data_l8[:, 1])
min_perfq_l8 = min(data_l8[:, 2])
max_perfq_l8 = max(data_l8[:, 2])
min_dist_l8 = min(data_l8[:, 3])
max_dist_l8 = max(data_l8[:, 3])
# Near-Pareto frontier from a chromosome of length 10
pareto_folder_l10 = base_dir + 'FTA/length_10/pareto/'
file_l10 = pareto_folder_l10 + 'P_128_E_720_X_0.8_M_0.2_solutions.csv'
pareto_l10 = [1054, 1128, 1156, 1170, 1179, 222, 419, 721]
data_l10 = np.loadtxt(open(file_l10, "rb"), delimiter=";") # , skiprows=1)
min_pa_l10 = min(data_l10[:, 1])
max_pa_l10 = max(data_l10[:, 1])
min_perfq_l10 = min(data_l10[:, 2])
max_perfq_l10 = max(data_l10[:, 2])
min_dist_l10 = min(data_l10[:, 3])
max_dist_l10 = max(data_l10[:, 3])
# normalize PA with respect to PerfQ
data_l4[:, 1] = myfunction.norm2perfq(min_perfq_l4, max_perfq_l4, data_l4[:, 1], min_pa_l4, max_pa_l4)
data_l6[:, 1] = myfunction.norm2perfq(min_perfq_l6, max_perfq_l6, data_l6[:, 1], min_pa_l6, max_pa_l6)
data_l8[:, 1] = myfunction.norm2perfq(min_perfq_l8, max_perfq_l8, data_l8[:, 1], min_pa_l8, max_pa_l8)
data_l10[:, 1] = myfunction.norm2perfq(min_perfq_l10, max_perfq_l10, data_l10[:, 1], min_pa_l10, max_pa_l10)
# normalize Dist with respect to PerfQ
data_l4[:,3] = myfunction.norm2perfq(min_perfq_l4, max_perfq_l4, data_l4[:,3], min_dist_l4, max_dist_l4)
data_l6[:,3] = myfunction.norm2perfq(min_perfq_l6, max_perfq_l6, data_l6[:,3], min_dist_l6, max_dist_l6)
data_l8[:,3] = myfunction.norm2perfq(min_perfq_l8, max_perfq_l8, data_l8[:,3], min_dist_l8, max_dist_l8)
data_l10[:,3] = myfunction.norm2perfq(min_perfq_l10, max_perfq_l10, data_l10[:,3], min_dist_l10, max_dist_l10)
# scale values
for i in range(1, 3):
# mean-center data
data_l4[:, i] -= data_l4[:, i].mean()
data_l6[:, i] -= data_l6[:, i].mean()
data_l8[:, i] -= data_l8[:, i].mean()
data_l10[:, i] -= data_l10[:, i].mean()
# divide by the standard deviation
data_l4[:, i] /= data_l4[:, i].std()
data_l6[:, i] /= data_l6[:, i].std()
data_l8[:, i] /= data_l8[:, i].std()
data_l10[:, i] /= data_l10[:, i].std()
# compute distances from pareto solutions
# plot the number of solution as delta increases
# sol_by_delta = [len({k:v for k,v in distances.items() if v <= delta}) for delta in np.arange(0.0, max(distances), 0.01)]
# max_by_delta = sol_by_delta.index(max(sol_by_delta))
# fig = plt.figure(figsize=(20, 15))
# plt.plot(np.arange(0.0, max_by_delta*0.01, 0.01), sol_by_delta[:max_by_delta])
# plt.grid(True)
# plt.show()
# remove solutions more distant than delta
# delta = max(distances)
delta = 0.038
fig = plt.figure(figsize=(15, 10))
fig.suptitle('easier_lengths', fontsize=16)
ax = fig.add_subplot(111, projection='3d')
ax.view_init(25, 15)
ax.set_xlabel('PA')
ax.set_ylabel('PerfQ')
ax.set_zlabel('Dist')
# pareto solutions and close solutions (delta)
myfunction.plot_pareto(ax, data_l4, pareto_l4, 's', '#000000', 'pareto_plot_l4')
myfunction.plot_pareto(ax, data_l6, pareto_l6, '^', '#000099', 'pareto_plot_l6')
myfunction.plot_pareto(ax, data_l8, pareto_l8, 'o', '#66FF11', 'pareto_plot_l8')
myfunction.plot_pareto(ax, data_l10, pareto_l10, 'x', 'r', 'pareto_plot_l10')
# myfunction.scatter_near_solution(ax, data_l4, pareto_l4, 's', '#000000', delta)
# myfunction.scatter_near_solution(ax, data_l6, pareto_l6, '^', '#000099', delta)
# myfunction.scatter_near_solution(ax, data_l8, pareto_l8, 'o', '#66FF11', delta)
# myfunction.scatter_near_solution(ax, data_l10, pareto_l10, 'x', 'r', delta)
ax.legend()
plt.show()
| 4,969 | 38.444444 | 122 |
py
|
EASIER
|
EASIER-master/easier-dataAnalyst/src/easier_population.py
|
#easier_long
import numpy as np
import myfunction
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
base_dir = '../data/'
# Near-Pareto frontier, population 64 (E = 1280)
pareto_folder_64_E1280 = base_dir + 'FTA/64/lenght_4/pareto/P_64_E_1280_X_0.8_M_0.2__18.10.03.11.03.58__/'
file_64_E1280 = pareto_folder_64_E1280 + 'P_64_E_1280_X_0.8_M_0.2_solutions.csv'
pareto_64_E1280 = [1004, 707, 801]
data_64_E1280 = np.loadtxt(open(file_64_E1280, "rb"), delimiter=";") # , skiprows=1)
min_pa_64_E1280 = min(data_64_E1280[:, 1])
max_pa_64_E1280 = max(data_64_E1280[:, 1])
min_perfq_64_E1280 = min(data_64_E1280[:, 2])
max_perfq_64_E1280 = max(data_64_E1280[:, 2])
min_dist_64_E1280 = min(data_64_E1280[:, 3])
max_dist_64_E1280 = max(data_64_E1280[:, 3])
# Near-Pareto frontier, population 128 (E = 1280)
pareto_folder_128_E1280 = base_dir + 'FTA/128/lenght_4/pareto/P_128_E_1280_X_0.8_M_0.2__18.10.03.13.22.43__/'
file_128_E1280 = pareto_folder_128_E1280 + 'P_128_E_1280_X_0.8_M_0.2_solutions.csv'
pareto_128_E1280 = [114, 1209, 1797, 186, 1868, 2189, 357]
data_128_E1280 = np.loadtxt(open(file_128_E1280, "rb"), delimiter=";") # , skiprows=1)
min_pa_128_E1280= min(data_128_E1280[:, 1])
max_pa_128_E1280 = max(data_128_E1280[:, 1])
min_perfq_128_E1280 = min(data_128_E1280[:, 2])
max_perfq_128_E1280 = max(data_128_E1280[:, 2])
min_dist_128_E1280 = min(data_128_E1280[:, 3])
max_dist_128_E1280 = max(data_128_E1280[:, 3])
# Near-Pareto frontier, population 256 (E = 1280)
pareto_folder_256_E1280 = base_dir + 'FTA/256/lenght_4/pareto/P_256_E_1280_X_0.8_M_0.2__18.10.03.15.19.03__/'
file_256_E1280 = pareto_folder_256_E1280 + 'P_256_E_1280_X_0.8_M_0.2_solutions.csv'
pareto_256_E1280 = [1210, 1376, 1468, 1545, 1685, 1749, 1998, 973]
data_256_E1280 = np.loadtxt(open(file_256_E1280, "rb"), delimiter=";") # , skiprows=1)
min_pa_256_E1280= min(data_256_E1280[:, 1])
max_pa_256_E1280 = max(data_256_E1280[:, 1])
min_perfq_256_E1280 = min(data_256_E1280[:, 2])
max_perfq_256_E1280 = max(data_256_E1280[:, 2])
min_dist_256_E1280 = min(data_256_E1280[:, 3])
max_dist_256_E1280 = max(data_256_E1280[:, 3])
# Near-Pareto frontier, population 512 (E = 1280)
pareto_folder_512_E1280 = base_dir + 'FTA/512/lenght_4/pareto/P_512_E_1280_X_0.8_M_0.2__18.10.03.17.05.38__/'
file_512_E1280 = pareto_folder_512_E1280 + 'P_512_E_1280_X_0.8_M_0.2_solutions.csv'
pareto_512_E1280 = [1105, 1348, 1738, 2096, 930]
data_512_E1280 = np.loadtxt(open(file_512_E1280, "rb"), delimiter=";") # , skiprows=1)
min_pa_512_E1280 = min(data_512_E1280[:, 1])
max_pa_512_E1280 = max(data_512_E1280[:, 1])
min_perfq_512_E1280 = min(data_512_E1280[:, 2])
max_perfq_512_E1280 = max(data_512_E1280[:, 2])
min_dist_512_E1280 = min(data_512_E1280[:, 3])
max_dist_512_E1280 = max(data_512_E1280[:, 3])
# normalize PA with respect to PerfQ
#data_64_E1280[:, 1] = myfunction.norm2perfq(min_perfq_64_E1280, max_perfq_64_E1280, data_64_E1280[:, 1], min_pa_64_E1280, max_pa_64_E1280)
# normalize Dist with respect to PerfQ
#data_64_E1280[:,3] = myfunction.norm2perfq(min_perfq_64_E1280, max_perfq_64_E1280, data_64_E1280[:,3], min_dist_64_E1280, max_dist_64_E1280)
# scale values
for i in range(1, 3):
# mean-center data
data_64_E1280[:, i] -= data_64_E1280[:, i].mean()
data_128_E1280[:, i] -= data_128_E1280[:, i].mean()
data_256_E1280[:, i] -= data_256_E1280[:, i].mean()
data_512_E1280[:, i] -= data_512_E1280[:, i].mean()
# divide by the standard deviation
data_64_E1280[:, i] /= data_64_E1280[:, i].std()
data_128_E1280[:, i] /= data_128_E1280[:, i].std()
data_256_E1280[:, i] /= data_256_E1280[:, i].std()
data_512_E1280[:, i] /= data_512_E1280[:, i].std()
# delta = max(distances)
delta = 0.2
fig = plt.figure(figsize=(30, 20))
fig.suptitle('easier_popolation', fontsize=16)
ax = fig.add_subplot(111, projection='3d')
ax.view_init(25, 15)
ax.set_xlabel('PA')
ax.set_ylabel('PerfQ')
ax.set_zlabel('Dist')
# pareto solutions and close solutions (delta)
myfunction.plot_pareto(ax, data_64_E1280, pareto_64_E1280, 's', 'r', 'pareto_plot_64_E1280')
# myfunction.scatter_near_solution(ax, data_64_E1280, pareto_64_E1280, 's', '#000000', 'solutions_64_E1280', delta)
myfunction.plot_pareto(ax, data_128_E1280, pareto_128_E1280, 'o', 'green', 'pareto_plot_128_E1280')
# myfunction.scatter_near_solution(ax, data_128_E1280, pareto_128_E1280, 'o', 'b', 'solutions_128_E1280', delta)
myfunction.plot_pareto(ax, data_256_E1280, pareto_256_E1280, '^', 'b', 'pareto_plot_256_E1280')
# myfunction.scatter_near_solution(ax, data_256_E1280, pareto_256_E1280, '^', 'green', 'solutions_256_E1280', delta)
myfunction.plot_pareto(ax, data_512_E1280, pareto_512_E1280, '+', '#ff9933', 'pareto_plot_512_E1280')
# myfunction.scatter_near_solution(ax, data_512_E1280, pareto_512_E1280, '+', 'grey', 'solutions_512_E1280', delta)
ax.legend()
plt.show()
| 4,750 | 42.990741 | 141 |
py
|
EASIER
|
EASIER-master/easier-dataAnalyst/src/easier_long.py
|
#easier_long
import numpy as np
import myfunction
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
base_dir = '../data/'
# Near-Pareto frontier from a chromosome long 4
pareto_folder_64_l4 = base_dir + 'FTA/64/lenght_4/pareto/P_64_E_1280_X_0.8_M_0.2__18.10.03.11.03.58__/'
file_64_l4 = pareto_folder_64_l4 + 'P_64_E_1280_X_0.8_M_0.2_solutions.csv'
pareto_64_l4 = [1004, 707, 801]
data_64_l4 = np.loadtxt(open(file_64_l4, "rb"), delimiter=";") # , skiprows=1)
min_pa_64_l4 = min(data_64_l4[:, 1])
max_pa_64_l4 = max(data_64_l4[:, 1])
min_perfq_64_l4 = min(data_64_l4[:, 2])
max_perfq_64_l4 = max(data_64_l4[:, 2])
min_dist_64_l4 = min(data_64_l4[:, 3])
max_dist_64_l4 = max(data_64_l4[:, 3])
# Near-Pareto frontier from a chromosome long 6
pareto_folder_64_l6 = base_dir + 'FTA/64/lenght_6/pareto/P_64_E_1280_X_0.8_M_0.2__18.10.03.19.25.59__/'
file_64_l6 = pareto_folder_64_l6 + 'P_64_E_1280_X_0.8_M_0.2_solutions.csv'
pareto_64_l6 = [1386, 1405, 1615, 1760, 355, 775]
data_64_l6 = np.loadtxt(open(file_64_l6, "rb"), delimiter=";") # , skiprows=1)
min_pa_64_l6= min(data_64_l6[:, 1])
max_pa_64_l6 = max(data_64_l6[:, 1])
min_perfq_64_l6 = min(data_64_l6[:, 2])
max_perfq_64_l6 = max(data_64_l6[:, 2])
min_dist_64_l6 = min(data_64_l6[:, 3])
max_dist_64_l6 = max(data_64_l6[:, 3])
# Near-Pareto frontier from a chromosome long 8
pareto_folder_64_l8 = base_dir + 'FTA/64/lenght_8/pareto/P_64_E_1280_X_0.8_M_0.2__18.10.04.06.45.58__/'
file_64_l8 = pareto_folder_64_l8 + 'P_64_E_1280_X_0.8_M_0.2_solutions.csv'
pareto_64_l8 = [1238, 1409, 1603, 2046, 2246, 843]
data_64_l8 = np.loadtxt(open(file_64_l8, "rb"), delimiter=";") # , skiprows=1)
min_pa_64_l8= min(data_64_l8[:, 1])
max_pa_64_l8 = max(data_64_l8[:, 1])
min_perfq_64_l8 = min(data_64_l8[:, 2])
max_perfq_64_l8 = max(data_64_l8[:, 2])
min_dist_64_l8 = min(data_64_l8[:, 3])
max_dist_64_l8 = max(data_64_l8[:, 3])
# normalize PA with respect to PerfQ
#data_64_l4[:, 1] = myfunction.norm2perfq(min_perfq_64_l4, max_perfq_64_l4, data_64_l4[:, 1], min_pa_64_l4, max_pa_64_l4)
# normalize Dist with respect to PerfQ
#data_64_l4[:,3] = myfunction.norm2perfq(min_perfq_64_l4, max_perfq_64_l4, data_64_l4[:,3], min_dist_64_l4, max_dist_64_l4)
# scale values
for i in range(1, 3):
# mean-center data
data_64_l4[:, i] -= data_64_l4[:, i].mean()
data_64_l6[:, i] -= data_64_l6[:, i].mean()
data_64_l8[:, i] -= data_64_l8[:, i].mean()
# divide by the standard deviation
data_64_l4[:, i] /= data_64_l4[:, i].std()
data_64_l6[:, i] /= data_64_l6[:, i].std()
data_64_l8[:, i] /= data_64_l8[:, i].std()
# delta = max(distances)
delta = 0.2
fig = plt.figure(figsize=(30, 20))
fig.suptitle('easier_long', fontsize=16)
ax = fig.add_subplot(111, projection='3d')
ax.view_init(25, 15)
ax.set_xlabel('PA')
ax.set_ylabel('PerfQ')
ax.set_zlabel('Dist')
# pareto solutions and close solutions (delta)
myfunction.plot_pareto(ax, data_64_l4, pareto_64_l4, 's', 'r', 'pareto_plot_64_l4')
myfunction.scatter_near_solution(ax, data_64_l4, pareto_64_l4, 's', '#000000', 'solutions_64_l4', delta)
myfunction.plot_pareto(ax, data_64_l6, pareto_64_l6, 'o', 'green', 'pareto_plot_64_l6')
myfunction.scatter_near_solution(ax, data_64_l6, pareto_64_l6, 'o', 'b', 'solutions_64_l6', delta)
myfunction.plot_pareto(ax, data_64_l8, pareto_64_l8, '^', 'black', 'pareto_plot_64_l8')
myfunction.scatter_near_solution(ax, data_64_l8, pareto_64_l8, '^', 'black', 'solutions_64_l8', delta)
ax.legend()
plt.show()
| 3,488 | 37.340659 | 123 |
py
|
EASIER
|
EASIER-master/easier-dataAnalyst/src/easier_evolution.py
|
#easier_long
import numpy as np
import myfunction
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
base_dir = '../data/'
# Near-Pareto frontier, 1280 evaluations (chromosome length 4)
pareto_folder_64_E1280 = base_dir + 'FTA/64/lenght_4/pareto/P_64_E_1280_X_0.8_M_0.2__18.10.03.11.03.58__/'
file_64_E1280 = pareto_folder_64_E1280 + 'P_64_E_1280_X_0.8_M_0.2_solutions.csv'
pareto_64_E1280 = [1004, 707, 801]
data_64_E1280 = np.loadtxt(open(file_64_E1280, "rb"), delimiter=";") # , skiprows=1)
min_pa_64_E1280 = min(data_64_E1280[:, 1])
max_pa_64_E1280 = max(data_64_E1280[:, 1])
min_perfq_64_E1280 = min(data_64_E1280[:, 2])
max_perfq_64_E1280 = max(data_64_E1280[:, 2])
min_dist_64_E1280 = min(data_64_E1280[:, 3])
max_dist_64_E1280 = max(data_64_E1280[:, 3])
# Near-Pareto frontier, 1600 evaluations (chromosome length 4)
pareto_folder_64_E1600 = base_dir + 'FTA/64/lenght_4/pareto/P_64_E_1600_X_0.8_M_0.2__18.10.03.11.47.23__/'
file_64_E1600 = pareto_folder_64_E1600 + 'P_64_E_1600_X_0.8_M_0.2_solutions.csv'
pareto_64_E1600 = [2027, 2048, 2155, 2225, 2819, 576]
data_64_E1600 = np.loadtxt(open(file_64_E1600, "rb"), delimiter=";") # , skiprows=1)
min_pa_64_E1600= min(data_64_E1600[:, 1])
max_pa_64_E1600 = max(data_64_E1600[:, 1])
min_perfq_64_E1600 = min(data_64_E1600[:, 2])
max_perfq_64_E1600 = max(data_64_E1600[:, 2])
min_dist_64_E1600 = min(data_64_E1600[:, 3])
max_dist_64_E1600 = max(data_64_E1600[:, 3])
# Near-Pareto frontier, 960 evaluations (chromosome length 4)
pareto_folder_64_E960 = base_dir + 'FTA/64/lenght_4/pareto/P_64_E_960_X_0.8_M_0.2__18.10.03.10.16.12__/'
file_64_E960 = pareto_folder_64_E960 + 'P_64_E_960_X_0.8_M_0.2_solutions.csv'
pareto_64_E960 = [1026, 595, 706, 782]
data_64_E960 = np.loadtxt(open(file_64_E960, "rb"), delimiter=";") # , skiprows=1)
min_pa_64_E960= min(data_64_E960[:, 1])
max_pa_64_E960 = max(data_64_E960[:, 1])
min_perfq_64_E960 = min(data_64_E960[:, 2])
max_perfq_64_E960 = max(data_64_E960[:, 2])
min_dist_64_E960 = min(data_64_E960[:, 3])
max_dist_64_E960 = max(data_64_E960[:, 3])
# Near-Pareto frontier, 640 evaluations (chromosome length 4)
pareto_folder_64_E640 = base_dir + 'FTA/64/lenght_4/pareto/P_64_E_640_X_0.8_M_0.2__18.10.03.09.49.28__/'
file_64_E640 = pareto_folder_64_E640 + 'P_64_E_640_X_0.8_M_0.2_solutions.csv'
pareto_64_E640 = [257, 803, 810, 815]
data_64_E640 = np.loadtxt(open(file_64_E640, "rb"), delimiter=";") # , skiprows=1)
min_pa_64_E640 = min(data_64_E640[:, 1])
max_pa_64_E640 = max(data_64_E640[:, 1])
min_perfq_64_E640 = min(data_64_E640[:, 2])
max_perfq_64_E640 = max(data_64_E640[:, 2])
min_dist_64_E640 = min(data_64_E640[:, 3])
max_dist_64_E640 = max(data_64_E640[:, 3])
# normalize PA with respect to PerfQ
#data_64_E1280[:, 1] = myfunction.norm2perfq(min_perfq_64_E1280, max_perfq_64_E1280, data_64_E1280[:, 1], min_pa_64_E1280, max_pa_64_E1280)
# normalize Dist with respect to PerfQ
#data_64_E1280[:,3] = myfunction.norm2perfq(min_perfq_64_E1280, max_perfq_64_E1280, data_64_E1280[:,3], min_dist_64_E1280, max_dist_64_E1280)
# scale values
for i in range(1, 3):
# mean-center data
data_64_E1280[:, i] -= data_64_E1280[:, i].mean()
data_64_E1600[:, i] -= data_64_E1600[:, i].mean()
data_64_E960[:, i] -= data_64_E960[:, i].mean()
data_64_E640[:, i] -= data_64_E640[:, i].mean()
# divide by the standard deviation
data_64_E1280[:, i] /= data_64_E1280[:, i].std()
data_64_E1600[:, i] /= data_64_E1600[:, i].std()
data_64_E960[:, i] /= data_64_E960[:, i].std()
data_64_E640[:, i] /= data_64_E640[:, i].std()
# delta = max(distances)
delta = 0.2
fig = plt.figure(figsize=(30, 20))
fig.suptitle('easier_evolution', fontsize=16)
ax = fig.add_subplot(111, projection='3d')
ax.view_init(25, 15)
ax.set_xlabel('PA')
ax.set_ylabel('PerfQ')
ax.set_zlabel('Dist')
# pareto solutions and close solutions (delta)
myfunction.plot_pareto(ax, data_64_E640, pareto_64_E640, '+', '#ff9933', 'pareto_plot_64_E640')
# myfunction.scatter_near_solution(ax, data_64_E640, pareto_64_E640, '+', 'grey', 'solutions_64_E640', delta)
myfunction.plot_pareto(ax, data_64_E960, pareto_64_E960, '^', 'b', 'pareto_plot_64_E960')
# myfunction.scatter_near_solution(ax, data_64_E960, pareto_64_E960, '^', 'green', 'solutions_64_E960', delta)
myfunction.plot_pareto(ax, data_64_E1280, pareto_64_E1280, 's', 'r', 'pareto_plot_64_E1280')
# myfunction.scatter_near_solution(ax, data_64_E1280, pareto_64_E1280, 's', '#000000', 'solutions_64_E1280', delta)
myfunction.plot_pareto(ax, data_64_E1600, pareto_64_E1600, 'o', 'green', 'pareto_plot_64_E1600')
# myfunction.scatter_near_solution(ax, data_64_E1600, pareto_64_E1600, 'o', 'b', 'solutions_64_E1600', delta)
ax.legend()
plt.show()
| 4,659 | 41.363636 | 141 |
py
|
AnalyzeParameterEfficientFinetune
|
AnalyzeParameterEfficientFinetune-main/src/zlog.py
|
import os
import time
import torch
import traceback
from contextlib import contextmanager
from tensorboardX import SummaryWriter # maple
import jiant.utils.python.io as py_io
import jiant.utils.python.filesystem as filesystem
class BaseZLogger:
def log_context(self):
raise NotImplementedError()
def write_entry(self, key, entry):
raise NotImplementedError()
def write_obj(self, key, obj, entry):
raise NotImplementedError()
def flush(self):
raise NotImplementedError()
class ZLogger(BaseZLogger):
def __init__(self, fol_path, log_errors=True, overwrite=False):
self.fol_path = fol_path
self.log_errors = log_errors
self.overwrite = overwrite
self.write_mode = "w" if overwrite else "a"
os.makedirs(fol_path)
self.handles = {}
self.tb_writer = SummaryWriter(fol_path)
def __exit__(self, type, value, traceback):
self.tb_writer.close()
@contextmanager
def log_context(self):
try:
yield self
except Exception:
if self.log_errors:
self.write_entry("errors", traceback.format_exc())
raise
finally:
for f in self.handles.values():
f.close()
def write_entry(self, key, entry, do_print=False):
if isinstance(entry, dict):
entry = entry.copy()
else:
entry = {"data": entry}
entry["TIMESTAMP"] = time.time()
self._write_entry_to_file(key=key, entry=entry)
if do_print:
print(entry)
if key in ['train_val', 'train_val_best']:
task = list(entry['train_state']['task_steps'].keys())[0]
self.tb_writer.add_scalar('%s/%s'%(task, key), entry['score'], entry['train_state']['global_steps'])
elif key == 'early_stopping':
pass
elif key == 'loss_train':
for e in entry:
if e.startswith('loss_'):
self.tb_writer.add_scalar('%s/%s'%(entry['task'], key), entry[e], entry['task_step'])
def write_obj(self, key, obj, entry):
assert "DATA" not in entry
if isinstance(entry, dict):
entry = entry.copy()
else:
entry = {"data": entry}
time_stamp = time.time()
entry["DATA"] = self._save_obj(key, time_stamp, obj)
entry["TIMESTAMP"] = time_stamp
self._write_entry_to_file(key=key, entry=entry)
def _save_obj(self, key, time_stamp, obj):
cache_path = self.get_cache_path(key)
os.makedirs(cache_path, exist_ok=True)
save_path = os.path.join(cache_path, str(time_stamp))
torch.save(obj, save_path)
return save_path
def check_handle_open(self, key):
if key in self.handles:
return
handle_path = self.get_path(key)
py_io.create_containing_folder(handle_path)
self.handles[key] = open(handle_path, self.write_mode)
def get_path(self, key):
return os.path.join(self.fol_path, key + ".zlog")
def get_cache_path(self, key):
return os.path.join(self.fol_path, key + "___CACHE")
def flush(self, key=None):
if key is None:
for f in self.handles.values():
f.flush()
elif isinstance(key, list):
for k in key:
self.handles[k].flush()
else:
self.handles[key].flush()
self.tb_writer.flush()
def _write_entry_to_file(self, key, entry):
self.check_handle_open(key)
self.handles[key].write(py_io.to_jsonl(entry) + "\n")
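# Hedged usage sketch (not part of jiant): how ZLogger is typically driven from a
# training loop. The temporary folder and the entry fields are illustrative assumptions;
# write_entry routes the "loss_*" fields of 'loss_train' entries to TensorBoard.
def _zlogger_usage_sketch():
    import tempfile
    fol_path = os.path.join(tempfile.mkdtemp(), "zlog_demo")
    logger = ZLogger(fol_path=fol_path, overwrite=True)
    with logger.log_context():
        logger.write_entry("loss_train", {"task": "mnli", "task_step": 0, "loss_total": 0.7})
        logger.flush("loss_train")
    return fol_path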
class ZBufferedLogger(ZLogger):
def __init__(
self,
fol_path,
default_buffer_size=1,
buffer_size_dict=None,
log_errors=True,
overwrite=False,
):
super().__init__(fol_path=fol_path, log_errors=log_errors, overwrite=overwrite)
self.default_buffer_size = default_buffer_size
self.buffer_size_dict = buffer_size_dict.copy() if buffer_size_dict else {}
self.buffer_dict = {}
def check_handle_open(self, key):
super().check_handle_open(key=key)
if key not in self.buffer_dict:
self.buffer_dict[key] = []
if key not in self.buffer_size_dict:
self.buffer_size_dict[key] = self.default_buffer_size
def _write_entry_to_file(self, key, entry):
self.check_handle_open(key)
self.buffer_dict[key].append(entry)
if len(self.buffer_dict[key]) >= self.buffer_size_dict[key]:
self.flush(key)
def _write_buffer(self, key):
if not self.buffer_dict[key]:
return
self.handles[key].write(
"".join(py_io.to_jsonl(entry) + "\n" for entry in self.buffer_dict[key])
)
self.buffer_dict[key] = []
def flush(self, key=None):
if key is None:
for k, f in self.handles.items():
self._write_buffer(k)
f.flush()
elif isinstance(key, list):
for k in key:
self._write_buffer(k)
self.handles[k].flush()
else:
self._write_buffer(key)
self.handles[key].flush()
class _VoidZLogger(BaseZLogger):
def log_context(self):
yield
def write_entry(self, key, entry):
pass
def write_obj(self, key, obj, entry):
pass
def flush(self):
pass
class _PrintZLogger(BaseZLogger):
def log_context(self):
yield
def write_entry(self, key, entry):
print(f"{key}: {entry}")
def write_obj(self, key, obj, entry):
print(f"{key}: {obj}")
def flush(self):
pass
class InMemoryZLogger(BaseZLogger):
def __init__(self):
self.entries = {}
self.data = {}
def log_context(self):
yield
def write_entry(self, key, entry):
if isinstance(entry, dict):
entry = entry.copy()
else:
entry = {"data": entry}
entry["TIMESTAMP"] = time.time()
self._write_entry(key=key, entry=entry)
def write_obj(self, key, obj, entry):
assert "DATA" not in entry
if isinstance(entry, dict):
entry = entry.copy()
else:
entry = {"data": entry}
time_stamp = time.time()
entry["DATA"] = obj
entry["TIMESTAMP"] = time_stamp
self._write_entry(key=key, entry=entry)
def _write_entry(self, key, entry):
if key not in self.entries:
self.entries[key] = []
self.entries[key].append(entry)
def flush(self):
pass
VOID_LOGGER = _VoidZLogger()
PRINT_LOGGER = _PrintZLogger()
def load_log(fol_path):
all_paths = filesystem.find_files_with_ext(fol_path, "zlog")
log_data = {}
for path in all_paths:
key = os.path.abspath(path).replace(os.path.abspath(fol_path), "")[1:].replace(".zlog", "")
log_data[key] = py_io.read_jsonl(path)
return log_data
| 7,042 | 27.864754 | 112 |
py
|
AnalyzeParameterEfficientFinetune
|
AnalyzeParameterEfficientFinetune-main/src/runscript.py
|
import os
import torch
from transformers import AutoConfig
import jiant.proj.main.write_task_configs as write_task_configs
import jiant.proj.main.export_model as export_model
#import jiant.proj.main.tokenize_and_cache as tokenize_and_cache
import tokenize_and_cache # maple
import jiant.proj.main.scripts.configurator as configurator
import main_runscript as runscript # maple
import jiant.shared.distributed as distributed
import jiant.utils.zconf as zconf
import jiant.utils.python.io as py_io
from jiant.utils.python.logic import replace_none
from jiant.utils.python.io import read_json
@zconf.run_config
class RunConfiguration(zconf.RunConfig):
# === User parameters === #
user_mode = zconf.attr(type=str, default="")
log_dir = zconf.attr(type=str, default=".")
do_test = zconf.attr(action="store_true")#maple
# === Required parameters === #
run_name = zconf.attr(type=str, required=True)
exp_dir = zconf.attr(type=str, required=True)
data_dir = zconf.attr(type=str, required=True)
# === Model parameters === #
hf_pretrained_model_name_or_path = zconf.attr(type=str, required=True)
model_weights_path = zconf.attr(type=str, default=None)
model_cache_path = zconf.attr(type=str, default=None)
# === Task parameters === #
tasks = zconf.attr(type=str, default=None)
train_tasks = zconf.attr(type=str, default=None)
val_tasks = zconf.attr(type=str, default=None)
test_tasks = zconf.attr(type=str, default=None)
# === Misc parameters === #
train_batch_size = zconf.attr(type=int, default=32)
max_seq_length = zconf.attr(type=int, default=256)
num_train_epochs = zconf.attr(type=float, default=3)
train_examples_cap = zconf.attr(type=int, default=None)
create_config = zconf.attr(action="store_true")
# === Running Setup === #
do_save = zconf.attr(action="store_true")
do_save_last = zconf.attr(action="store_true")
do_save_best = zconf.attr(action="store_true")
write_val_preds = zconf.attr(action="store_true")
write_test_preds = zconf.attr(action="store_true")
eval_every_steps = zconf.attr(type=int, default=0)
min_train_steps = zconf.attr(type=int, default=0)#maple
save_every_steps = zconf.attr(type=int, default=0)
save_checkpoint_every_steps = zconf.attr(type=int, default=0)
no_improvements_for_n_evals = zconf.attr(type=int, default=0)
keep_checkpoint_when_done = zconf.attr(action="store_true")
force_overwrite = zconf.attr(action="store_true")
seed = zconf.attr(type=int, default=-1)
# === Training Learning Parameters === #
learning_rate = zconf.attr(default=1e-5, type=float)
adam_epsilon = zconf.attr(default=1e-8, type=float)
max_grad_norm = zconf.attr(default=1.0, type=float)
optimizer_type = zconf.attr(default="adam", type=str)
# === Specialized config === #
no_cuda = zconf.attr(action="store_true")
fp16 = zconf.attr(action="store_true")
fp16_opt_level = zconf.attr(default="O1", type=str)
local_rank = zconf.attr(default=-1, type=int)
server_ip = zconf.attr(default="", type=str)
server_port = zconf.attr(default="", type=str)
def _post_init(self):
assert self.tasks or (
self.train_tasks or self.val_tasks or self.test_tasks
), "Must include tasks or one of train_tasks, val_tasks, tests_tasks"
if self.tasks and (self.train_tasks or self.val_tasks or self.test_tasks):
assert (
([self.tasks] == self.train_tasks)
and ([self.tasks] == self.val_tasks)
and ([self.tasks] == self.test_tasks)
), "Tasks must be same as train_tasks/val_tasks/test_tasks if both are present"
if self.tasks:
self.train_tasks = self.tasks
self.val_tasks = self.tasks
self.test_tasks = self.tasks
self.train_tasks = self.train_tasks.split(",")
self.val_tasks = self.val_tasks.split(",")
self.test_tasks = self.test_tasks.split(",")
def create_and_write_task_configs(task_name_list, data_dir, task_config_base_path):
os.makedirs(task_config_base_path, exist_ok=True)
task_config_path_dict = {}
for task_name in task_name_list:
task_config_path = os.path.join(task_config_base_path, f"{task_name}_config.json")
write_task_configs.create_and_write_task_config(
task_name=task_name,
task_data_dir=os.path.join(data_dir, task_name),
task_config_path=task_config_path,
)
task_config_path_dict[task_name] = task_config_path
return task_config_path_dict
def run_simple(args: RunConfiguration, with_continue: bool = False):
hf_config = AutoConfig.from_pretrained(args.hf_pretrained_model_name_or_path)
model_cache_path = replace_none(
args.model_cache_path, default=os.path.join(args.exp_dir, "models")
)
with distributed.only_first_process(local_rank=args.local_rank):
# === Step 1: Write task configs based on templates === #
full_task_name_list = sorted(list(set(args.train_tasks + args.val_tasks + args.test_tasks)))
task_config_path_dict = {}
if args.create_config:
task_config_path_dict = create_and_write_task_configs(
task_name_list=full_task_name_list,
data_dir=args.data_dir,
task_config_base_path=os.path.join(args.data_dir, "configs"),
)
else:
for task_name in full_task_name_list:
task_config_path_dict[task_name] = os.path.join(
args.data_dir, "configs", f"{task_name}_config.json"
)
# === Step 2: Download models === #
if not os.path.exists(os.path.join(model_cache_path, hf_config.model_type)):
print("Downloading model")
export_model.export_model(
hf_pretrained_model_name_or_path=args.hf_pretrained_model_name_or_path,
output_base_path=os.path.join(model_cache_path, hf_config.model_type),
)
# === Step 3: Tokenize and cache === #
phase_task_dict = {
"train": args.train_tasks,
"val": args.val_tasks,
"test": args.test_tasks,
}
for task_name in full_task_name_list:
phases_to_do = []
for phase, phase_task_list in phase_task_dict.items():
if task_name in phase_task_list and not os.path.exists(
os.path.join(args.exp_dir, "cache", hf_config.model_type, task_name, phase)
):
config = read_json(task_config_path_dict[task_name])
if phase in config["paths"]:
phases_to_do.append(phase)
else:
phase_task_list.remove(task_name)
if not phases_to_do:
continue
if args.do_test:#maple
phases_to_do.append("test_labels")
phases_to_do.append("train_labels")
print(f"Tokenizing Task '{task_name}' for phases '{','.join(phases_to_do)}'")
tokenize_and_cache.main(
tokenize_and_cache.RunConfiguration(
task_config_path=task_config_path_dict[task_name],
hf_pretrained_model_name_or_path=args.hf_pretrained_model_name_or_path,
output_dir=os.path.join(args.exp_dir, "cache", hf_config.model_type, task_name),
phases=phases_to_do,
# TODO: Need a strategy for task-specific max_seq_length issues (issue #1176)
max_seq_length=args.max_seq_length,
smart_truncate=True,
do_iter=True,
)
)
# === Step 4: Generate jiant_task_container_config === #
# We'll do this with a configurator. Creating a jiant_task_config has a surprising
# number of moving parts.
jiant_task_container_config = configurator.SimpleAPIMultiTaskConfigurator(
task_config_base_path=os.path.join(args.data_dir, "configs"),
task_cache_base_path=os.path.join(args.exp_dir, "cache", hf_config.model_type),
train_task_name_list=args.train_tasks,
val_task_name_list=args.val_tasks,
test_task_name_list=args.test_tasks,
train_batch_size=args.train_batch_size,
eval_batch_multiplier=2,
epochs=args.num_train_epochs,
num_gpus=torch.cuda.device_count(),
train_examples_cap=args.train_examples_cap,
).create_config()
if args.do_test: #maple
for tsk in jiant_task_container_config['task_cache_config_dict']:
jiant_task_container_config['task_cache_config_dict'][tsk]['test_labels'] = jiant_task_container_config['task_cache_config_dict'][tsk]['val_labels'].replace("/val_labels", "/test_labels")
jiant_task_container_config['task_cache_config_dict'][tsk]['train_labels'] = jiant_task_container_config['task_cache_config_dict'][tsk]['val_labels'].replace("/val_labels", "/train_labels")
os.makedirs(os.path.join(args.exp_dir, "run_configs"), exist_ok=True)
jiant_task_container_config_path = os.path.join(
args.exp_dir, "run_configs", f"{args.run_name}_config.json"
)
py_io.write_json(jiant_task_container_config, path=jiant_task_container_config_path)
# === Step 5: Train/Eval! === #
if args.model_weights_path:
model_load_mode = "partial"
model_weights_path = args.model_weights_path
else:
# From Transformers
if any(task_name.startswith("mlm_") for task_name in full_task_name_list):
model_load_mode = "from_transformers_with_mlm"
else:
model_load_mode = "from_transformers"
model_weights_path = os.path.join(
model_cache_path, hf_config.model_type, "model", "model.p"
)
run_output_dir = os.path.join(args.exp_dir, "runs", args.run_name)
run_args = runscript.RunConfiguration(
# === Required parameters === #
jiant_task_container_config_path=jiant_task_container_config_path,
output_dir=run_output_dir,
# === Model parameters === #
hf_pretrained_model_name_or_path=args.hf_pretrained_model_name_or_path,
model_path=model_weights_path,
model_config_path=os.path.join(
model_cache_path, hf_config.model_type, "model", "config.json",
),
model_load_mode=model_load_mode,
# === Running Setup === #
do_train=bool(args.train_tasks),
do_val=bool(args.val_tasks),
do_save=args.do_save,
do_save_best=args.do_save_best,
do_save_last=args.do_save_last,
write_val_preds=args.write_val_preds,
write_test_preds=args.write_test_preds,
eval_every_steps=args.eval_every_steps,
min_train_steps = args.min_train_steps,
save_every_steps=args.save_every_steps,
save_checkpoint_every_steps=args.save_checkpoint_every_steps,
no_improvements_for_n_evals=args.no_improvements_for_n_evals,
keep_checkpoint_when_done=args.keep_checkpoint_when_done,
force_overwrite=args.force_overwrite,
seed=args.seed,
# === Training Learning Parameters === #
learning_rate=args.learning_rate,
adam_epsilon=args.adam_epsilon,
max_grad_norm=args.max_grad_norm,
optimizer_type=args.optimizer_type,
# === Specialized config === #
no_cuda=args.no_cuda,
fp16=args.fp16,
fp16_opt_level=args.fp16_opt_level,
local_rank=args.local_rank,
server_ip=args.server_ip,
server_port=args.server_port,
)
if (
args.save_checkpoint_every_steps
and os.path.exists(os.path.join(run_output_dir, "checkpoint.p"))
and with_continue
):
print("Resuming")
checkpoint = torch.load(os.path.join(run_output_dir, "checkpoint.p"))
#run_args = runscript.RunConfiguration.from_dict(checkpoint["metadata"]["args"])
else:
print("Running from start")
checkpoint = None
run_args.user_mode=args.user_mode #maple
run_args.min_train_steps=args.min_train_steps #maple
run_args.log_dir=args.log_dir #maple
run_args.do_test=args.do_test #maple
runscript.run_loop(args=run_args, checkpoint=checkpoint)
py_io.write_file(args.to_json(), os.path.join(run_output_dir, "simple_run_config.json"))
def main():
mode, cl_args = zconf.get_mode_and_cl_args()
args = RunConfiguration.default_run_cli(cl_args=cl_args)
user_mode = {e.split('=')[0] : e.split('=')[1] if len(e.split('=')) > 1 else None for e in (args.user_mode[0].split(',') if type(args.user_mode) is not str else args.user_mode.split(',')) }
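    # user_mode is a comma-separated list of "key" or "key=value" flags, e.g.
    # "bsearch=0.005,burnin=500,srand=1"; keys given without "=" are mapped to None.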
if 'ptm' in user_mode:
args.hf_pretrained_model_name_or_path = user_mode['ptm']
if 'seed' in user_mode:
args.seed = int(user_mode['seed'])
if 'lr' in user_mode:
args.learning_rate = float(user_mode['lr'])
print("lr:", args.learning_rate)
if 'nie' in user_mode:
args.no_improvements_for_n_evals = int(user_mode['nie'])
if 'srand' in user_mode:
args.seed = args.seed + int(user_mode['srand'])
if mode == "run":
run_simple(args, with_continue=False)
elif mode == "run_with_continue":
run_simple(args, with_continue=True)
else:
raise zconf.ModeLookupError(mode)
if __name__ == "__main__":
main()
| 13,469 | 43.163934 | 201 |
py
|
AnalyzeParameterEfficientFinetune
|
AnalyzeParameterEfficientFinetune-main/src/evaluate.py
|
import json
import os
import torch
import jiant.utils.python.io as py_io
import jiant.proj.main.components.task_sampler as jiant_task_sampler
from jiant.proj.main.components.evaluate import *
def write_val_results(val_results_dict, metrics_aggregator, output_dir, verbose=True, result_file = "val_metrics.json"):
full_results_to_write = {
"aggregated": jiant_task_sampler.compute_aggregate_major_metrics_from_results_dict(
metrics_aggregator=metrics_aggregator, results_dict=val_results_dict,
),
}
for task_name, task_results in val_results_dict.items():
task_results_to_write = {}
if "loss" in task_results:
task_results_to_write["loss"] = task_results["loss"]
if "metrics" in task_results:
task_results_to_write["metrics"] = task_results["metrics"].to_dict()
full_results_to_write[task_name] = task_results_to_write
metrics_str = json.dumps(full_results_to_write, indent=2)
if verbose:
print(metrics_str)
py_io.write_json(data=full_results_to_write, path=os.path.join(output_dir, result_file))
print("Saved at " + os.path.join(output_dir, result_file))
| 1,180 | 38.366667 | 120 |
py
|
AnalyzeParameterEfficientFinetune
|
AnalyzeParameterEfficientFinetune-main/src/metarunner.py
|
import jiant.proj.main.metarunner as jiant_metarunner
from jiant.utils.zlog import BaseZLogger, PRINT_LOGGER
import jiant.proj.main.runner as jiant_runner
class JiantMetarunner(jiant_metarunner.JiantMetarunner):
def __init__(
self,
runner: jiant_runner.JiantRunner,
save_every_steps,
eval_every_steps,
min_train_steps,
save_checkpoint_every_steps,
no_improvements_for_n_evals,
checkpoint_saver,
output_dir,
verbose: bool = True,
save_best_model: bool = True,
load_best_model: bool = True,
save_last_model: bool = True,
log_writer: BaseZLogger = PRINT_LOGGER,
):
super().__init__(
runner,
save_every_steps,
eval_every_steps,
save_checkpoint_every_steps,
no_improvements_for_n_evals,
checkpoint_saver,
output_dir,
verbose,
save_best_model,
load_best_model,
save_last_model,
log_writer,
)
self.min_train_steps = min_train_steps
    def should_break_training(self) -> bool:
        # Keep the parent's early-stopping criteria, but never stop before min_train_steps
        # global steps have been taken (min_train_steps == 0 disables this lower bound).
        return super().should_break_training() and (self.min_train_steps == 0 or self.train_state.global_steps > self.min_train_steps)
| 1,294 | 32.205128 | 134 |
py
|
AnalyzeParameterEfficientFinetune
|
AnalyzeParameterEfficientFinetune-main/src/container_setup.py
|
import warnings
from dataclasses import dataclass
from typing import Dict, List, Optional
import jiant.proj.main.components.task_sampler as jiant_task_sampler
import jiant.shared.caching as caching
from jiant.tasks.core import Task
from jiant.tasks.retrieval import create_task_from_config_path
import jiant.utils.python.io as py_io
from jiant.utils.python.datastructures import ExtendedDataClassMixin
from jiant.proj.main.components.container_setup import *
def create_task_cache_dict(task_cache_config_dict: Dict) -> Dict:
"""Takes a map of task cache configs, and returns map of instantiated task data cache objects.
Notes:
        This function assumes that data is divided and stored according to phase where phase takes
        a value of train, val, val_labels, test, test_labels, or train_labels.
Args:
task_cache_config_dict (Dict[str, Dict[str, str]]): maps of task names to cache file dirs.
Returns:
Dict[str, Dict[str, ChunkedFilesDataCache]] mappings from task name to task cache objects.
"""
task_cache_dict = {}
for task_name, task_cache_config in task_cache_config_dict.items():
single_task_cache_dict = {}
for phase in ["train", "val", "val_labels", "test", "test_labels", "train_labels"]:
if phase in task_cache_config:
single_task_cache_dict[phase] = caching.ChunkedFilesDataCache(
task_cache_config[phase],
)
task_cache_dict[task_name] = single_task_cache_dict
return task_cache_dict
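# Hedged sketch of the input shape expected by create_task_cache_dict (the task name and
# paths are illustrative assumptions): each phase entry points at a cache folder.
_EXAMPLE_TASK_CACHE_CONFIG_DICT = {
    "mnli": {
        "train": "/path/to/cache/mnli/train",
        "val": "/path/to/cache/mnli/val",
        "val_labels": "/path/to/cache/mnli/val_labels",
    },
}
# create_task_cache_dict(_EXAMPLE_TASK_CACHE_CONFIG_DICT) returns the same mapping with
# each path replaced by an instantiated caching.ChunkedFilesDataCache object.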
def create_jiant_task_container(
task_config_path_dict: Dict,
task_cache_config_dict: Dict,
sampler_config: Dict,
global_train_config: Dict,
task_specific_configs_dict: Dict,
metric_aggregator_config: Dict,
taskmodels_config: Dict,
task_run_config: Dict,
verbose: bool = True,
) -> JiantTaskContainer:
"""Read and interpret config files, initialize configuration objects, return JiantTaskContainer.
Args:
task_config_path_dict (Dict[str, str]): map of task names to task config files.
task_cache_config_dict (Dict[str, str]): map of task names to cache file dirs.
sampler_config (Dict): map containing sample config options, e.g., uniform task sampling.
global_train_config (Dict): map of training configs shared by all tasks (e.g., max_steps).
task_specific_configs_dict (Dict): map of maps mapping task names to task-specific options.
metric_aggregator_config (Dict): map containing task metric aggregation options.
taskmodels_config: maps mapping from tasks to models, and specifying task-model configs.
task_run_config: config determining which tasks are used in which phase (e.g., train).
verbose: True to print task info.
Returns:
JiantTaskContainer carrying components configured and set up pre-runner.
"""
task_dict = create_task_dict(task_config_dict=task_config_path_dict, verbose=verbose)
task_cache_dict = create_task_cache_dict(task_cache_config_dict=task_cache_config_dict)
global_train_config = GlobalTrainConfig.from_dict(global_train_config)
task_specific_config = create_task_specific_configs(
task_specific_configs_dict=task_specific_configs_dict,
)
taskmodels_config = TaskmodelsConfig.from_dict(taskmodels_config)
task_run_config = TaskRunConfig.from_dict(task_run_config)
num_train_examples_dict = get_num_train_examples(
task_cache_dict=task_cache_dict, train_task_list=task_run_config.train_task_list,
)
task_sampler = jiant_task_sampler.create_task_sampler(
sampler_config=sampler_config,
# task sampler samples only from the training tasks
task_dict={
task_name: task_dict[task_name] for task_name in task_run_config.train_task_list
},
task_to_num_examples_dict=num_train_examples_dict,
)
metric_aggregator = jiant_task_sampler.create_metric_aggregator(
metric_aggregator_config=metric_aggregator_config,
)
return JiantTaskContainer(
task_dict=task_dict,
task_sampler=task_sampler,
global_train_config=global_train_config,
task_cache_dict=task_cache_dict,
task_specific_configs=task_specific_config,
taskmodels_config=taskmodels_config,
task_run_config=task_run_config,
metrics_aggregator=metric_aggregator,
)
def create_jiant_task_container_from_dict(
jiant_task_container_config_dict: Dict, verbose: bool = True
) -> JiantTaskContainer:
return create_jiant_task_container(
task_config_path_dict=jiant_task_container_config_dict["task_config_path_dict"],
task_cache_config_dict=jiant_task_container_config_dict["task_cache_config_dict"],
sampler_config=jiant_task_container_config_dict["sampler_config"],
global_train_config=jiant_task_container_config_dict["global_train_config"],
task_specific_configs_dict=jiant_task_container_config_dict["task_specific_configs_dict"],
taskmodels_config=jiant_task_container_config_dict["taskmodels_config"],
task_run_config=jiant_task_container_config_dict["task_run_config"],
metric_aggregator_config=jiant_task_container_config_dict["metric_aggregator_config"],
verbose=verbose,
)
def create_jiant_task_container_from_json(
jiant_task_container_config_path: str, verbose: bool = True
) -> JiantTaskContainer:
return create_jiant_task_container_from_dict(
jiant_task_container_config_dict=py_io.read_json(jiant_task_container_config_path),
verbose=verbose,
)
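# Hedged usage sketch (the config path is an illustrative assumption): the run configs
# written by runscript.py can be loaded directly into a JiantTaskContainer.
def _container_from_json_sketch():
    return create_jiant_task_container_from_json(
        jiant_task_container_config_path="experiments/run_configs/mnli_run_config.json",
        verbose=True,
    )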
| 5,589 | 47.189655 | 100 |
py
|
AnalyzeParameterEfficientFinetune
|
AnalyzeParameterEfficientFinetune-main/src/runner.py
|
from http.client import NotConnected
from typing import Dict
from dataclasses import dataclass
import torch
import math
import numpy as np
import copy
#from functorch import *
from torch.autograd.functional import *
import jiant.tasks.evaluate as evaluate
import jiant.utils.torch_utils as torch_utils
#from jiant.proj.main.components.container_setup import JiantTaskContainer
from container_setup import JiantTaskContainer # maple
from jiant.proj.main.modeling.primary import JiantModel, wrap_jiant_forward
from jiant.shared.constants import PHASE
from jiant.shared.runner import (
#complex_backpropagate,
get_train_dataloader_from_cache,
get_eval_dataloader_from_cache,
)
from jiant.utils.display import maybe_tqdm
from jiant.utils.python.datastructures import InfiniteYield, ExtendedDataClassMixin
def complex_backpropagate(
loss, optimizer, model, fp16, n_gpu, gradient_accumulation_steps, max_grad_norm, retain_graph = False
):
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
if fp16:
# noinspection PyUnresolvedReferences,PyPackageRequirements
from apex import amp
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), max_grad_norm)
else:
loss.backward(retain_graph=retain_graph)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
return loss
@dataclass
class RunnerParameters(ExtendedDataClassMixin):
local_rank: int
n_gpu: int
fp16: bool
max_grad_norm: float
@dataclass
class TrainState(ExtendedDataClassMixin):
global_steps: int
task_steps: Dict[str, int]
@classmethod
def from_task_name_list(cls, task_name_list):
return cls(global_steps=0, task_steps={task_name: 0 for task_name in task_name_list})
def step(self, task_name):
self.task_steps[task_name] += 1
self.global_steps += 1
# Maple for hvp
# Following are utilities to make nn.Module functional
def del_attr(obj, names):
if len(names) == 1:
delattr(obj, names[0])
else:
del_attr(getattr(obj, names[0]), names[1:])
def set_attr(obj, names, val):
if len(names) == 1:
setattr(obj, names[0], val)
else:
set_attr(getattr(obj, names[0]), names[1:], val)
def make_functional(mod):
orig_params = tuple(mod.parameters())
# Remove all the parameters in the model
names = []
for name, p in list(mod.named_parameters()):
del_attr(mod, name.split("."))
names.append(name)
return orig_params, names
def get_parms(mod):
orig_params = tuple(mod.parameters())
# Remove all the parameters in the model
parms = []
names = []
for name, p in list(mod.named_parameters()):
parms.append(copy.deepcopy(p))
del_attr(mod, name.split("."))
names.append(name)
return parms, names
def load_weights(mod, names, params):
for name, p in zip(names, params):
set_attr(mod, name.split("."), p)
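# Hedged, self-contained sketch (not part of the original runner) of how the helpers
# above combine with torch.autograd.functional.hvp, mirroring the pattern used in
# JiantRunner.run_train_step below. The tiny linear model and the squared-error loss
# are illustrative assumptions.
def _hvp_with_functional_module_sketch():
    import torch.nn as nn

    model = nn.Linear(4, 1)
    x, y = torch.randn(8, 4), torch.randn(8, 1)

    # Strip the parameters off the module and keep detached copies that require grad.
    params, names = make_functional(model)
    params = tuple(p.detach().requires_grad_() for p in params)

    def forward(*new_params):
        # Re-attach the candidate parameters as plain tensors before the forward pass.
        load_weights(model, names, new_params)
        return ((model(x) - y) ** 2).mean()

    ones = tuple(torch.ones_like(p) for p in params)
    # hvp returns (loss_value, Hessian-vector products), one tensor per parameter.
    return hvp(forward, params, ones)[1]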
class JiantRunner:
def __init__(
self,
jiant_task_container: JiantTaskContainer,
jiant_model: JiantModel,
optimizer_scheduler,
device,
rparams: RunnerParameters,
log_writer,
):
self.jiant_task_container = jiant_task_container
self.jiant_model = jiant_model
self.optimizer_scheduler = optimizer_scheduler
self.device = device
self.rparams = rparams
self.log_writer = log_writer
self.model = self.jiant_model
def run_train(self):
for _ in self.run_train_context():
pass
def run_train_context(self, verbose=True):
train_dataloader_dict = self.get_train_dataloader_dict()
train_state = TrainState.from_task_name_list(
self.jiant_task_container.task_run_config.train_task_list
)
pbar = maybe_tqdm(
range(self.jiant_task_container.global_train_config.max_steps),
desc="Training",
verbose=verbose,
)
for _ in pbar:
self.run_train_step(
train_dataloader_dict=train_dataloader_dict, train_state=train_state,
pbar = pbar
)
yield train_state
def resume_train_context(self, train_state, verbose=True):
train_dataloader_dict = self.get_train_dataloader_dict()
start_position = train_state.global_steps
pbar = maybe_tqdm(
range(start_position, self.jiant_task_container.global_train_config.max_steps),
desc="Training",
initial=start_position,
total=self.jiant_task_container.global_train_config.max_steps,
verbose=verbose,
)
for _ in pbar:
self.run_train_step(
train_dataloader_dict=train_dataloader_dict, train_state=train_state,
pbar = pbar
)
yield train_state
def run_train_step(self, train_dataloader_dict: dict, train_state: TrainState, pbar):
self.jiant_model.train()
task_name, task = self.jiant_task_container.task_sampler.pop()
task_specific_config = self.jiant_task_container.task_specific_configs[task_name]
loss_val = 0
"""if 'sctmask' in self.user_mode:
retain_graph = True
else:
retain_graph = False """
if 'sctmask' in self.user_mode:
#import numpy as np
parms = dict(self.jiant_model.named_parameters())
for name in parms:
if not name.startswith('encoder') or '__' in name:
continue
xname = "sctmask__" + name.replace(".", "__")
idx = self.jiant_model.encoder.idx_dict[xname]
x = getattr(self.jiant_model.encoder, xname)
parms[name].detach_()
parms[name].flatten()[idx] = x
elif 'prompt' in self.user_mode:
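            # Prompt tuning: the word-embedding matrix is rebuilt in place each step from
            # weight_ori plus the trainable prompt_weight rows; the matching prompt token
            # ids are prepended to each batch later in the gradient-accumulation loop.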
self.jiant_model.encoder.embeddings.word_embeddings.weight.detach_()
self.jiant_model.encoder.embeddings.word_embeddings.weight[:] = torch.cat([self.jiant_model.encoder.embeddings.word_embeddings.weight_ori, self.jiant_model.encoder.embeddings.prompt_weight])
elif 'diffprun' in self.user_mode:
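            # Diff pruning: during burn-in a stochastic hard-concrete gate z (sampled from
            # the ber__* logits) scales the per-parameter difference w, and an L0-style
            # penalty l0 is added to the loss; after burn-in the top-scoring gates are
            # frozen into a fixed binary mask (self.dp_mask) applied to w.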
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "dp_burnin_step"):
self.dp_burnin_step = 500 if 'burnin' not in self.user_mode else int(self.user_mode['burnin'])
self.dp_mask = {}
self.dp_step = 0
for p in parms:
if not p.startswith('encoder.w'):
continue
pn = p.replace("encoder.w__", "").replace("__", ".")
self.dp_mask[pn] = torch.zeros_like(parms[p])
self.dp_step += 1
if self.dp_step < self.dp_burnin_step:
l0s = []
for p in parms:
if not p.startswith('encoder.w'):
continue
p = p.replace("encoder.w__", "").replace("__", ".")
alpha = getattr(self.jiant_model.encoder, "ber__" + p.replace(".", "__"))
w = getattr(self.jiant_model.encoder, "w__" + p.replace(".", "__"))
u = 1e-6 + torch.rand_like(w) * (1- 2e-6)
st = torch.sigmoid(torch.log(u) - torch.log(1-u) + alpha)
clamp = float(self.user_mode['clamp']) if 'clamp' in self.user_mode else 3.
l, r = -clamp, clamp
stb = st * (r-l) + l
z = stb.clamp_min(0).clamp_max(1)
nw = self.jiant_model.ori_pars[p] + z * w
node = self.jiant_model
pnames = p.split(".")
for pname in pnames[:-1]:
node = getattr(node, pname)
delattr(node, pnames[-1])
setattr(node, pnames[-1], nw)
l0s.append(torch.sigmoid(alpha - math.log(-l/r)).flatten())
l0 = torch.cat(l0s).mean()
elif self.dp_step >= self.dp_burnin_step:
l0 = torch.tensor(0)
if self.dp_step == self.dp_burnin_step:
for p in self.dp_mask:
alpha = getattr(self.jiant_model.encoder, "ber__" + p.replace(".", "__"))
alpha.requires_grad = False
_, idx = torch.topk(alpha.flatten().abs(), k = int(float(self.user_mode['diffprun']) * alpha.numel()))
self.dp_mask[p].flatten()[idx] = 1.0
for p in parms:
if not p.startswith('encoder.w'):
continue
p = p.replace("encoder.w__", "").replace("__", ".")
w = getattr(self.jiant_model.encoder, "w__" + p.replace(".", "__"))
nw = self.jiant_model.ori_pars[p] + self.dp_mask[p] * w
node = self.jiant_model
pnames = p.split(".")
for pname in pnames[:-1]:
node = getattr(node, pname)
delattr(node, pnames[-1])
setattr(node, pnames[-1], nw)
for i in range(task_specific_config.gradient_accumulation_steps):
batch, batch_metadata = train_dataloader_dict[task_name].pop()
batch = batch.to(self.device)
if 'prompt' in self.user_mode:
ptsize = int(self.user_mode['prompt'])
input_ids = batch.input_ids.new_zeros([batch.input_ids.shape[0], ptsize])
input_ids[:] = torch.arange(ptsize) + self.jiant_model.encoder.embeddings.word_embeddings.num_embeddings
batch.input_ids = torch.cat([input_ids, batch.input_ids], 1)
batch.input_mask = torch.cat([torch.ones_like(input_ids), batch.input_mask], 1)
batch.segment_ids = torch.cat([torch.zeros_like(input_ids), batch.segment_ids], 1)
elif 'qapp_functorch' in self.user_mode:#functorch
model = copy.deepcopy(self.jiant_model)
model.eval()
func, params, buffers = make_functional_with_buffers(model)
def compute_loss(params, buffers):
y = func(params, buffers, batch=batch, task=task, compute_loss=True)
return y['loss']
grad(grad(compute_loss))(params, buffers)
elif 'qapp' in self.user_mode:
if not hasattr(self, 'q_h'):
self.q_h = {}
if not hasattr(self, 'bs_step') or self.bs_step < self.bs_burnin_step:
#import copy
model = copy.deepcopy(self.jiant_model)
model.eval()
params, names = make_functional(model)
# Make params regular Tensors instead of nn.Parameter
params = tuple(p.detach().requires_grad_() for p in params)
                    # forward function that evaluates the loss for a given set of candidate parameters
def forward(*new_params):
                        # re-attach the candidate parameters to the functionalized model before the forward pass
load_weights(model, names, new_params)
model_output = wrap_jiant_forward(
jiant_model=model, batch=batch, task=task, compute_loss=True,
)
return model_output.loss
ones = tuple([torch.ones_like(p) for p in params])
hv = hvp(forward, params, ones)[1]
hvs = {name : t for name, t in zip(names, hv)}
elif 'hda_backpack' in self.user_mode:
from backpack.extensions import BatchDiagHessian, DiagHessian
from backpack import backpack, extend
model = copy.deepcopy(self.jiant_model)
model = extend(model)
model.eval()
model_output = wrap_jiant_forward(
jiant_model=model, batch=batch, task=task, compute_loss=False,
)
loss_fct = extend(nn.CrossEntropyLoss())
loss = loss_fct(model_output.logits.view(-1, self.head.num_labels), batch.label_id.view(-1),)
with backpack(DiagHessian(), BatchDiagHessian()):
loss = self.complex_backpropagate(
loss=loss,
gradient_accumulation_steps=task_specific_config.gradient_accumulation_steps,
#retain_graph = retain_graph
)
elif 'hda' in self.user_mode:
if not hasattr(self, 'hvs'):
self.hvs = {}
if not hasattr(self, 'bs_step') or self.bs_step < self.bs_burnin_step:
#import copy
model = copy.deepcopy(self.jiant_model)
model.eval()
params, names = make_functional(model)
# Make params regular Tensors instead of nn.Parameter
params = tuple(p.detach().requires_grad_() for p in params)
                    # forward function that evaluates the loss for a given set of candidate parameters
def forward(*new_params):
                        # re-attach the candidate parameters to the functionalized model before the forward pass
load_weights(model, names, new_params)
model_output = wrap_jiant_forward(
jiant_model=model, batch=batch, task=task, compute_loss=True,
)
return model_output.loss
rad = tuple([(torch.rand_like(p) > 0.5).float() * 2 - 1 for p in params])
N = 10
for i in range(N):
hv = vhp(forward, params, rad)[1]
for name, r, t in zip(names, rad, hv):
self.hvs[name] = t*r / N if name not in self.hvs else self.hvs[name] + t*r / N
model_output = wrap_jiant_forward(
jiant_model=self.jiant_model, batch=batch, task=task, compute_loss=True,
)
if 'diffprun' in self.user_mode:
lbd = float(self.user_mode['lambda']) if ('lambda' in self.user_mode) else 1.
model_output.loss = model_output.loss + lbd * l0
if 'l2sp' in self.user_mode:
lbd = float(self.user_mode['l2sp']) if ('l2sp' in self.user_mode and self.user_mode['l2sp'] is not None) else 1.
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "l2sp_w0"):
#import copy
self.l2sp_w0 = {}
for p in parms:
if 'taskmodels_dict' in p:
continue
self.l2sp_w0[p] = copy.deepcopy(parms[p].data)
rs = []
for p in self.l2sp_w0:
rs.append((parms[p] - self.l2sp_w0[p]).flatten()**2)
model_output.loss = model_output.loss + lbd * torch.cat(rs).mean()
if 'lnsr' in self.user_mode:
#import copy
lbd = float(self.user_mode['lambda']) if ('lambda' in self.user_mode) else 1.
embbak = self.jiant_model.encoder.embeddings.word_embeddings.weight.data
self.jiant_model.encoder.embeddings.word_embeddings.weight.data = copy.deepcopy(embbak) + torch.randn_like(embbak) * 0.01
with torch.no_grad():
model_output1 = wrap_jiant_forward(
jiant_model=self.jiant_model, batch=batch, task=task, compute_loss=True,
)
self.jiant_model.encoder.embeddings.word_embeddings.weight.data = embbak
a, b = model_output.other[-1], model_output1.other[-1]
if type(a) is list:
a, b = torch.cat(a), torch.cat(b)
model_output.loss = model_output.loss + lbd * ((a - b)**2).mean()
loss = self.complex_backpropagate(
loss=model_output.loss,
gradient_accumulation_steps=task_specific_config.gradient_accumulation_steps,
#retain_graph = retain_graph
)
loss_val += loss.item()
if 'bottleneck' in self.user_mode:
parms = dict(self.jiant_model.named_parameters())
for p in parms:
if 'output.dense' not in p and 'taskmodels_dict' not in p:
parms[p].grad = None
continue
elif 'output.dense' in p and 'weight' in p:
parms[p].grad[:, int(parms[p].shape[1] * 0.2):] = 0
elif 'fixptm' in self.user_mode:
parms = dict(self.jiant_model.named_parameters())
for p in parms:
if 'taskmodels_dict' not in p:
parms[p].grad = None
elif 'randmask' in self.user_mode:
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "grad_mask"):
self.grad_mask = {}
for p in parms:
#if p.startswith('encoder') and ('attention' in p or 'embeddings' in p): # quite good why?
if p.startswith('encoder'):
self.grad_mask[p] = torch.rand_like(parms[p]) > float(self.user_mode['randmask'])
for p in parms:
if p in self.grad_mask and parms[p].grad is not None:
parms[p].grad.masked_fill_(self.grad_mask[p], 0.)
elif 'psearch' in self.user_mode:# DEPRECATED: It still changes all parms
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "ps_mask"):
self.ps_mask = {}
self.ps_accu = {}
self.ps_masked = {}
self.ps_finished = {}
self.ps_step = 0
self.ps_update = 10
self.ps_r = float(self.user_mode['psearch']) / 2
for p in parms:
self.ps_mask[p] = torch.zeros_like(parms[p]).bool()
self.ps_masked[p] = 0
self.ps_finished[p] = False
self.ps_accu[p] = 0
for p in parms:
if parms[p].grad is not None:
self.ps_accu[p] = self.ps_accu[p] + parms[p].grad.abs()
if not self.ps_finished[p] and self.ps_step % self.ps_update == self.ps_update - 1 and self.ps_step > 1 and p.startswith('encoder'):
remain = self.ps_accu[p].masked_fill(self.ps_mask[p], float('inf'))
size = parms[p].numel()
_, idx = torch.topk(-remain.flatten(), k = int(self.ps_r * size))
newm = self.ps_masked[p] + len(idx)
if newm >= (1 - float(self.user_mode['psearch'])) * size:
self.ps_finished[p] = True
print("%s : Fixed."%p)
continue
self.ps_mask[p].flatten()[idx] = True
if p in self.ps_mask:
parms[p].grad.masked_fill_(self.ps_mask[p], 0.)
self.ps_step += 1
elif 'bsearch' in self.user_mode:
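            # bsearch: accumulate a per-parameter importance score for bs_burnin_step steps
            # (plain gradient sum by default; Fisher, |grad|, or Hessian-weighted variants
            # via user_mode flags), then keep only the top-k fraction of encoder weights
            # trainable by zeroing the gradients of everything else (bs_mask).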
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "bs_mask"):
self.bs_burnin_step = 500 if 'burnin' not in self.user_mode else int(self.user_mode['burnin'])
self.bs_step = 0
self.bs_mask = {}
self.bs_accu = {}
for p in parms:
self.bs_mask[p] = torch.ones_like(parms[p]).bool()
self.bs_accu[p] = 0
if "happ" in self.user_mode:
self.happ_accu = {}
self.happ_prev = {}
for p in parms:
self.happ_accu[p] = 0
self.happ_prev[p] = 0
self.bs_step += 1
if self.bs_step < self.bs_burnin_step:
if 'arand' in self.user_mode:
self.jiant_model.eval()
for p in parms:
if parms[p].grad is not None:
if "fisher" in self.user_mode:
self.bs_accu[p] = self.bs_accu[p] + parms[p].grad**2
elif "abs" in self.user_mode:
self.bs_accu[p] = self.bs_accu[p] + parms[p].grad.abs()
elif "m1" in self.user_mode:
self.q_h[p] = hvs[p] if p not in self.q_h else self.q_h[p] + hvs[p]
self.bs_accu[p] = self.bs_accu[p] + parms[p].grad
elif "qapp" in self.user_mode:
nhv = 1. / hvs[p] / 1000
nhv[(nhv > 1.) | (nhv < -1.)] = 0
self.bs_accu[p] = self.bs_accu[p] + parms[p].grad * nhv
elif "happ" in self.user_mode:
self.bs_accu[p] = self.bs_accu[p] + parms[p].grad
self.happ_accu[p] += (parms[p].grad - (self.happ_prev[p] if p in self.happ_prev else 0)).abs()
self.happ_prev[p] = parms[p].grad
else:
self.bs_accu[p] = self.bs_accu[p] + parms[p].grad
if "fdm" in self.user_mode:
hmodel = copy.deepcopy(self.jiant_model)
hparms = dict(hmodel.named_parameters())
for p in hparms:
hparms[p].data = hparms[p].data * 0.9
hmodel_output = wrap_jiant_forward(
jiant_model=hmodel, batch=batch, task=task, compute_loss=True,
)
hloss = self.complex_backpropagate(
loss=hmodel_output.loss,
gradient_accumulation_steps=task_specific_config.gradient_accumulation_steps,
#retain_graph = retain_graph
)
if not hasattr(self, 'fdm_hessian'):
self.fdm_hessian = {p: 0 for p in parms}
hparms = dict(hmodel.named_parameters())
for p in parms:
hpp = (parms[p].grad - hparms[p].grad) / (0.1 * parms[p].data)
hpp[hpp.isnan()] = 0
self.fdm_hessian[p] += hpp
self.optimizer_scheduler.optimizer.zero_grad()
return
elif self.bs_step == self.bs_burnin_step:
for p in parms:
if parms[p].grad is None:
continue
if "vanish" in self.user_mode:
_, idx = torch.topk(-self.bs_accu[p].flatten().abs(), k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
elif "m1" in self.user_mode:
#score = self.bs_accu[p] * (1. / self.q_h[p]).clamp(min=-100, max=100) / 100
score = self.bs_accu[p] * ((1. / self.q_h[p]).sigmoid()*2-1)
_, idx = torch.topk(score.flatten().abs(), k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
elif "happ" in self.user_mode:
score = self.bs_accu[p].abs() - self.happ_accu[p]
score[score.isnan()] = 0
_, idx = torch.topk(score.flatten(), k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
elif "fdm" in self.user_mode:
#self.fdm_hessian[p][self.fdm_hessian[p].abs() < 1e-3] = 0
scale = float(self.user_mode['fdm']) if 'fdm' in self.user_mode else 1.
score = (self.bs_accu[p] * ((1. / self.fdm_hessian[p] * scale).sigmoid()*2-1) ).abs()
score[score.isnan()] = 0
_, idx = torch.topk(score.flatten(), k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
elif 'hda_v1' in self.user_mode:
r = float(self.user_mode['hda']) if self.user_mode['hda'] is not None else 0.1
ksize = 11#self.hvs[p].numel() // 10
h = torch.nn.functional.avg_pool1d(torch.nn.functional.pad(self.hvs[p].flatten(), (ksize // 2, ksize // 2)).unsqueeze(0), ksize, stride = 1).abs()
h = h.squeeze(0)
m = h[h!=0].mean()
h1 = h * r + (1 - r) * m
h2 = (1 / h1)
s = (h2 < 1).float()
h3 = s * h2 + (1 - s) * (1 + h2.log10())
print(h3.max(), h3.min(), (h3.max() - h3.min()) / h3.min())
score = (self.bs_accu[p].flatten() * h3 ).abs()
_, idx = torch.topk(score, k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
elif 'hda' in self.user_mode:
r = float(self.user_mode['hda']) if self.user_mode['hda'] is not None else 0.005
score = (2 * self.bs_accu[p].flatten().abs().log10() - self.hvs[p].flatten().abs().clamp(0.1).log10() * r)
b = self.hvs[p].flatten().abs().clamp(0.1).log10() * r
a = 2 * self.bs_accu[p].flatten().abs().log10()
#print(b.min().item(), b.max().item(), b.max().item() - b.min().item(), a.max().item())
_, idx = torch.topk(score, k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
_, idx1 = torch.topk(a, k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
sidx = set(idx1.tolist())
print(len([i for i in idx.tolist() if i in sidx]) / max(len(idx), 1))
else:
_, idx = torch.topk(self.bs_accu[p].flatten().abs(), k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
self.bs_mask[p].flatten()[idx] = False
if 'arand' in self.user_mode:
self.jiant_model.train()
for p in parms:
if p in self.bs_mask and p.startswith('encoder') and parms[p].grad is not None:
parms[p].grad.masked_fill_(self.bs_mask[p], 0.)
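        # 'magprun' (formerly 'impabs'): magnitude pruning -- after the burn-in
        # (default 500 steps) keep only the largest-magnitude entries of each tensor
        # trainable and zero the encoder gradients everywhere else.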
elif 'magprun' in self.user_mode:# former impabs
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "mag_step"):
self.mag_burnin_step = 500 if 'burnin' not in self.user_mode else int(self.user_mode['burnin'])
self.mag_step = 0
self.mag_step += 1
if self.mag_step == self.mag_burnin_step and not hasattr(self, "ia_mask"):
self.ia_mask = {}
for p in parms:
self.ia_mask[p] = torch.ones_like(parms[p]).bool()
_, idx = torch.topk(parms[p].abs().flatten(), k = int(float(self.user_mode['magprun']) * parms[p].numel()))
self.ia_mask[p].flatten()[idx] = False
for p in parms:
if hasattr(self, 'ia_mask') and p in self.ia_mask and p.startswith('encoder') and parms[p].grad is not None:
parms[p].grad.masked_fill_(self.ia_mask[p], 0.)
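        # 'impsa': at the first step, pick a global top-k of entries over the
        # concatenated parameters; encoder tensors with no selected entries are frozen,
        # and gradients of unselected entries are zeroed on every step.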
elif 'impsa' in self.user_mode:
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "isa_mask"):
self.isa_mask = {}
plist = []
for p in parms:
self.isa_mask[p] = torch.ones_like(parms[p]).bool()
plist.append(parms[p].data.cpu().flatten())
apars = torch.cat(plist)
_, idx = torch.topk(apars, k = int(float(self.user_mode['impsa']) * apars.numel()))
startid = 0
for p in parms:
pids = idx.masked_select((startid <= idx) & (idx < startid + parms[p].numel()))
pids -= startid
self.isa_mask[p].flatten()[pids] = False
startid += parms[p].numel()
if pids.numel() == 0 and p.startswith('encoder'):
parms[p].requires_grad = False
for p in parms:
if p in self.isa_mask and p.startswith('encoder') and parms[p].requires_grad:
parms[p].grad.masked_fill_(self.isa_mask[p], 0.)
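        # 'impback': train normally for a burn-in period, then rewind all weights to
        # their initial values and keep updating only the entries with the largest
        # magnitudes at the end of the burn-in (other encoder gradients are zeroed).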
elif 'impback' in self.user_mode:
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "ib_mask"):
self.ib_burnin_step = 1000
self.ib_step = 0
self.ib_mask = {}
self.ib_weights = {}
for p in parms:
self.ib_mask[p] = torch.ones_like(parms[p]).bool()
self.ib_weights[p] = parms[p].detach().cpu()
self.ib_step += 1
if self.ib_step == self.ib_burnin_step:
for p in parms:
_, idx = torch.topk(parms[p].abs().flatten(), k = int(float(self.user_mode['impback']) * parms[p].numel()))
parms[p].data[:] = self.ib_weights[p]
self.ib_mask[p].flatten()[idx] = False
if self.ib_step >= self.ib_burnin_step:
for p in parms:
if p in self.ib_mask and p.startswith('encoder'):
parms[p].grad.masked_fill_(self.ib_mask[p], 0.)
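        # 'bitfit': only bias terms and the task-head ("taskmodels_dict") parameters
        # receive nonzero gradients; every other gradient is zeroed.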
elif 'bitfit' in self.user_mode:
parms = dict(self.jiant_model.named_parameters())
for p in parms:
if not p.endswith(".bias") and "taskmodels_dict" not in p and parms[p].grad is not None:
parms[p].grad[:] = 0
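        # 'gproj': periodically reset all but the most-changed entries (largest |w - w0|)
        # of each encoder tensor back to the pretrained weights; after the burn-in,
        # gradients of the reset entries are zeroed so only the selected ones keep training.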
elif 'gproj' in self.user_mode:
if not hasattr(self, "gp_step"):
#import copy
parms = dict(self.jiant_model.named_parameters())
self.w0 = copy.deepcopy(parms)
self.gp_step = 0
self.gp_mask = {}
self.gp_burnin_step = 1e100 if 'burnin' not in self.user_mode else int(self.user_mode['burnin'])
self.gp_gstep = 1 if 'gstep' not in self.user_mode else int(self.user_mode['gstep'])
self.gp_step += 1
if self.gp_step % self.gp_gstep == 0 and self.gp_step <= self.gp_burnin_step:
parms = dict(self.jiant_model.named_parameters())
for p in parms:
if parms[p].grad is None or not p.startswith('encoder'):
continue
_, idx = torch.topk((parms[p] - self.w0[p]).flatten().abs(), k = int(float(self.user_mode['gproj']) * parms[p].numel()))
self.gp_mask[p] = torch.ones_like(parms[p]).bool()
self.gp_mask[p].flatten()[idx] = False
print("The masked_select error has not been fiexed!!!")
parms[p].data.masked_select(self.gp_mask[p])[:] = self.w0[p].masked_select(self.gp_mask[p])
if self.gp_step == self.gp_burnin_step and 'reset' in self.user_mode:
for p in parms:
parms[p].data[:] = self.w0[p].data
elif self.gp_step > self.gp_burnin_step:
parms = dict(self.jiant_model.named_parameters())
for p in parms:
if p in self.gp_mask and p.startswith('encoder') and parms[p].grad is not None:
parms[p].grad.masked_fill_(self.gp_mask[p], 0.)
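        # 'sgpa': same idea as 'gproj', but the selection and projection run after the
        # optimizer step (further below), optionally smoothing |w - w0| with a
        # momentum ('mmt') average; after the burn-in only selected entries keep gradients.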
elif 'sgpa' in self.user_mode:
if not hasattr(self, "gp_step"):
#import copy
parms = dict(self.jiant_model.named_parameters())
self.w0 = copy.deepcopy(parms)
self.gp_step = 0
self.gp_mask = {}
self.gp_burnin_step = 1e100 if 'burnin' not in self.user_mode else int(self.user_mode['burnin'])
self.gp_gstep = 1 if 'gstep' not in self.user_mode else int(self.user_mode['gstep'])
if 'mmt' in self.user_mode:
self.gp_mmt = {}
self.gp_mmtr = 0.3 if self.user_mode['mmt'] is None else float(self.user_mode['mmt'])
self.gp_step += 1
if self.gp_step > self.gp_burnin_step:
parms = dict(self.jiant_model.named_parameters())
for p in parms:
if p in self.gp_mask and p.startswith('encoder') and parms[p].grad is not None:
parms[p].grad.masked_fill_(self.gp_mask[p], 0.)
self.optimizer_scheduler.step()
self.optimizer_scheduler.optimizer.zero_grad()
if 'sgpa' in self.user_mode and self.gp_step <= self.gp_burnin_step:
parms = dict(self.jiant_model.named_parameters())
if self.gp_step % self.gp_gstep == 0:
for p in parms:
if parms[p].grad is None or not p.startswith('encoder'):
continue
with torch.no_grad():
diff = (parms[p] - self.w0[p]).flatten().abs()
if "mmt" in self.user_mode:
diff = self.gp_mmtr * diff + (1-self.gp_mmtr) * self.gp_mmt[p] if p in self.gp_mmt else diff
self.gp_mmt[p] = diff
_, idx = torch.topk(diff, k = int(float(self.user_mode['sgpa']) * parms[p].numel()))
self.gp_mask[p] = torch.ones_like(parms[p]).bool()
self.gp_mask[p].flatten()[idx] = False
#parms[p].data.masked_select(self.gp_mask[p])[:] = self.w0[p].masked_select(self.gp_mask[p])
parms[p].data[:] = parms[p].data[:] * (~self.gp_mask[p]) + self.w0[p] * self.gp_mask[p]
if self.gp_step == self.gp_burnin_step and 'reset' in self.user_mode:
for p in parms:
parms[p].data[:] = self.w0[p].data
train_state.step(task_name=task_name)
entry = {
"task": task_name,
"task_step": train_state.task_steps[task_name],
"global_step": train_state.global_steps,
"loss_val": loss_val / task_specific_config.gradient_accumulation_steps,
}
if 'diffprun' in self.user_mode:
entry["loss_val"] = entry["loss_val"] - l0.item()
entry["loss_l0"] = l0.item()
self.log_writer.write_entry(
"loss_train",
entry,
)
pbar.set_postfix({'loss': loss_val / task_specific_config.gradient_accumulation_steps})
def run_val(self, task_name_list, use_subset=None, return_preds=False, verbose=True, phase = "val"):
print("Log Dir:", self.log_writer.tb_writer.logdir)
evaluate_dict = {}
val_dataloader_dict = self.get_val_dataloader_dict(
task_name_list=task_name_list, use_subset=use_subset, phase = phase
)
val_labels_dict = self.get_val_labels_dict(
task_name_list=task_name_list, use_subset=use_subset, label_phase = phase
)
emodel = self.jiant_model
if 'mixout' in self.user_mode:
#import copy
emodel = copy.deepcopy(self.jiant_model)
parms = dict(emodel.named_parameters())
for p in parms:
if not p.startswith("encoder."):
continue
node = emodel.encoder
node0 = self.encoder0
pnames = p.split(".")
for pname in pnames[1:-1]:
node = getattr(node, pname)
node0 = getattr(node0, pname)
msk = (torch.rand_like(getattr(node, pnames[-1])) < float(self.user_mode['mixout'])).float()
nw = (1 - msk) * getattr(node, pnames[-1]) + msk * getattr(node0, pnames[-1])
delattr(node, pnames[-1])
setattr(node, pnames[-1], nw)
for task_name in task_name_list:
task = self.jiant_task_container.task_dict[task_name]
evaluate_dict[task_name] = run_val(
val_dataloader=val_dataloader_dict[task_name],
val_labels=val_labels_dict[task_name],
jiant_model=emodel,
task=task,
device=self.device,
local_rank=self.rparams.local_rank,
return_preds=return_preds,
verbose=verbose,
tag = phase,#maple
user_mode = self.user_mode,
)
return evaluate_dict
def run_test(self, task_name_list, verbose=True):
evaluate_dict = {}
test_dataloader_dict = self.get_test_dataloader_dict()
for task_name in task_name_list:
task = self.jiant_task_container.task_dict[task_name]
evaluate_dict[task_name] = run_test(
test_dataloader=test_dataloader_dict[task_name],
jiant_model=self.jiant_model,
task=task,
device=self.device,
local_rank=self.rparams.local_rank,
verbose=verbose,
)
return evaluate_dict
def get_train_dataloader_dict(self):
        # Distributed data parallel is not currently supported here
train_dataloader_dict = {}
for task_name in self.jiant_task_container.task_run_config.train_task_list:
task = self.jiant_task_container.task_dict[task_name]
train_cache = self.jiant_task_container.task_cache_dict[task_name]["train"]
train_batch_size = self.jiant_task_container.task_specific_configs[
task_name
].train_batch_size
train_dataloader_dict[task_name] = InfiniteYield(
get_train_dataloader_from_cache(
train_cache=train_cache, task=task, train_batch_size=train_batch_size,
)
)
return train_dataloader_dict
def _get_eval_dataloader_dict(self, phase, task_name_list, use_subset=False):
val_dataloader_dict = {}
for task_name in task_name_list:
task = self.jiant_task_container.task_dict[task_name]
eval_cache = self.jiant_task_container.task_cache_dict[task_name][phase]
task_specific_config = self.jiant_task_container.task_specific_configs[task_name]
val_dataloader_dict[task_name] = get_eval_dataloader_from_cache(
eval_cache=eval_cache,
task=task,
eval_batch_size=task_specific_config.eval_batch_size,
subset_num=task_specific_config.eval_subset_num if use_subset else None,
)
return val_dataloader_dict
def get_val_dataloader_dict(self, task_name_list, use_subset=False, phase = "val"):
return self._get_eval_dataloader_dict(
phase, task_name_list=task_name_list, use_subset=use_subset,
)
def get_val_labels_dict(self, task_name_list, use_subset=False, label_phase = "val"):
val_labels_dict = {}
for task_name in task_name_list:
task_specific_config = self.jiant_task_container.task_specific_configs[task_name]
val_labels_cache = self.jiant_task_container.task_cache_dict[task_name][label_phase + "_labels"]
val_labels = val_labels_cache.get_all()
if use_subset:
val_labels = val_labels[: task_specific_config.eval_subset_num]
val_labels_dict[task_name] = val_labels
return val_labels_dict
def get_test_dataloader_dict(self):
return self._get_eval_dataloader_dict(
task_name_list=self.jiant_task_container.task_run_config.test_task_list,
phase=PHASE.TEST,
)
def complex_backpropagate(self, loss, gradient_accumulation_steps, retain_graph = False):
return complex_backpropagate(
loss=loss,
optimizer=self.optimizer_scheduler.optimizer,
model=self.jiant_model,
fp16=self.rparams.fp16,
n_gpu=self.rparams.n_gpu,
gradient_accumulation_steps=gradient_accumulation_steps,
max_grad_norm=self.rparams.max_grad_norm,
retain_graph = retain_graph
)
def get_runner_state(self):
# TODO: Add fp16 (issue #1186)
state = {
"model": torch_utils.get_model_for_saving(self.jiant_model).state_dict(),
"optimizer": self.optimizer_scheduler.optimizer.state_dict(),
}
return state
def load_state(self, runner_state):
torch_utils.get_model_for_saving(self.jiant_model).load_state_dict(runner_state["model"])
self.optimizer_scheduler.optimizer.load_state_dict(runner_state["optimizer"])
class CheckpointSaver:
def __init__(self, metadata, save_path):
self.metadata = metadata
self.save_path = save_path
def save(self, runner_state: dict, metarunner_state: dict):
to_save = {
"runner_state": runner_state,
"metarunner_state": metarunner_state,
"metadata": self.metadata,
}
torch_utils.safe_save(to_save, self.save_path)
def run_val(
val_dataloader,
val_labels,
jiant_model: JiantModel,
task,
device,
local_rank,
return_preds=False,
verbose=True,
tag="Val",
user_mode = None,
):
# Reminder:
# val_dataloader contains mostly PyTorch-relevant info
    # val_labels might contain more detailed information needed for full evaluation
if not local_rank == -1:
return
jiant_model.eval()
total_eval_loss = 0
nb_eval_steps, nb_eval_examples = 0, 0
evaluation_scheme = evaluate.get_evaluation_scheme_for_task(task=task)
eval_accumulator = evaluation_scheme.get_accumulator()
for step, (batch, batch_metadata) in enumerate(
maybe_tqdm(val_dataloader, desc=f"Eval ({task.name}, {tag})", verbose=verbose)
):
batch = batch.to(device)
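        # Prompt tuning: prepend the learned prompt token ids (indices just past the
        # original vocabulary) to each sequence and extend the attention mask and
        # segment ids to match.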
if user_mode is not None and 'prompt' in user_mode:
ptsize = int(user_mode['prompt'])
input_ids = batch.input_ids.new_zeros([batch.input_ids.shape[0], ptsize])
input_ids[:] = torch.arange(ptsize) + jiant_model.encoder.embeddings.word_embeddings.num_embeddings
batch.input_ids = torch.cat([input_ids, batch.input_ids], 1)
batch.input_mask = torch.cat([torch.ones_like(input_ids), batch.input_mask], 1)
batch.segment_ids = torch.cat([torch.zeros_like(input_ids), batch.segment_ids], 1)
with torch.no_grad():
model_output = wrap_jiant_forward(
jiant_model=jiant_model, batch=batch, task=task, compute_loss=True,
)
batch_logits = model_output.logits.detach().cpu().numpy()
batch_loss = model_output.loss.mean().item()
total_eval_loss += batch_loss
eval_accumulator.update(
batch_logits=batch_logits,
batch_loss=batch_loss,
batch=batch,
batch_metadata=batch_metadata,
)
nb_eval_examples += len(batch)
nb_eval_steps += 1
eval_loss = total_eval_loss / nb_eval_steps
tokenizer = (
jiant_model.tokenizer
if not torch_utils.is_data_parallel(jiant_model)
else jiant_model.module.tokenizer
)
output = {
"accumulator": eval_accumulator,
"loss": eval_loss,
"metrics": evaluation_scheme.compute_metrics_from_accumulator(
task=task, accumulator=eval_accumulator, labels=val_labels, tokenizer=tokenizer,
),
}
if return_preds:
output["preds"] = evaluation_scheme.get_preds_from_accumulator(
task=task, accumulator=eval_accumulator,
)
return output
def run_test(
test_dataloader,
jiant_model: JiantModel,
task,
device,
local_rank,
verbose=True,
return_preds=True,
):
if not local_rank == -1:
return
jiant_model.eval()
evaluation_scheme = evaluate.get_evaluation_scheme_for_task(task=task)
eval_accumulator = evaluation_scheme.get_accumulator()
for step, (batch, batch_metadata) in enumerate(
maybe_tqdm(test_dataloader, desc=f"Eval ({task.name}, Test)", verbose=verbose)
):
batch = batch.to(device)
with torch.no_grad():
model_output = wrap_jiant_forward(
jiant_model=jiant_model, batch=batch, task=task, compute_loss=False,
)
batch_logits = model_output.logits.detach().cpu().numpy()
eval_accumulator.update(
batch_logits=batch_logits, batch_loss=0, batch=batch, batch_metadata=batch_metadata,
)
output = {
"accumulator": eval_accumulator,
}
if return_preds:
output["preds"] = evaluation_scheme.get_preds_from_accumulator(
task=task, accumulator=eval_accumulator,
)
return output
| 45,479 | 47.177966 | 202 |
py
|
AnalyzeParameterEfficientFinetune
|
AnalyzeParameterEfficientFinetune-main/src/model_setup.py
|
import transformers
import torch
from jiant.ext.radam import RAdam
class OptimizerScheduler:
def __init__(self, optimizer, scheduler):
super().__init__()
self.optimizer = optimizer
self.scheduler = scheduler
def step(self):
self.optimizer.step()
self.scheduler.step()
def state_dict(self):
return {
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
}
def load_state_dict(self, state_dict, strict=True):
        # torch's optimizer/scheduler load_state_dict do not accept a `strict` argument
        self.optimizer.load_state_dict(state_dict["optimizer"])
        self.scheduler.load_state_dict(state_dict["scheduler"])
def create_optimizer(
model,
learning_rate,
t_total,
warmup_steps,
warmup_proportion,
optimizer_epsilon=1e-8,
optimizer_type="adam",
verbose=False,
):
return create_optimizer_from_params(
named_parameters=list(model.named_parameters()),
learning_rate=learning_rate,
t_total=t_total,
warmup_steps=warmup_steps,
warmup_proportion=warmup_proportion,
optimizer_epsilon=optimizer_epsilon,
optimizer_type=optimizer_type,
verbose=verbose,
)
def create_optimizer_from_params(
named_parameters,
learning_rate,
t_total,
warmup_steps,
warmup_proportion,
optimizer_epsilon=1e-8,
optimizer_type="adam",
verbose=False,
):
# Prepare optimizer
no_decay = [
"bias",
"LayerNorm.bias",
"LayerNorm.weight",
"adapter.down_project.weight",
"adapter.up_project.weight",
"weighted_sum.weights",
]
if verbose:
print("No optimizer decay for:")
for n, p in named_parameters:
if any(nd in n for nd in no_decay):
print(f" {n}")
used_named_parameters = [
(n, p) for n, p in named_parameters if p.requires_grad and "weighted_sum.weights" not in n and p.is_leaf
]
weighted_sum_params = [
(n, p) for n, p in named_parameters if p.requires_grad and "weighted_sum.weights" in n and p.is_leaf
]
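    # Three parameter groups: weight-decayed parameters, no-decay parameters (biases,
    # LayerNorms, adapter projections), and scalar-mix ("weighted_sum") weights with
    # their own learning rate.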
optimizer_grouped_parameters = [
{
"params": [p for n, p in used_named_parameters if not any(nd in n for nd in no_decay)],
"weight_decay": 0.01,
},
{
"params": [p for n, p in used_named_parameters if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
{"params": [p for n, p in weighted_sum_params], "weight_decay": 0.0, "lr": 0.01},
]
if optimizer_type == "adam":
if verbose:
print("Using AdamW")
optimizer = transformers.AdamW(
optimizer_grouped_parameters, lr=learning_rate, eps=optimizer_epsilon
)
elif optimizer_type == "radam":
if verbose:
print("Using RAdam")
optimizer = RAdam(optimizer_grouped_parameters, lr=learning_rate, eps=optimizer_epsilon)
else:
raise KeyError(optimizer_type)
warmup_steps = resolve_warmup_steps(
t_total=t_total, warmup_steps=warmup_steps, warmup_proportion=warmup_proportion,
)
scheduler = transformers.get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total
)
optimizer_scheduler = OptimizerScheduler(optimizer=optimizer, scheduler=scheduler)
return optimizer_scheduler
def resolve_warmup_steps(t_total, warmup_steps, warmup_proportion):
    if warmup_steps is None and warmup_proportion is None:
        raise RuntimeError("Specify either warmup_steps or warmup_proportion")
    elif warmup_steps is not None and warmup_proportion is not None:
        raise RuntimeError("Specify only one of warmup_steps and warmup_proportion")
elif warmup_steps is None and warmup_proportion is not None:
return warmup_proportion * t_total
elif warmup_steps is not None and warmup_proportion is None:
return warmup_steps
else:
raise RuntimeError()
def fp16ize(model, optimizer, fp16_opt_level):
try:
# noinspection PyUnresolvedReferences,PyPackageRequirements
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
)
model, optimizer = amp.initialize(model, optimizer, opt_level=fp16_opt_level)
return model, optimizer
def parallelize_gpu(model):
return torch.nn.DataParallel(model)
def parallelize_dist(model, local_rank):
return torch.nn.parallel.DistributedDataParallel(
model, device_ids=[local_rank], output_device=local_rank,
)
def raw_special_model_setup(model, optimizer, fp16, fp16_opt_level, n_gpu, local_rank):
"""Perform setup for special modes (e.g., FP16, DataParallel, and/or DistributedDataParallel.
Args:
model (nn.Module): torch model object.
optimizer: TODO
fp16 (bool): True to enable FP16 mode.
fp16_opt_level (str): Apex AMP optimization level default mode identifier.
n_gpu: number of GPUs.
local_rank (int): Which GPU the script should use in DistributedDataParallel mode.
Notes:
Initialization steps performed in init_cuda_from_args() set n_gpu = 1 when local_rank != -1.
Returns:
Model and optimizer with the specified special configuration.
"""
if fp16:
model, optimizer = fp16ize(model=model, optimizer=optimizer, fp16_opt_level=fp16_opt_level)
if n_gpu > 1:
model = parallelize_gpu(model=model)
if local_rank != -1:
model = parallelize_dist(model=model, local_rank=local_rank)
return model, optimizer
def special_model_setup(
model_wrapper, optimizer_scheduler, fp16, fp16_opt_level, n_gpu, local_rank
):
model, optimizer = raw_special_model_setup(
model=model_wrapper.model,
optimizer=optimizer_scheduler.optimizer,
fp16=fp16,
fp16_opt_level=fp16_opt_level,
n_gpu=n_gpu,
local_rank=local_rank,
)
model_wrapper.model = model
optimizer_scheduler.optimizer = optimizer
| 6,050 | 30.515625 | 112 |
py
|
AnalyzeParameterEfficientFinetune
|
AnalyzeParameterEfficientFinetune-main/src/main_runscript.py
|
import os
import torch
import datetime
import jiant.proj.main.modeling.model_setup as jiant_model_setup
import runner as jiant_runner
#import jiant.proj.main.components.container_setup as container_setup
#import jiant.proj.main.metarunner as jiant_metarunner
import metarunner as jiant_metarunner
#import jiant.proj.main.components.evaluate as jiant_evaluate
import evaluate as jiant_evaluate
import jiant.shared.initialization as initialization
import jiant.shared.distributed as distributed
#import jiant.shared.model_setup as model_setup
import model_setup
import jiant.utils.python.io as py_io
import jiant.utils.zconf as zconf
import zlog # maple
import container_setup # maple
@zconf.run_config
class RunConfiguration(zconf.RunConfig):
# === Required parameters === #
jiant_task_container_config_path = zconf.attr(type=str, required=True)
output_dir = zconf.attr(type=str, required=True)
# === Model parameters === #
hf_pretrained_model_name_or_path = zconf.attr(type=str, required=True)
model_path = zconf.attr(type=str, required=True)
model_config_path = zconf.attr(default=None, type=str)
model_load_mode = zconf.attr(default="from_transformers", type=str)
# === Running Setup === #
do_train = zconf.attr(action="store_true")
do_val = zconf.attr(action="store_true")
do_save = zconf.attr(action="store_true")
do_save_last = zconf.attr(action="store_true")
do_save_best = zconf.attr(action="store_true")
write_val_preds = zconf.attr(action="store_true")
write_test_preds = zconf.attr(action="store_true")
eval_every_steps = zconf.attr(type=int, default=0)
min_train_steps = zconf.attr(type=int, default=0)# maple
save_every_steps = zconf.attr(type=int, default=0)
save_checkpoint_every_steps = zconf.attr(type=int, default=0)
no_improvements_for_n_evals = zconf.attr(type=int, default=0)
keep_checkpoint_when_done = zconf.attr(action="store_true")
    force_overwrite = zconf.attr(action="store_true")
    # maple: fields referenced later in this script (the defaults below are assumptions)
    do_test = zconf.attr(action="store_true")
    user_mode = zconf.attr(default="", type=str)
    log_dir = zconf.attr(default="./logs", type=str)
seed = zconf.attr(type=int, default=-1)
# === Training Learning Parameters === #
learning_rate = zconf.attr(default=1e-5, type=float)
adam_epsilon = zconf.attr(default=1e-8, type=float)
max_grad_norm = zconf.attr(default=1.0, type=float)
optimizer_type = zconf.attr(default="adam", type=str)
# Specialized config
no_cuda = zconf.attr(action="store_true")
fp16 = zconf.attr(action="store_true")
fp16_opt_level = zconf.attr(default="O1", type=str)
local_rank = zconf.attr(default=-1, type=int)
server_ip = zconf.attr(default="", type=str)
server_port = zconf.attr(default="", type=str)
@zconf.run_config
class ResumeConfiguration(zconf.RunConfig):
checkpoint_path = zconf.attr(type=str)
def setup_runner(
args: RunConfiguration,
jiant_task_container: container_setup.JiantTaskContainer,
quick_init_out,
verbose: bool = True,
) -> jiant_runner.JiantRunner:
"""Setup jiant model, optimizer, and runner, and return runner.
Args:
args (RunConfiguration): configuration carrying command line args specifying run params.
jiant_task_container (container_setup.JiantTaskContainer): task and sampler configs.
quick_init_out (QuickInitContainer): device (GPU/CPU) and logging configuration.
verbose: If True, enables printing configuration info (to standard out).
Returns:
jiant_runner.JiantRunner
"""
# TODO document why the distributed.only_first_process() context manager is being used here.
with distributed.only_first_process(local_rank=args.local_rank):
# load the model
jiant_model = jiant_model_setup.setup_jiant_model(
hf_pretrained_model_name_or_path=args.hf_pretrained_model_name_or_path,
model_config_path=args.model_config_path,
task_dict=jiant_task_container.task_dict,
taskmodels_config=jiant_task_container.taskmodels_config,
)
jiant_model_setup.delegate_load_from_path(
jiant_model=jiant_model, weights_path=args.model_path, load_mode=args.model_load_mode
)
jiant_model.to(quick_init_out.device)
user_mode = {e.split('=')[0] : e.split('=')[1] if len(e.split('=')) > 1 else None for e in (args.user_mode[0].split(',') if type(args.user_mode) is not str else args.user_mode.split(',')) }
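    # user_mode: comma-separated key[=value] flags choosing a parameter-efficient
    # fine-tuning variant (the value is None when omitted).
    # 'sctmask': re-expose a random fraction of each encoder tensor's entries as new
    # trainable leaf parameters and freeze the original tensors.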
if 'sctmask' in user_mode:
import numpy as np
parms = dict(jiant_model.named_parameters())
max_len = max([parms[n].numel() for n in parms])
max_ids = list(range(max_len))
jiant_model.encoder.idx_dict = {}
for name in parms:
if not name.startswith('encoder'):
continue
fpar = parms[name].view(-1)
xname = "sctmask__" + name.replace(".", "__")
idx = torch.tensor(np.random.choice(max_ids[:len(fpar)], size = int(len(fpar) * float(user_mode['sctmask'])), replace = False)).to(fpar.device)
x = fpar[idx].detach()
x.requires_grad = True
jiant_model.encoder.idx_dict[xname] = idx
setattr(jiant_model.encoder, xname, torch.nn.Parameter(x))
x = getattr(jiant_model.encoder, xname)
parms[name].requires_grad = False
parms[name].flatten()[idx] = x
elif 'embtune' in user_mode:
parms = dict(jiant_model.named_parameters())
for p in parms:
if 'embeddings' not in p and 'taskmodels_dict' not in p:
parms[p].requires_grad = False
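    # 'prompt': prompt tuning -- freeze the encoder and learn `prompt` extra embedding
    # rows appended to the word-embedding matrix; the runner prepends the corresponding
    # token ids to each input batch.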
elif 'prompt' in user_mode:
parms = dict(jiant_model.named_parameters())
for name in parms:
if name.startswith('encoder'):
parms[name].requires_grad = False
jiant_model.encoder.embeddings.prompt_weight = torch.nn.Parameter(jiant_model.encoder.embeddings.word_embeddings.weight.new_zeros([int(user_mode['prompt']), 768]))
torch.nn.init.xavier_uniform_(jiant_model.encoder.embeddings.prompt_weight)
jiant_model.encoder.embeddings.prompt_weight.requires_grad = True
jiant_model.encoder.embeddings.word_embeddings.weight_ori = jiant_model.encoder.embeddings.word_embeddings.weight
jiant_model.encoder.embeddings.word_embeddings.weight = torch.nn.Parameter(torch.cat([jiant_model.encoder.embeddings.word_embeddings.weight_ori, jiant_model.encoder.embeddings.prompt_weight]))
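    # 'diffprun': diff pruning -- freeze the pretrained weights (kept in ori_pars) and
    # register, per tensor, a learnable difference `w__*` and gate `ber__*` on the
    # encoder; the associated L0 penalty appears as `l0` in the training-loop logging.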
elif 'diffprun' in user_mode:
parms = dict(jiant_model.named_parameters())
jiant_model.ori_pars = {}
for p in parms:
w = torch.zeros_like(parms[p])
wname = "w__" + p.replace(".", "__")
setattr(jiant_model.encoder, wname, torch.nn.Parameter(w))
bername = "ber__" + p.replace(".", "__")
ber = torch.randn_like(parms[p]).sigmoid()
setattr(jiant_model.encoder, bername, torch.nn.Parameter(ber))
jiant_model.ori_pars[p] = parms[p].data
parms[p].requires_grad = False
parms[p].detach_()
elif 'adapter' in user_mode:
jiant_model.encoder.add_adapter("adapter")
jiant_model.encoder.train_adapter("adapter")
jiant_model.encoder.to(jiant_model.encoder.device)
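    # 'lora': replace the attention and feed-forward Linear layers with loralib Linear
    # modules of rank r, copy over the pretrained weights, and mark only the LoRA
    # matrices as trainable.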
elif 'lora' in user_mode:
import loralib as lora
import math
import torch.nn as nn
        r = int(user_mode['lora']) if user_mode['lora'] is not None else 16
def set_lora(attn , name):
linear = getattr(attn, name)
q = lora.Linear(linear.in_features, linear.out_features, r).to(linear.weight.device)
q.weight.data[:] = linear.weight.data[:]
q.bias.data[:] = linear.bias.data[:]
nn.init.kaiming_uniform_(q.lora_B, a=math.sqrt(5))
setattr(attn, name, q)
for i in range(len(jiant_model.encoder.encoder.layer)):
set_lora(jiant_model.encoder.encoder.layer[i].attention.self, 'query')
set_lora(jiant_model.encoder.encoder.layer[i].attention.self, 'key')
set_lora(jiant_model.encoder.encoder.layer[i].attention.self, 'value')
set_lora(jiant_model.encoder.encoder.layer[i].attention.output, 'dense')
set_lora(jiant_model.encoder.encoder.layer[i].intermediate, 'dense')
set_lora(jiant_model.encoder.encoder.layer[i].output, 'dense')
lora.mark_only_lora_as_trainable(jiant_model)
optimizer_scheduler = model_setup.create_optimizer(
model=jiant_model,
learning_rate=args.learning_rate,
t_total=jiant_task_container.global_train_config.max_steps,
warmup_steps=jiant_task_container.global_train_config.warmup_steps,
warmup_proportion=None,
optimizer_type=args.optimizer_type,
verbose=verbose,
)
jiant_model, optimizer = model_setup.raw_special_model_setup(
model=jiant_model,
optimizer=optimizer_scheduler.optimizer,
fp16=args.fp16,
fp16_opt_level=args.fp16_opt_level,
n_gpu=quick_init_out.n_gpu,
local_rank=args.local_rank,
)
optimizer_scheduler.optimizer = optimizer
rparams = jiant_runner.RunnerParameters(
local_rank=args.local_rank,
n_gpu=quick_init_out.n_gpu,
fp16=args.fp16,
max_grad_norm=args.max_grad_norm,
)
runner = jiant_runner.JiantRunner(
jiant_task_container=jiant_task_container,
jiant_model=jiant_model,
optimizer_scheduler=optimizer_scheduler,
device=quick_init_out.device,
rparams=rparams,
log_writer=quick_init_out.log_writer,
)
runner.user_mode = user_mode
if 'mixout' in user_mode:
import copy
runner.encoder0 = copy.deepcopy(jiant_model.encoder)
return runner
def run_loop(args: RunConfiguration, checkpoint=None):
is_resumed = checkpoint is not None
quick_init_out = initialization.quick_init(args=args, verbose=True)
# maple
    quick_init_out.log_writer = zlog.ZLogger(os.path.join(args.log_dir, datetime.datetime.now().strftime("%Y%m%d%H%M%S")), overwrite=True)
print(quick_init_out.n_gpu)
with quick_init_out.log_writer.log_context():
jiant_task_container = container_setup.create_jiant_task_container_from_json(
jiant_task_container_config_path=args.jiant_task_container_config_path, verbose=True,
)
runner = setup_runner(
args=args,
jiant_task_container=jiant_task_container,
quick_init_out=quick_init_out,
verbose=True,
)
if is_resumed:
runner.load_state(checkpoint["runner_state"])
del checkpoint["runner_state"]
checkpoint_saver = jiant_runner.CheckpointSaver(
metadata={"args": args.to_dict()},
save_path=os.path.join(args.output_dir, "checkpoint.p"),
)
if args.do_train:
metarunner = jiant_metarunner.JiantMetarunner(
runner=runner,
save_every_steps=args.save_every_steps,
eval_every_steps=args.eval_every_steps,
min_train_steps = args.min_train_steps,
save_checkpoint_every_steps=args.save_checkpoint_every_steps,
no_improvements_for_n_evals=args.no_improvements_for_n_evals,
checkpoint_saver=checkpoint_saver,
output_dir=args.output_dir,
verbose=True,
save_best_model=args.do_save or args.do_save_best,
save_last_model=args.do_save or args.do_save_last,
load_best_model=True,
log_writer=quick_init_out.log_writer,
)
if is_resumed:
metarunner.load_state(checkpoint["metarunner_state"])
del checkpoint["metarunner_state"]
metarunner.run_train_loop()
if args.do_val:
val_results_dict = runner.run_val(
task_name_list=runner.jiant_task_container.task_run_config.val_task_list,
return_preds=args.write_val_preds,
)
jiant_evaluate.write_val_results(
val_results_dict=val_results_dict,
metrics_aggregator=runner.jiant_task_container.metrics_aggregator,
output_dir=args.output_dir,
verbose=True,
)
if args.write_val_preds:
jiant_evaluate.write_preds(
eval_results_dict=val_results_dict,
path=os.path.join(args.output_dir, "val_preds.p"),
)
else:
assert not args.write_val_preds
if args.do_test:#maple
test_results_dict = runner.run_val(
task_name_list=runner.jiant_task_container.task_run_config.test_task_list,
return_preds=False,
phase = "test"
)
jiant_evaluate.write_val_results(
val_results_dict=test_results_dict,
metrics_aggregator=runner.jiant_task_container.metrics_aggregator,
output_dir=args.output_dir,
verbose=True,
result_file = "test_metrics.json"
)
train_results_dict = runner.run_val(
task_name_list=runner.jiant_task_container.task_run_config.test_task_list,
return_preds=False,
phase = "train"
)
jiant_evaluate.write_val_results(
val_results_dict=train_results_dict,
metrics_aggregator=runner.jiant_task_container.metrics_aggregator,
output_dir=args.output_dir,
verbose=True,
result_file = "train_metrics.json"
)
if args.write_test_preds:
test_results_dict = runner.run_test(
task_name_list=runner.jiant_task_container.task_run_config.test_task_list,
)
jiant_evaluate.write_preds(
eval_results_dict=test_results_dict,
path=os.path.join(args.output_dir, "test_preds.p"),
)
if (
not args.keep_checkpoint_when_done
and args.save_checkpoint_every_steps
and os.path.exists(os.path.join(args.output_dir, "checkpoint.p"))
):
os.remove(os.path.join(args.output_dir, "checkpoint.p"))
py_io.write_file("DONE", os.path.join(args.output_dir, "done_file"))
def run_resume(args: ResumeConfiguration):
resume(checkpoint_path=args.checkpoint_path)
def resume(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
args = RunConfiguration.from_dict(checkpoint["metadata"]["args"])
run_loop(args=args, checkpoint=checkpoint)
def run_with_continue(cl_args):
run_args = RunConfiguration.default_run_cli(cl_args=cl_args)
if not run_args.force_overwrite and (
os.path.exists(os.path.join(run_args.output_dir, "done_file"))
or os.path.exists(os.path.join(run_args.output_dir, "val_metrics.json"))
):
print("Already Done")
return
elif run_args.save_checkpoint_every_steps and os.path.exists(
os.path.join(run_args.output_dir, "checkpoint.p")
):
print("Resuming")
resume(os.path.join(run_args.output_dir, "checkpoint.p"))
else:
print("Running from start")
run_loop(args=run_args)
def main():
mode, cl_args = zconf.get_mode_and_cl_args()
if mode == "run":
run_loop(RunConfiguration.default_run_cli(cl_args=cl_args))
elif mode == "continue":
run_resume(ResumeConfiguration.default_run_cli(cl_args=cl_args))
elif mode == "run_with_continue":
run_with_continue(cl_args=cl_args)
else:
raise zconf.ModeLookupError(mode)
if __name__ == "__main__":
main()
| 15,719 | 41.95082 | 200 |
py
|
AnalyzeParameterEfficientFinetune
|
AnalyzeParameterEfficientFinetune-main/src/tokenize_and_cache.py
|
import os
from transformers import AutoConfig
from transformers import AutoTokenizer
import jiant.proj.main.preprocessing as preprocessing
import jiant.shared.caching as shared_caching
import jiant.tasks.evaluate as evaluate
import jiant.utils.python.io as py_io
import jiant.utils.zconf as zconf
from jiant.proj.main.modeling.primary import JiantTransformersModelFactory
from jiant.shared.constants import PHASE
from jiant.tasks.retrieval import create_task_from_config_path
from jiant.proj.main.tokenize_and_cache import *
def main(args: RunConfiguration):
config = AutoConfig.from_pretrained(args.hf_pretrained_model_name_or_path)
model_type = config.model_type
task = create_task_from_config_path(config_path=args.task_config_path, verbose=True)
feat_spec = JiantTransformersModelFactory.build_featurization_spec(
model_type=model_type, max_seq_length=args.max_seq_length,
)
tokenizer = AutoTokenizer.from_pretrained(args.hf_pretrained_model_name_or_path, use_fast=False)
if isinstance(args.phases, str):
phases = args.phases.split(",")
else:
phases = args.phases
#assert set(phases) <= {PHASE.TRAIN, PHASE.VAL, PHASE.TEST}
#maple remove
paths_dict = {}
os.makedirs(args.output_dir, exist_ok=True)
if PHASE.TRAIN in phases:
chunk_and_save(
task=task,
phase=PHASE.TRAIN,
examples=task.get_train_examples(),
feat_spec=feat_spec,
tokenizer=tokenizer,
args=args,
)
paths_dict["train"] = os.path.join(args.output_dir, PHASE.TRAIN)
if PHASE.VAL in phases:
val_examples = task.get_val_examples()
chunk_and_save(
task=task,
phase=PHASE.VAL,
examples=val_examples,
feat_spec=feat_spec,
tokenizer=tokenizer,
args=args,
)
evaluation_scheme = evaluate.get_evaluation_scheme_for_task(task)
shared_caching.chunk_and_save(
data=evaluation_scheme.get_labels_from_cache_and_examples(
task=task,
cache=shared_caching.ChunkedFilesDataCache(
os.path.join(args.output_dir, PHASE.VAL)
),
examples=val_examples,
),
chunk_size=args.chunk_size,
data_args=args.to_dict(),
output_dir=os.path.join(args.output_dir, "val_labels"),
)
paths_dict[PHASE.VAL] = os.path.join(args.output_dir, PHASE.VAL)
paths_dict["val_labels"] = os.path.join(args.output_dir, "val_labels")
if PHASE.TEST in phases:
chunk_and_save(
task=task,
phase=PHASE.TEST,
examples=task.get_test_examples(),
feat_spec=feat_spec,
tokenizer=tokenizer,
args=args,
)
paths_dict[PHASE.TEST] = os.path.join(args.output_dir, PHASE.TEST)
if "test_labels" in phases:
from jiant.utils.python.io import read_jsonl
test_examples = task._create_examples(lines=read_jsonl(task.test_path), set_type="val")
chunk_and_save(
task=task,
phase=PHASE.TEST,
examples=test_examples,
feat_spec=feat_spec,
tokenizer=tokenizer,
args=args,
)
evaluation_scheme = evaluate.get_evaluation_scheme_for_task(task)
shared_caching.chunk_and_save(
data=evaluation_scheme.get_labels_from_cache_and_examples(
task=task,
cache=shared_caching.ChunkedFilesDataCache(
os.path.join(args.output_dir, PHASE.TEST)
),
examples=test_examples,
),
chunk_size=args.chunk_size,
data_args=args.to_dict(),
output_dir=os.path.join(args.output_dir, "test_labels"),
)
paths_dict["test_labels"] = os.path.join(args.output_dir, "test_labels")
#===for train labels:
evaluation_scheme = evaluate.get_evaluation_scheme_for_task(task)
shared_caching.chunk_and_save(
data=evaluation_scheme.get_labels_from_cache_and_examples(
task=task,
cache=shared_caching.ChunkedFilesDataCache(
os.path.join(args.output_dir, PHASE.TRAIN)
),
examples=task.get_train_examples(),
),
chunk_size=args.chunk_size,
data_args=args.to_dict(),
output_dir=os.path.join(args.output_dir, "train_labels"),
)
paths_dict["train_labels"] = os.path.join(args.output_dir, "train_labels")
if not args.skip_write_output_paths:
py_io.write_json(data=paths_dict, path=os.path.join(args.output_dir, "paths.json"))
if __name__ == "__main__":
main(args=RunConfiguration.run_cli_json_prepend())
| 4,884 | 36.290076 | 100 |
py
|
pdbfixer
|
pdbfixer-master/setup.py
|
"""pdbfixer: Fixes problems in PDB files
Protein Data Bank (PDB) files often have a number of problems that must be
fixed before they can be used in a molecular dynamics simulation. The details
vary depending on how the file was generated. Here are some of the most common
ones:
- If the structure was generated by X-ray crystallography, most or all of the
  hydrogen atoms will usually be missing.
- There may also be missing heavy atoms in flexible regions that could not be
clearly resolved from the electron density. This may include anything from a
few atoms at the end of a sidechain to entire loops.
- Many PDB files are also missing terminal atoms that should be present at the
ends of chains.
- The file may include nonstandard residues that were added for crystallography
purposes, but are not present in the naturally occurring molecule you want to
simulate.
- The file may include more than what you want to simulate. For example, there
may be salts, ligands, or other molecules that were added for experimental
purposes. Or the crystallographic unit cell may contain multiple copies of a
protein, but you only want to simulate a single copy.
- There may be multiple locations listed for some atoms.
- If you want to simulate the structure in explicit solvent, you will need to
add a water box surrounding it.
PDBFixer can fix all of these problems for you in a fully automated way. You
simply select a file, tell it which problems to fix, and it does everything else.
"""
from __future__ import print_function
import os
import sys
from os.path import relpath, join
from setuptools import setup, find_packages
DOCLINES = __doc__.split("\n")
########################
__version__ = '1.9.0'
VERSION = __version__
ISRELEASED = False
########################
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Scientific/Engineering :: Chemistry
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
def find_package_data():
files = []
for root, dirnames, filenames in os.walk('pdbfixer'):
for fn in filenames:
files.append(relpath(join(root, fn), 'pdbfixer'))
return files
setup(
name='pdbfixer',
author='Peter Eastman',
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
version=__version__,
license='MIT',
url='https://github.com/openmm/pdbfixer',
platforms=['Linux', 'Mac OS-X', 'Unix', 'Windows'],
classifiers=CLASSIFIERS.splitlines(),
packages=find_packages(),
package_data={'pdbfixer': find_package_data()},
zip_safe=False,
install_requires=['numpy', 'openmm >= 7.1'],
entry_points={'console_scripts': ['pdbfixer = pdbfixer.pdbfixer:main']})
| 3,004 | 36.5625 | 81 |
py
|
pdbfixer
|
pdbfixer-master/devtools/createSoftForcefield.py
|
"""
createSoftForceField.py: Creates a force field XML file that is suitable
for removing clashes in very badly strained systems.
This is part of the OpenMM molecular simulation toolkit originating from
Simbios, the NIH National Center for Physics-Based Simulation of
Biological Structures at Stanford, funded under the NIH Roadmap for
Medical Research, grant U54 GM072970. See https://simtk.org.
Portions copyright (c) 2013 Stanford University and the Authors.
Authors: Peter Eastman
Contributors:
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
__author__ = "Peter Eastman"
__version__ = "1.0"
import openmm.app as app
import openmm.app.element as elem
import openmm.app.forcefield as ff
forcefield = app.ForceField('amber99sbildn.xml', 'tip3p.xml')
bondK = 10000.0
angleK = 10.0
# Create the new force field file.
print('<ForceField>')
# Print the atom types, while identifying types and classes to omit.
print(' <AtomTypes>')
omitTypes = set()
omitClasses = set()
for atomType in forcefield._atomTypes:
(atomClass, mass, element) = forcefield._atomTypes[atomType]
if element is None or element == elem.hydrogen:
omitTypes.add(atomType)
omitClasses.add(atomClass)
else:
print(' <Type name="%s" class="%s" element="%s" mass="%g"/>' % (atomType, atomClass, element.symbol, mass))
print(' </AtomTypes>')
# Print the residue templates.
print(' <Residues>')
for template in forcefield._templates.values():
print(' <Residue name="%s">' % template.name)
atomIndex = {}
for i, atom in enumerate(template.atoms):
if atom.type not in omitTypes:
print(' <Atom name="%s" type="%s"/>' % (atom.name, atom.type))
atomIndex[i] = len(atomIndex)
for (a1, a2) in template.bonds:
if a1 in atomIndex and a2 in atomIndex:
print(' <Bond from="%d" to="%d"/>' % (atomIndex[a1], atomIndex[a2]))
for atom in template.externalBonds:
if atom in atomIndex:
print(' <ExternalBond from="%d"/>' % atomIndex[atom])
print(' </Residue>')
print(' </Residues>')
# Print the harmonic bonds.
print(' <HarmonicBondForce>')
bonds = [f for f in forcefield._forces if isinstance(f, ff.HarmonicBondGenerator)][0]
for i in range(len(bonds.types1)):
type1 = next(iter(bonds.types1[i]))
type2 = next(iter(bonds.types2[i]))
if type1 not in omitTypes and type2 not in omitTypes:
class1 = forcefield._atomTypes[type1][0]
class2 = forcefield._atomTypes[type2][0]
print(' <Bond class1="%s" class2="%s" length="%g" k="%g"/>' % (class1, class2, bonds.length[i], bondK))
print(' </HarmonicBondForce>')
# Print the harmonic angles.
print(' <HarmonicAngleForce>')
angles = [f for f in forcefield._forces if isinstance(f, ff.HarmonicAngleGenerator)][0]
for i in range(len(angles.types1)):
type1 = next(iter(angles.types1[i]))
type2 = next(iter(angles.types2[i]))
type3 = next(iter(angles.types3[i]))
if type1 not in omitTypes and type2 not in omitTypes and type3 not in omitTypes:
class1 = forcefield._atomTypes[type1][0]
class2 = forcefield._atomTypes[type2][0]
class3 = forcefield._atomTypes[type3][0]
print(' <Angle class1="%s" class2="%s" class3="%s" angle="%g" k="%g"/>' % (class1, class2, class3, angles.angle[i], angleK))
print(' </HarmonicAngleForce>')
# Print the periodic torsions.
print(' <PeriodicTorsionForce>')
torsions = [f for f in forcefield._forces if isinstance(f, ff.PeriodicTorsionGenerator)][0]
for torsion in torsions.proper:
type1 = next(iter(torsion.types1))
type2 = next(iter(torsion.types2))
type3 = next(iter(torsion.types3))
    type4 = next(iter(torsion.types4))
if type1 not in omitTypes and type2 not in omitTypes and type3 not in omitTypes and type4 not in omitTypes:
class1 = forcefield._atomTypes[type1][0]
class2 = forcefield._atomTypes[type2][0]
class3 = forcefield._atomTypes[type3][0]
class4 = forcefield._atomTypes[type4][0]
print(' <Proper class1="%s" class2="%s" class3="%s" class4="%s"' % (class1, class2, class3, class4), end=' ')
for i in range(len(torsion.k)):
print(' periodicity%d="%d" phase%d="%g" k%d="%g"' % (i+1, torsion.periodicity[i], i+1, torsion.phase[i], i+1, torsion.k[i]), end=' ')
print('/>')
for torsion in torsions.improper:
type1 = next(iter(torsion.types1))
type2 = next(iter(torsion.types2))
type3 = next(iter(torsion.types3))
    type4 = next(iter(torsion.types4))
if type1 not in omitTypes and type2 not in omitTypes and type3 not in omitTypes and type4 not in omitTypes:
class1 = forcefield._atomTypes[type1][0]
class2 = forcefield._atomTypes[type2][0]
class3 = forcefield._atomTypes[type3][0]
class4 = forcefield._atomTypes[type4][0]
print(' <Improper class1="%s" class2="%s" class3="%s" class4="%s"' % (class1, class2, class3, class4), end=' ')
for i in range(len(torsion.k)):
print(' periodicity%d="%d" phase%d="%g" k%d="%g"' % (i+1, torsion.periodicity[i], i+1, torsion.phase[i], i+1, torsion.k[i]), end=' ')
print('/>')
print(' </PeriodicTorsionForce>')
# Print the script to add the soft-core nonbonded force.
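# The pairwise energy C/((r/0.2)^4+1) stays finite as r approaches 0, so clashing
# atoms feel large but bounded forces and can relax without the integration blowing up.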
print(' <Script>')
print("""import openmm as mm
nb = mm.CustomNonbondedForce('C/((r/0.2)^4+1)')
nb.addGlobalParameter('C', 1.0)
sys.addForce(nb)
for i in range(sys.getNumParticles()):
nb.addParticle([])
exclusions = set()
for bond in data.bonds:
exclusions.add((min(bond.atom1, bond.atom2), max(bond.atom1, bond.atom2)))
for angle in data.angles:
exclusions.add((min(angle[0], angle[2]), max(angle[0], angle[2])))
for a1, a2 in exclusions:
nb.addExclusion(a1, a2)""")
print(' </Script>')
print('</ForceField>')
| 6,801 | 40.987654 | 145 |
py
|
pdbfixer
|
pdbfixer-master/devtools/travis-ci/push-docs-to-s3.py
|
#!/usr/bin/env python
"""
Must have the following environment variables defined:
* BUCKET_NAME : AWS bucket name
* PREFIX : 'latest' or other version number
"""
import os
import pip
import tempfile
import subprocess
import thermopyl.version
BUCKET_NAME = 'thermopyl.org'
if not thermopyl.version.release:
PREFIX = 'latest'
else:
PREFIX = thermopyl.version.short_version
if not any(d.project_name == 's3cmd' for d in pip.get_installed_distributions()):
raise ImportError('The s3cmd package is required. try $ pip install s3cmd')
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
with tempfile.NamedTemporaryFile('w') as f:
f.write('''[default]
access_key = {AWS_ACCESS_KEY_ID}
secret_key = {AWS_SECRET_ACCESS_KEY}
'''.format(**os.environ))
f.flush()
template = ('s3cmd --guess-mime-type --config {config} '
'sync docs/_build/ s3://{bucket}/{prefix}/')
cmd = template.format(
config=f.name,
bucket=BUCKET_NAME,
prefix=PREFIX)
return_val = subprocess.call(cmd.split())
# Sync index file.
template = ('s3cmd --guess-mime-type --config {config} '
'sync devtools/ci/index.html s3://{bucket}/')
cmd = template.format(
config=f.name,
bucket=BUCKET_NAME)
return_val = subprocess.call(cmd.split())
| 1,410 | 27.795918 | 81 |
py
|
pdbfixer
|
pdbfixer-master/pdbfixer/ui.py
|
from __future__ import absolute_import
import webbrowser
import os.path
import time
import sys
from math import sqrt
import openmm.app as app
import openmm.unit as unit
from openmm.vec3 import Vec3
from .pdbfixer import PDBFixer, proteinResidues, dnaResidues, rnaResidues, _guessFileFormat
from . import uiserver
if sys.version_info >= (3,0):
from io import StringIO
else:
from cStringIO import StringIO
def loadHtmlFile(name):
htmlPath = os.path.join(os.path.dirname(__file__), 'html')
file = os.path.join(htmlPath, name)
return open(file).read()
cachedImages = {}
def loadImageFile(name):
global cachedImages
if name not in cachedImages:
imagePath = os.path.join(os.path.dirname(__file__), 'images')
file = os.path.join(imagePath, name)
cachedImages[name] = open(file, 'rb').read()
return cachedImages[name]
def controlsCallback(parameters, handler):
if 'newfile' in parameters:
displayStartPage()
if 'quit' in parameters:
handler.sendResponse(loadHtmlFile("quit.html"))
uiserver.server.shutdown()
global uiIsRunning
uiIsRunning = False
def imageCallback(parameters, handler):
name = parameters['name'][0]
image = loadImageFile(name)
type = None
if name.endswith('.png'):
type = 'image/png'
elif name.endswith('.jpeg') or name.endswith('.jpg'):
type = 'image/jpeg'
handler.sendResponse(image, type=type)
def startPageCallback(parameters, handler):
global fixer
if 'type' in parameters:
if parameters.getfirst('type') == 'local':
filename = parameters['pdbfile'].filename
file = StringIO(parameters['pdbfile'].value.decode())
if _guessFileFormat(file, filename) == 'pdbx':
fixer = PDBFixer(pdbxfile=file)
else:
fixer = PDBFixer(pdbfile=file)
fixer.source = filename
else:
id = parameters.getfirst('pdbid')
try:
fixer = PDBFixer(pdbid=id)
except Exception as e:
import traceback
print(traceback.format_exc())
handler.sendResponse(
header + "<p>Unable to download the PDB file. " +
"This may indicate an invalid PDB identifier, " +
"or an error in network connectivity.</p>" +
"<p>{}</p>".format(e) +
loadHtmlFile("error.html"))
displayDeleteChainsPage()
def deleteChainsPageCallback(parameters, handler):
global heterogens
heterogens = parameters.getfirst('heterogens')
numChains = len(list(fixer.topology.chains()))
deleteIndices = [i for i in range(numChains) if 'include'+str(i) not in parameters]
fixer.removeChains(deleteIndices)
displayAddResiduesPage()
def addResiduesPageCallback(parameters, handler):
keys = [key for key in sorted(fixer.missingResidues)]
for i, key in enumerate(keys):
if 'add'+str(i) not in parameters:
del fixer.missingResidues[key]
displayConvertResiduesPage()
def convertResiduesPageCallback(parameters, handler):
for i in range(len(fixer.nonstandardResidues)):
if 'convert'+str(i) in parameters:
fixer.nonstandardResidues[i] = (fixer.nonstandardResidues[i][0], parameters.getfirst('residue'+str(i)))
fixer.replaceNonstandardResidues()
displayMissingAtomsPage()
def missingAtomsPageCallback(parameters, handler):
fixer.addMissingAtoms()
displayAddHydrogensPage()
def addHydrogensPageCallback(parameters, handler):
if 'addhydrogens' in parameters:
pH = float(parameters.getfirst('ph'))
fixer.addMissingHydrogens(pH)
if 'addwater' in parameters:
padding, boxSize, boxVectors = None, None, None
if parameters.getfirst('boxType') == 'geometry':
geompadding = float(parameters.getfirst('geomPadding')) * unit.nanometer
geometry = parameters.getfirst('geometryDropdown')
base_size = float(parameters.getfirst('maxMolecularAxis')) * unit.nanometer
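            # Build the water box: a padded cube, or the reduced-form lattice vectors of a
            # truncated octahedron / rhombic dodecahedron scaled by the longest molecular
            # axis plus the requested padding.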
if geometry == 'cube':
padding = geompadding
elif geometry == 'truncatedOctahedron':
vectors = Vec3(1,0,0), Vec3(1/3,2*sqrt(2)/3,0), Vec3(-1/3,sqrt(2)/3,sqrt(6)/3)
boxVectors = [(base_size+geompadding)*v for v in vectors]
elif geometry == 'rhombicDodecahedron':
vectors = Vec3(1,0,0), Vec3(0,1,0), Vec3(0.5,0.5,sqrt(2)/2)
boxVectors = [(base_size+geompadding)*v for v in vectors]
else:
boxSize = (float(parameters.getfirst('boxx')), float(parameters.getfirst('boxy')), float(parameters.getfirst('boxz')))*unit.nanometer
ionicStrength = float(parameters.getfirst('ionicstrength'))*unit.molar
positiveIon = parameters.getfirst('positiveion')+'+'
negativeIon = parameters.getfirst('negativeion')+'-'
fixer.addSolvent(boxSize, padding, boxVectors, positiveIon, negativeIon, ionicStrength)
if 'addmembrane' in parameters:
lipidType = parameters.getfirst('lipidType')
padding = float(parameters.getfirst('membranePadding'))*unit.nanometer
ionicStrength = float(parameters.getfirst('ionicstrength'))*unit.molar
positiveIon = parameters.getfirst('positiveion')+'+'
negativeIon = parameters.getfirst('negativeion')+'-'
fixer.addMembrane(lipidType, 0*unit.nanometer, padding, positiveIon, negativeIon, ionicStrength)
displaySaveFilePage()
def saveFilePageCallback(parameters, handler):
if 'pdb' in parameters:
output = StringIO()
if fixer.source is not None:
output.write("REMARK 1 PDBFIXER FROM: %s\n" % fixer.source)
try:
app.PDBFile.writeFile(fixer.topology, fixer.positions, output, True)
except AssertionError:
            pass  # tolerate AssertionError from PDBFile.writeFile and send whatever was written
handler.sendDownload(output.getvalue(), 'output.pdb')
elif 'pdbx' in parameters:
output = StringIO()
if fixer.source is not None:
output.write("# Created with PDBFixer from: %s\n" % fixer.source)
try:
app.PDBxFile.writeFile(fixer.topology, fixer.positions, output, True)
except AssertionError:
            pass  # tolerate AssertionError from PDBxFile.writeFile and send whatever was written
handler.sendDownload(output.getvalue(), 'output.cif')
else:
displayStartPage()
def displayStartPage():
uiserver.setCallback(startPageCallback)
uiserver.setContent(header+loadHtmlFile("start.html"))
def displayDeleteChainsPage():
uiserver.setCallback(deleteChainsPageCallback)
numChains = len(list(fixer.topology.chains()))
table = ""
for i, chain in enumerate(fixer.topology.chains()):
residues = list(r.name for r in chain.residues())
if any(r in proteinResidues for r in residues):
content = "Protein"
elif any(r in rnaResidues for r in residues):
content = "RNA"
elif any(r in dnaResidues for r in residues):
content = "DNA"
else:
content = ', '.join(set(residues))
table += ' <tr><td>%s</td><td>%d</td><td>%s</td><td><input type="checkbox" name="include%d" checked></td></tr>\n' % (chain.id, len(residues), content, i)
uiserver.setContent(header+loadHtmlFile("removeChains.html") % (numChains, table))
def displayAddResiduesPage():
uiserver.setCallback(addResiduesPageCallback)
fixer.findMissingResidues()
if len(fixer.missingResidues) == 0:
displayConvertResiduesPage()
return
table = ""
chains = list(fixer.topology.chains())
for i, key in enumerate(sorted(fixer.missingResidues)):
residues = fixer.missingResidues[key]
chain = chains[key[0]]
chainResidues = list(chain.residues())
if key[1] < len(chainResidues):
offset = int(chainResidues[key[1]].id)-len(residues)-1
else:
offset = int(chainResidues[-1].id)
table += ' <tr><td>%s</td><td>%d to %d</td><td>%s</td><td><input type="checkbox" name="add%d" checked></td></tr>\n' % (chain.id, offset+1, offset+len(residues), ', '.join(residues), i)
uiserver.setContent(header+loadHtmlFile("addResidues.html") % table)
def displayConvertResiduesPage():
uiserver.setCallback(convertResiduesPageCallback)
fixer.findNonstandardResidues()
if len(fixer.nonstandardResidues) == 0:
displayMissingAtomsPage()
return
table = ''
nucleotides = ['DA', 'DC', 'DG', 'DT', 'A', 'C', 'G', 'T']
for i in range(len(fixer.nonstandardResidues)):
residue, replaceWith = fixer.nonstandardResidues[i]
if replaceWith in proteinResidues:
replacements = proteinResidues
else:
replacements = nucleotides
options = ''
for res in replacements:
selected = ''
if res == replaceWith:
selected = ' selected'
options += '<option value="%s"%s>%s</option>' % (res, selected, res)
table += ' <tr><td>%s</td><td>%s %s</td><td><select name="residue%d">%s</select></td><td><input type="checkbox" name="convert%d" checked></td></tr>\n' % (residue.chain.id, residue.name, residue.id, i, options, i)
uiserver.setContent(header+loadHtmlFile("convertResidues.html") % table)
def displayMissingAtomsPage():
uiserver.setCallback(missingAtomsPageCallback)
if heterogens == 'none':
fixer.removeHeterogens(False)
elif heterogens == 'water':
fixer.removeHeterogens(True)
fixer.findMissingAtoms()
allResidues = list(set(fixer.missingAtoms.keys()).union(fixer.missingTerminals.keys()))
allResidues.sort(key=lambda x: x.index)
if len(allResidues) == 0:
fixer.addMissingAtoms()
displayAddHydrogensPage()
return
table = ""
for residue in allResidues:
atoms = []
if residue in fixer.missingAtoms:
atoms.extend(atom.name for atom in fixer.missingAtoms[residue])
if residue in fixer.missingTerminals:
atoms.extend(atom for atom in fixer.missingTerminals[residue])
table += ' <tr><td>%s</td><td>%s %s</td><td>%s</td></tr>\n' % (residue.chain.id, residue.name, residue.id, ', '.join(atoms))
uiserver.setContent(header+loadHtmlFile("addHeavyAtoms.html") % table)
def displayAddHydrogensPage():
uiserver.setCallback(addHydrogensPageCallback)
dimensions = ""
if fixer.topology.getUnitCellDimensions() is not None:
dimensions = "<tr><td>Crystallographic unit cell:</td><td>%.3f</td><td>%.3f</td><td>%.3f</td></tr>" % fixer.topology.getUnitCellDimensions().value_in_unit(unit.nanometer)
sizeRange = tuple(max((pos[i] for pos in fixer.positions))-min((pos[i] for pos in fixer.positions)) for i in range(3))
dimensions += "<tr id='boxContainingAllAtoms'><td>Box containing all atoms:</td><td>%.3f</td><td>%.3f</td><td>%.3f</td></tr>" % tuple(x.value_in_unit(unit.nanometer) for x in sizeRange)
uiserver.setContent(header+loadHtmlFile("addHydrogens.html") % dimensions)
def displaySaveFilePage():
uiserver.setCallback(saveFilePageCallback)
uiserver.setContent(header+loadHtmlFile("saveFile.html"))
def launchUI():
global header
header = loadHtmlFile("header.html")
uiserver.beginServing()
uiserver.setCallback(controlsCallback, "/controls")
uiserver.setCallback(imageCallback, "/image")
displayStartPage()
url = 'http://localhost:'+str(uiserver.server.server_address[1])
print("PDBFixer running: %s " % url)
webbrowser.open(url)
# the uiserver is running in a background daemon thread that dies whenever
# the main thread exits. So, to keep the whole process alive, we just sleep
# here in the main thread. When Control-C is called, the main thread shuts
# down and then the uiserver exits. Without this daemon/sleep combo, the
# process cannot be killed with Control-C. Reference stack overflow link:
# http://stackoverflow.com/a/11816038/1079728
global uiIsRunning
uiIsRunning = True
while uiIsRunning:
time.sleep(0.5)
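# A minimal, stand-alone sketch of the same daemon-thread keep-alive pattern,
# kept here only as illustration (`run_server` is a hypothetical stand-in for
# uiserver.beginServing(); it is not defined in this module):
#
#   import threading, time
#   t = threading.Thread(target=run_server)
#   t.daemon = True           # the thread dies as soon as the main thread exits
#   t.start()
#   while True:               # Control-C interrupts the sleep and ends the process
#       time.sleep(0.5)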
| 12,126 | 41.85159 | 223 |
py
|
pdbfixer
|
pdbfixer-master/pdbfixer/uiserver.py
|
from threading import Thread
import cgi
import sys
try:
from socketserver import ThreadingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import parse_qs
except:
from SocketServer import ThreadingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from urlparse import parse_qs
class _Handler(BaseHTTPRequestHandler):
def do_GET(self):
self.hasSentResponse = False
if callback is not None:
queryStart = self.path.find('?')
if queryStart > -1:
parameters = parse_qs(self.path[queryStart+1:])
else:
parameters = {}
self.invokeCallback(parameters)
if not self.hasSentResponse:
self.sendResponse(content)
def do_POST(self):
self.hasSentResponse = False
parameters = cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ={'REQUEST_METHOD':'POST', 'CONTENT_TYPE':self.headers['Content-Type']})
self.invokeCallback(parameters)
if not self.hasSentResponse:
self.sendResponse(content)
def log_message(self, format, *args):
return
def invokeCallback(self, parameters):
path = self.path
if '?' in path:
path = path[:path.find('?')]
if path in callback:
callback[path](parameters, self)
def sendResponse(self, response, type="text/html"):
self.hasSentResponse = True
self.send_response(200)
self.send_header("Content-type", type)
self.send_header("Content-length", str(len(response)))
self.end_headers()
if sys.version_info.major > 2 and isinstance(response, str):
response = bytes(response, 'UTF-8')
self.wfile.write(response)
def sendDownload(self, download, filename):
self.hasSentResponse = True
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", str(len(download)))
self.send_header("Content-Disposition", 'attachment; filename="%s"' % filename)
self.end_headers()
if sys.version_info.major > 2:
download = bytes(download, 'UTF-8')
self.wfile.write(download)
class _ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
pass
content = ""
callback = {}
server = _ThreadingHTTPServer(("localhost", 8000), _Handler)
def beginServing():
t = Thread(target=server.serve_forever)
t.daemon = True
t.start()
def setContent(newContent):
global content
content = newContent
def setCallback(newCallback, path="/"):
global callback
callback[path] = newCallback
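# The module above is a small pull-style web UI helper: setContent() stores the page
# returned for plain requests, setCallback() registers a handler per path, and
# beginServing() starts the HTTP server on a daemon thread. A hedged usage sketch
# (the handler body and HTML strings are illustrative, not part of this module):
#
#   import uiserver
#
#   def homeCallback(parameters, handler):
#       # parameters is a dict for GET query strings or a cgi.FieldStorage for POSTs;
#       # handler exposes sendResponse()/sendDownload() for answering directly.
#       uiserver.setContent("<html><body>received %d fields</body></html>" % len(parameters))
#
#   uiserver.setCallback(homeCallback)                    # handles "/"
#   uiserver.setContent("<html><body>hello</body></html>")
#   uiserver.beginServing()                               # non-blocking
#   print(uiserver.server.server_address)                 # ("localhost", 8000)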
| 2,717 | 31.746988 | 154 |
py
|
pdbfixer
|
pdbfixer-master/pdbfixer/pdbfixer.py
|
"""
pdbfixer.py: Fixes problems in PDB files
This is part of the OpenMM molecular simulation toolkit originating from
Simbios, the NIH National Center for Physics-Based Simulation of
Biological Structures at Stanford, funded under the NIH Roadmap for
Medical Research, grant U54 GM072970. See https://simtk.org.
Portions copyright (c) 2013-2023 Stanford University and the Authors.
Authors: Peter Eastman
Contributors:
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
__author__ = "Peter Eastman"
__version__ = "1.7"
import openmm as mm
import openmm.app as app
import openmm.unit as unit
from openmm.app.internal.pdbstructure import PdbStructure
from openmm.app.internal.pdbx.reader.PdbxReader import PdbxReader
from openmm.app.element import hydrogen, oxygen
from openmm.app.forcefield import NonbondedGenerator
# Support Cythonized functions in OpenMM 7.3
# and also implementations in older versions.
try:
from openmm.app.internal import compiled
matchResidue = compiled.matchResidueToTemplate
except ImportError:
matchResidue = app.forcefield._matchResidue
import numpy as np
import numpy.linalg as lin
import sys
import os
import os.path
import math
from pkg_resources import resource_filename
if sys.version_info >= (3,0):
from urllib.request import urlopen
from io import StringIO
else:
from urllib2 import urlopen
from cStringIO import StringIO
substitutions = {
'2AS':'ASP', '3AH':'HIS', '5HP':'GLU', '5OW':'LYS', 'ACL':'ARG', 'AGM':'ARG', 'AIB':'ALA', 'ALM':'ALA', 'ALO':'THR', 'ALY':'LYS', 'ARM':'ARG',
'ASA':'ASP', 'ASB':'ASP', 'ASK':'ASP', 'ASL':'ASP', 'ASQ':'ASP', 'AYA':'ALA', 'BCS':'CYS', 'BHD':'ASP', 'BMT':'THR', 'BNN':'ALA',
'BUC':'CYS', 'BUG':'LEU', 'C5C':'CYS', 'C6C':'CYS', 'CAS':'CYS', 'CCS':'CYS', 'CEA':'CYS', 'CGU':'GLU', 'CHG':'ALA', 'CLE':'LEU', 'CME':'CYS',
'CSD':'ALA', 'CSO':'CYS', 'CSP':'CYS', 'CSS':'CYS', 'CSW':'CYS', 'CSX':'CYS', 'CXM':'MET', 'CY1':'CYS', 'CY3':'CYS', 'CYG':'CYS',
'CYM':'CYS', 'CYQ':'CYS', 'DAH':'PHE', 'DAL':'ALA', 'DAR':'ARG', 'DAS':'ASP', 'DCY':'CYS', 'DGL':'GLU', 'DGN':'GLN', 'DHA':'ALA',
'DHI':'HIS', 'DIL':'ILE', 'DIV':'VAL', 'DLE':'LEU', 'DLY':'LYS', 'DNP':'ALA', 'DPN':'PHE', 'DPR':'PRO', 'DSN':'SER', 'DSP':'ASP',
'DTH':'THR', 'DTR':'TRP', 'DTY':'TYR', 'DVA':'VAL', 'EFC':'CYS', 'FLA':'ALA', 'FME':'MET', 'GGL':'GLU', 'GL3':'GLY', 'GLZ':'GLY',
'GMA':'GLU', 'GSC':'GLY', 'HAC':'ALA', 'HAR':'ARG', 'HIC':'HIS', 'HIP':'HIS', 'HMR':'ARG', 'HPQ':'PHE', 'HTR':'TRP', 'HYP':'PRO',
'IAS':'ASP', 'IIL':'ILE', 'IYR':'TYR', 'KCX':'LYS', 'LLP':'LYS', 'LLY':'LYS', 'LTR':'TRP', 'LYM':'LYS', 'LYZ':'LYS', 'MAA':'ALA', 'MEN':'ASN',
'MHS':'HIS', 'MIS':'SER', 'MK8':'LEU', 'MLE':'LEU', 'MPQ':'GLY', 'MSA':'GLY', 'MSE':'MET', 'MVA':'VAL', 'NEM':'HIS', 'NEP':'HIS', 'NLE':'LEU',
'NLN':'LEU', 'NLP':'LEU', 'NMC':'GLY', 'OAS':'SER', 'OCS':'CYS', 'OMT':'MET', 'PAQ':'TYR', 'PCA':'GLU', 'PEC':'CYS', 'PHI':'PHE',
'PHL':'PHE', 'PR3':'CYS', 'PRR':'ALA', 'PTR':'TYR', 'PYX':'CYS', 'SAC':'SER', 'SAR':'GLY', 'SCH':'CYS', 'SCS':'CYS', 'SCY':'CYS',
'SEL':'SER', 'SEP':'SER', 'SET':'SER', 'SHC':'CYS', 'SHR':'LYS', 'SMC':'CYS', 'SOC':'CYS', 'STY':'TYR', 'SVA':'SER', 'TIH':'ALA',
'TPL':'TRP', 'TPO':'THR', 'TPQ':'ALA', 'TRG':'LYS', 'TRO':'TRP', 'TYB':'TYR', 'TYI':'TYR', 'TYQ':'TYR', 'TYS':'TYR', 'TYY':'TYR'
}
proteinResidues = ['ALA', 'ASN', 'CYS', 'GLU', 'HIS', 'LEU', 'MET', 'PRO', 'THR', 'TYR', 'ARG', 'ASP', 'GLN', 'GLY', 'ILE', 'LYS', 'PHE', 'SER', 'TRP', 'VAL']
rnaResidues = ['A', 'G', 'C', 'U', 'I']
dnaResidues = ['DA', 'DG', 'DC', 'DT', 'DI']
class Sequence(object):
"""Sequence holds the sequence of a chain, as specified by SEQRES records."""
def __init__(self, chainId, residues):
self.chainId = chainId
self.residues = residues
class ModifiedResidue(object):
"""ModifiedResidue holds information about a modified residue, as specified by a MODRES record."""
def __init__(self, chainId, number, residueName, standardName):
self.chainId = chainId
self.number = number
self.residueName = residueName
self.standardName = standardName
def _guessFileFormat(file, filename):
"""Guess whether a file is PDB or PDBx/mmCIF based on its filename and contents."""
filename = filename.lower()
if '.pdbx' in filename or '.cif' in filename:
return 'pdbx'
if '.pdb' in filename:
return 'pdb'
for line in file:
if line.startswith('data_') or line.startswith('loop_'):
file.seek(0)
return 'pdbx'
if line.startswith('HEADER') or line.startswith('REMARK') or line.startswith('TITLE '):
file.seek(0)
return 'pdb'
# It's certainly not a valid PDBx/mmCIF. Guess that it's a PDB.
file.seek(0)
return 'pdb'
def _overlayPoints(points1, points2):
"""Given two sets of points, determine the translation and rotation that matches them as closely as possible.
Parameters
----------
points1 (numpy array of openmm.unit.Quantity with units compatible with distance) - reference set of coordinates
points2 (numpy array of openmm.unit.Quantity with units compatible with distance) - set of coordinates to be rotated
Returns
-------
translate2 - vector to translate points2 by in order to center it
rotate - rotation matrix to apply to centered points2 to map it on to points1
center1 - center of points1
Notes
-----
This is based on W. Kabsch, Acta Cryst., A34, pp. 828-829 (1978).
"""
if len(points1) == 0:
return (mm.Vec3(0, 0, 0), np.identity(3), mm.Vec3(0, 0, 0))
if len(points1) == 1:
return (points1[0], np.identity(3), -1*points2[0])
# Compute centroids.
center1 = unit.sum(points1)/float(len(points1))
center2 = unit.sum(points2)/float(len(points2))
# Compute R matrix.
R = np.zeros((3, 3))
for p1, p2 in zip(points1, points2):
x = p1-center1
y = p2-center2
for i in range(3):
for j in range(3):
R[i][j] += y[i]*x[j]
# Use an SVD to compute the rotation matrix.
(u, s, v) = lin.svd(R)
return (-1*center2, np.dot(u, v).transpose(), center1)
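# Hedged note on applying the values returned above (this mirrors the call sites
# later in this file): a point p taken from points2 is mapped into the frame of
# points1 with
#
#   p_aligned = np.dot(rotate, p + translate2) + center1
#
# i.e. points2 is first centered (translate2 is minus its centroid), then rotated by
# the Kabsch rotation, then shifted onto the centroid of points1.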
def _findUnoccupiedDirection(point, positions):
"""Given a point in space and a list of atom positions, find the direction in which the local density of atoms is lowest."""
point = point.value_in_unit(unit.nanometers)
direction = mm.Vec3(0, 0, 0)
for pos in positions.value_in_unit(unit.nanometers):
delta = pos-point
distance = unit.norm(delta)
if distance > 0.1:
distance2 = distance*distance
direction -= delta/(distance2*distance2)
direction /= unit.norm(direction)
return direction
class PDBFixer(object):
"""PDBFixer implements many tools for fixing problems in PDB and PDBx/mmCIF files.
"""
def __init__(self, filename=None, pdbfile=None, pdbxfile=None, url=None, pdbid=None):
"""Create a new PDBFixer instance to fix problems in a PDB or PDBx/mmCIF file.
Parameters
----------
filename : str, optional, default=None
The name of the file to read. The format is determined automatically based on the filename extension, or if
that is ambiguous, by looking at the file content.
pdbfile : file, optional, default=None
A file-like object from which the PDB file is to be read.
The file is not closed after reading.
pdbxfile : file, optional, default=None
A file-like object from which the PDBx/mmCIF file is to be read.
The file is not closed after reading.
url : str, optional, default=None
A URL specifying the internet location from which the file contents should be retrieved. The format is
determined automatically by looking for a filename extension in the URL, or if that is ambiguous, by looking
at the file content.
pdbid : str, optional, default=None
A four-letter PDB code specifying the structure to be retrieved from the RCSB.
Notes
-----
Only one of filename, pdbfile, pdbxfile, url, or pdbid may be specified, or an exception will be thrown.
Examples
--------
Start from a filename.
>>> filename = resource_filename('pdbfixer', 'tests/data/test.pdb')
>>> fixer = PDBFixer(filename=filename)
Start from a file object.
>>> with open(filename) as f:
... fixer = PDBFixer(pdbfile=f)
Start from a URL.
>>> fixer = PDBFixer(url='http://www.rcsb.org/pdb/files/1VII.pdb')
Start from a PDB code.
>>> fixer = PDBFixer(pdbid='1VII')
"""
# Check to make sure only one option has been specified.
if bool(filename) + bool(pdbfile) + bool(pdbxfile) + bool(url) + bool(pdbid) != 1:
raise Exception("Exactly one option [filename, pdbfile, pdbxfile, url, pdbid] must be specified.")
self.source = None
if pdbid:
# A PDB id has been specified.
url = 'http://www.rcsb.org/pdb/files/%s.pdb' % pdbid
if filename:
# A local file has been specified.
self.source = filename
file = open(filename, 'r')
if _guessFileFormat(file, filename) == 'pdbx':
self._initializeFromPDBx(file)
else:
self._initializeFromPDB(file)
file.close()
elif pdbfile:
# A file-like object has been specified.
self._initializeFromPDB(pdbfile)
elif pdbxfile:
# A file-like object has been specified.
self._initializeFromPDBx(pdbxfile)
elif url:
# A URL has been specified.
self.source = url
file = urlopen(url)
contents = file.read().decode('utf-8')
file.close()
file = StringIO(contents)
if _guessFileFormat(file, url) == 'pdbx':
self._initializeFromPDBx(StringIO(contents))
else:
self._initializeFromPDB(StringIO(contents))
# Check the structure has some atoms in it.
atoms = list(self.topology.atoms())
if len(atoms) == 0:
raise Exception("Structure contains no atoms.")
# Load the templates.
self.templates = {}
templatesPath = os.path.join(os.path.dirname(__file__), 'templates')
for file in os.listdir(templatesPath):
templatePdb = app.PDBFile(os.path.join(templatesPath, file))
name = next(templatePdb.topology.residues()).name
self.templates[name] = templatePdb
def _initializeFromPDB(self, file):
"""Initialize this object by reading a PDB file."""
structure = PdbStructure(file)
pdb = app.PDBFile(structure)
self.topology = pdb.topology
self.positions = pdb.positions
self.sequences = [Sequence(s.chain_id, s.residues) for s in structure.sequences]
self.modifiedResidues = [ModifiedResidue(r.chain_id, r.number, r.residue_name, r.standard_name) for r in structure.modified_residues]
def _initializeFromPDBx(self, file):
"""Initialize this object by reading a PDBx/mmCIF file."""
pdbx = app.PDBxFile(file)
self.topology = pdbx.topology
self.positions = pdbx.positions
# PDBxFile doesn't record the information about sequence or modified residues, so we need to read them separately.
file.seek(0)
reader = PdbxReader(file)
data = []
reader.read(data)
block = data[0]
# Load the sequence data.
sequenceData = block.getObj('entity_poly_seq')
sequences = {}
if sequenceData is not None:
entityIdCol = sequenceData.getAttributeIndex('entity_id')
residueCol = sequenceData.getAttributeIndex('mon_id')
for row in sequenceData.getRowList():
entityId = row[entityIdCol]
residue = row[residueCol]
if entityId not in sequences:
sequences[entityId] = []
sequences[entityId].append(residue)
# Sequences are stored by "entity". There could be multiple chains that are all the same entity, so we need to
# convert from entities to chains.
asymData = block.getObj('struct_asym')
self.sequences = []
if asymData is not None:
asymIdCol = asymData.getAttributeIndex('id')
entityIdCol = asymData.getAttributeIndex('entity_id')
for row in asymData.getRowList():
asymId = row[asymIdCol]
entityId = row[entityIdCol]
if entityId in sequences:
self.sequences.append(Sequence(asymId, sequences[entityId]))
# Load the modified residues.
modData = block.getObj('pdbx_struct_mod_residue')
self.modifiedResidues = []
if modData is not None:
asymIdCol = modData.getAttributeIndex('label_asym_id')
resNameCol = modData.getAttributeIndex('label_comp_id')
resNumCol = modData.getAttributeIndex('auth_seq_id')
standardResCol = modData.getAttributeIndex('parent_comp_id')
if -1 not in (asymIdCol, resNameCol, resNumCol, standardResCol):
for row in modData.getRowList():
self.modifiedResidues.append(ModifiedResidue(row[asymIdCol], int(row[resNumCol]), row[resNameCol], row[standardResCol]))
def _addAtomsToTopology(self, heavyAtomsOnly, omitUnknownMolecules):
"""Create a new Topology in which missing atoms have been added.
Parameters
----------
heavyAtomsOnly : bool
If True, only heavy atoms will be added to the topology.
omitUnknownMolecules : bool
If True, unknown molecules will be omitted from the topology.
Returns
-------
newTopology : openmm.app.Topology
A new Topology object containing atoms from the old.
newPositions : list of openmm.unit.Quantity with units compatible with nanometers
Atom positions for the new Topology object.
newAtoms : openmm.app.Topology.Atom
New atom objects.
existingAtomMap : dict
Mapping from old atoms to new atoms.
"""
newTopology = app.Topology()
newPositions = []*unit.nanometer
newAtoms = []
existingAtomMap = {}
addedAtomMap = {}
addedOXT = []
residueCenters = [self._computeResidueCenter(res).value_in_unit(unit.nanometers) for res in self.topology.residues()]*unit.nanometers
for chain in self.topology.chains():
if omitUnknownMolecules and not any(residue.name in self.templates for residue in chain.residues()):
continue
chainResidues = list(chain.residues())
newChain = newTopology.addChain(chain.id)
for indexInChain, residue in enumerate(chain.residues()):
# Insert missing residues here.
if (chain.index, indexInChain) in self.missingResidues:
insertHere = self.missingResidues[(chain.index, indexInChain)]
endPosition = self._computeResidueCenter(residue)
if indexInChain > 0:
startPosition = self._computeResidueCenter(chainResidues[indexInChain-1])
loopDirection = _findUnoccupiedDirection((startPosition+endPosition)/2, residueCenters)
else:
outward = _findUnoccupiedDirection(endPosition, residueCenters)*unit.nanometers
norm = unit.norm(outward)
if norm > 0*unit.nanometer:
outward *= len(insertHere)*0.5*unit.nanometer/norm
startPosition = endPosition+outward
loopDirection = None
firstIndex = int(residue.id)-len(insertHere)
self._addMissingResiduesToChain(newChain, insertHere, startPosition, endPosition, loopDirection, residue, newAtoms, newPositions, firstIndex)
# Create the new residue and add existing heavy atoms.
newResidue = newTopology.addResidue(residue.name, newChain, residue.id, residue.insertionCode)
for atom in residue.atoms():
if not heavyAtomsOnly or (atom.element is not None and atom.element != hydrogen):
if atom.name == 'OXT' and (chain.index, indexInChain+1) in self.missingResidues:
continue # Remove terminal oxygen, since we'll add more residues after this one
newAtom = newTopology.addAtom(atom.name, atom.element, newResidue)
existingAtomMap[atom] = newAtom
newPositions.append(self.positions[atom.index])
if residue in self.missingAtoms:
# Find corresponding atoms in the residue and the template.
template = self.templates[residue.name]
atomPositions = dict((atom.name, self.positions[atom.index]) for atom in residue.atoms())
points1 = []
points2 = []
for atom in template.topology.atoms():
if atom.name in atomPositions:
points1.append(atomPositions[atom.name].value_in_unit(unit.nanometer))
points2.append(template.positions[atom.index].value_in_unit(unit.nanometer))
# Compute the optimal transform to overlay them.
(translate2, rotate, translate1) = _overlayPoints(points1, points2)
# Add the missing atoms.
addedAtomMap[residue] = {}
for atom in self.missingAtoms[residue]:
newAtom = newTopology.addAtom(atom.name, atom.element, newResidue)
newAtoms.append(newAtom)
addedAtomMap[residue][atom] = newAtom
templatePosition = template.positions[atom.index].value_in_unit(unit.nanometer)
newPositions.append((mm.Vec3(*np.dot(rotate, templatePosition+translate2))+translate1)*unit.nanometer)
if residue in self.missingTerminals:
terminalsToAdd = self.missingTerminals[residue]
else:
terminalsToAdd = None
# If this is the end of the chain, add any missing residues that come after it.
if residue == chainResidues[-1] and (chain.index, indexInChain+1) in self.missingResidues:
insertHere = self.missingResidues[(chain.index, indexInChain+1)]
if len(insertHere) > 0:
startPosition = self._computeResidueCenter(residue)
outward = _findUnoccupiedDirection(startPosition, residueCenters)*unit.nanometers
norm = unit.norm(outward)
if norm > 0*unit.nanometer:
outward *= len(insertHere)*0.5*unit.nanometer/norm
endPosition = startPosition+outward
firstIndex = int(residue.id)+1
self._addMissingResiduesToChain(newChain, insertHere, startPosition, endPosition, None, residue, newAtoms, newPositions, firstIndex)
newResidue = list(newChain.residues())[-1]
if newResidue.name in proteinResidues:
terminalsToAdd = ['OXT']
else:
terminalsToAdd = None
# If a terminal OXT is missing, add it.
if terminalsToAdd is not None:
atomPositions = dict((atom.name, newPositions[atom.index].value_in_unit(unit.nanometer)) for atom in newResidue.atoms())
if 'OXT' in terminalsToAdd:
newAtom = newTopology.addAtom('OXT', oxygen, newResidue)
newAtoms.append(newAtom)
addedOXT.append(newAtom)
d_ca_o = atomPositions['O']-atomPositions['CA']
d_ca_c = atomPositions['C']-atomPositions['CA']
d_ca_c /= unit.sqrt(unit.dot(d_ca_c, d_ca_c))
v = d_ca_o - d_ca_c*unit.dot(d_ca_c, d_ca_o)
newPositions.append((atomPositions['O']+2*v)*unit.nanometer)
newTopology.setUnitCellDimensions(self.topology.getUnitCellDimensions())
newTopology.createStandardBonds()
newTopology.createDisulfideBonds(newPositions)
# Add the bonds between atoms in heterogens.
for a1,a2 in self.topology.bonds():
if a1 in existingAtomMap and a2 in existingAtomMap and (a1.residue.name not in app.Topology._standardBonds or a2.residue.name not in app.Topology._standardBonds):
newTopology.addBond(existingAtomMap[a1], existingAtomMap[a2])
# Return the results.
return (newTopology, newPositions, newAtoms, existingAtomMap)
def _computeResidueCenter(self, residue):
"""Compute the centroid of a residue."""
return unit.sum([self.positions[atom.index] for atom in residue.atoms()])/len(list(residue.atoms()))
def _addMissingResiduesToChain(self, chain, residueNames, startPosition, endPosition, loopDirection, orientTo, newAtoms, newPositions, firstIndex):
"""Add a series of residues to a chain."""
orientToPositions = dict((atom.name, self.positions[atom.index]) for atom in orientTo.atoms())
if loopDirection is None:
loopDirection = mm.Vec3(0, 0, 0)
# We'll add the residues in an arc connecting the endpoints. Figure out the height of that arc.
length = unit.norm(endPosition-startPosition)
numResidues = len(residueNames)
if length > numResidues*0.3*unit.nanometers:
loopHeight = 0*unit.nanometers
else:
loopHeight = (numResidues*0.3*unit.nanometers-length)/2
# Add the residues.
for i, residueName in enumerate(residueNames):
template = self.templates[residueName]
# Find a translation that best matches the adjacent residue.
points1 = []
points2 = []
for atom in template.topology.atoms():
if atom.name in orientToPositions:
points1.append(orientToPositions[atom.name].value_in_unit(unit.nanometer))
points2.append(template.positions[atom.index].value_in_unit(unit.nanometer))
(translate2, rotate, translate1) = _overlayPoints(points1, points2)
# Create the new residue.
newResidue = chain.topology.addResidue(residueName, chain, "%d" % ((firstIndex+i)%10000))
fraction = (i+1.0)/(numResidues+1.0)
translate = startPosition + (endPosition-startPosition)*fraction + loopHeight*math.sin(fraction*math.pi)*loopDirection
templateAtoms = list(template.topology.atoms())
if newResidue == next(chain.residues()):
templateAtoms = [atom for atom in templateAtoms if atom.name not in ('P', 'OP1', 'OP2')]
for atom in templateAtoms:
newAtom = chain.topology.addAtom(atom.name, atom.element, newResidue)
newAtoms.append(newAtom)
templatePosition = template.positions[atom.index].value_in_unit(unit.nanometer)
newPositions.append(mm.Vec3(*np.dot(rotate, templatePosition))*unit.nanometer+translate)
def removeChains(self, chainIndices=None, chainIds=None):
"""Remove a set of chains from the structure.
Parameters
----------
chainIndices : list of int, optional, default=None
List of indices of chains to remove.
chainIds : list of str, optional, default=None
List of chain ids of chains to remove.
Examples
--------
Load a PDB file with two chains and eliminate the second chain.
>>> fixer = PDBFixer(pdbid='4J7F')
>>> fixer.removeChains(chainIndices=[1])
Load a PDB file with two chains and eliminate chains named 'B' and 'D'.
>>> fixer = PDBFixer(pdbid='4J7F')
>>> fixer.removeChains(chainIds=['B','D'])
"""
modeller = app.Modeller(self.topology, self.positions)
allChains = list(self.topology.chains())
if chainIndices == None:
chainIndices = list()
if chainIds != None:
# Add all chains that match the selection to the list.
for (chainNumber, chain) in enumerate(allChains):
if chain.id in chainIds:
chainIndices.append(chainNumber)
# Ensure only unique entries remain.
chainIndices = list(set(chainIndices))
# Do nothing if no chains will be deleted.
if len(chainIndices) == 0:
return
modeller.delete(allChains[i] for i in chainIndices)
self.topology = modeller.topology
self.positions = modeller.positions
return
def findMissingResidues(self):
"""Find residues that are missing from the structure.
The results are stored into the missingResidues field, which is a dict. Each key is a tuple consisting of
the index of a chain, and the residue index within that chain at which new residues should be inserted.
The corresponding value is a list of the names of residues to insert there.
Examples
--------
>>> fixer = PDBFixer(pdbid='1VII')
>>> fixer.findMissingResidues()
>>> missing_residues = fixer.missingResidues
"""
chains = [c for c in self.topology.chains() if len(list(c.residues())) > 0]
chainWithGaps = {}
# Find the sequence of each chain, with gaps for missing residues.
for chain in chains:
residues = list(chain.residues())
ids = [int(r.id) for r in residues]
for i, res in enumerate(residues):
if res.insertionCode not in ('', ' '):
for j in range(i, len(residues)):
ids[j] += 1
minResidue = min(ids)
maxResidue = max(ids)
chainWithGaps[chain] = [None]*(maxResidue-minResidue+1)
for r, id in zip(residues, ids):
chainWithGaps[chain][id-minResidue] = r.name
# Try to find the chain that matches each sequence.
chainSequence = {}
chainOffset = {}
for sequence in self.sequences:
for chain in chains:
if chain.id != sequence.chainId:
continue
if chain in chainSequence:
continue
for offset in range(len(sequence.residues)-len(chainWithGaps[chain])+1):
if all(a == b or b == None for a,b in zip(sequence.residues[offset:], chainWithGaps[chain])):
chainSequence[chain] = sequence
chainOffset[chain] = offset
break
if chain in chainSequence:
break
# Now build the list of residues to add.
self.missingResidues = {}
for chain in self.topology.chains():
if chain in chainSequence:
offset = chainOffset[chain]
sequence = chainSequence[chain].residues
gappedSequence = chainWithGaps[chain]
index = 0
for i in range(len(sequence)):
if i < offset or i >= len(gappedSequence)+offset or gappedSequence[i-offset] is None:
key = (chain.index, index)
if key not in self.missingResidues:
self.missingResidues[key] = []
residueName = sequence[i]
if residueName in substitutions:
residueName = substitutions[sequence[i]]
self.missingResidues[key].append(residueName)
else:
index += 1
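# Hedged illustration of the resulting structure (the values below are made up):
# missingResidues maps (chain index, insertion index within the chain) to the list
# of residue names that should be inserted at that point, e.g.
#
#   fixer.missingResidues == {(0, 0): ['MET', 'GLY'],   # gap at the start of chain 0
#                             (0, 42): ['ALA']}         # internal gap in chain 0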
def findNonstandardResidues(self):
"""Identify non-standard residues found in the structure, and select standard residues to replace them with.
The results are stored into the nonstandardResidues field, which is a map of Residue objects to the names
of suggested replacement residues.
Examples
--------
Find nonstandard residues.
>>> fixer = PDBFixer(pdbid='1YRI')
>>> fixer.findNonstandardResidues()
>>> nonstandard_residues = fixer.nonstandardResidues
"""
# First find residues based on our table of standard substitutions.
nonstandard = dict((r, substitutions[r.name]) for r in self.topology.residues() if r.name in substitutions)
# Now add ones based on MODRES records.
modres = dict(((m.chainId, str(m.number), m.residueName), m.standardName) for m in self.modifiedResidues)
for chain in self.topology.chains():
for residue in chain.residues():
key = (chain.id, residue.id, residue.name)
if key in modres:
replacement = modres[key]
if replacement == 'DU':
replacement = 'DT'
if replacement in self.templates:
nonstandard[residue] = replacement
self.nonstandardResidues = [(r, nonstandard[r]) for r in sorted(nonstandard, key=lambda r: r.index)]
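# Hedged illustration (residue numbers are made up): after the call above,
# nonstandardResidues is a list of (Residue, replacement name) pairs ordered by
# residue index, using the substitutions table at the top of this file, e.g.
#
#   fixer.nonstandardResidues == [(<Residue 12 (MSE)>, 'MET'),
#                                 (<Residue 87 (TPO)>, 'THR')]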
def replaceNonstandardResidues(self):
"""Replace every residue listed in the nonstandardResidues field with the specified standard residue.
Notes
-----
You must have first called findNonstandardResidues() to identify nonstandard residues.
Examples
--------
Find and replace nonstandard residues using replacement templates stored in the 'templates' field of PDBFixer object.
>>> fixer = PDBFixer(pdbid='1YRI')
>>> fixer.findNonstandardResidues()
>>> fixer.replaceNonstandardResidues()
"""
if len(self.nonstandardResidues) > 0:
deleteAtoms = []
# Find atoms that should be deleted.
for residue, replaceWith in self.nonstandardResidues:
residue.name = replaceWith
template = self.templates[replaceWith]
standardAtoms = set(atom.name for atom in template.topology.atoms())
for atom in residue.atoms():
if atom.element in (None, hydrogen) or atom.name not in standardAtoms:
deleteAtoms.append(atom)
# Delete them.
modeller = app.Modeller(self.topology, self.positions)
modeller.delete(deleteAtoms)
self.topology = modeller.topology
self.positions = modeller.positions
def applyMutations(self, mutations, chain_id):
"""Apply a list of amino acid substitutions to make a mutant protein.
Parameters
----------
mutations : list of strings
Each string must include the resName (original), index,
and resName (target). For example, ALA-133-GLY will mutate
alanine 133 to glycine.
chain_id : str
String based chain ID of the single chain you wish to mutate.
Notes
-----
We require three letter codes to avoid possible ambiguities.
We can't guarantee that the resulting model is a good one; for
significant changes in sequence, you should probably be using
a standalone homology modelling tool.
Examples
--------
Find nonstandard residues.
>>> fixer = PDBFixer(pdbid='1VII')
>>> fixer.applyMutations(["ALA-57-GLY"], "A")
>>> fixer.findMissingResidues()
>>> fixer.findMissingAtoms()
>>> fixer.addMissingAtoms()
>>> fixer.addMissingHydrogens(7.0)
"""
# Retrieve all residues that match the specified chain_id.
# NOTE: Multiple chains may have the same chainid, but must have unique resSeq entries.
resSeq_to_residue = dict() # resSeq_to_residue[resid] is the residue in the requested chain corresponding to residue identifier 'resid'
for chain in self.topology.chains():
if chain.id == chain_id:
for residue in chain.residues():
resSeq_to_residue[int(residue.id)] = residue
# Make a map of residues to mutate based on requested mutation list.
residue_map = dict() # residue_map[residue] is the name of the new residue to mutate to, if a mutation is desired
for mut_str in mutations:
old_name, resSeq, new_name = mut_str.split("-")
resSeq = int(resSeq)
if resSeq not in resSeq_to_residue:
raise(KeyError("Cannot find chain %s residue %d in system!" % (chain_id, resSeq)))
residue = resSeq_to_residue[resSeq] # retrieve the requested residue
if residue.name != old_name:
raise(ValueError("You asked to mutate chain %s residue %d name %s, but that residue is actually %s!" % (chain_id, resSeq, old_name, residue.name)))
try:
template = self.templates[new_name]
except KeyError:
raise(KeyError("Cannot find residue %s in template library!" % new_name))
# Store mutation
residue_map[residue] = new_name
# If there are mutations to be made, make them.
if len(residue_map) > 0:
deleteAtoms = [] # list of atoms to delete
# Find atoms that should be deleted.
for residue in residue_map.keys():
replaceWith = residue_map[residue]
residue.name = replaceWith
template = self.templates[replaceWith]
standardAtoms = set(atom.name for atom in template.topology.atoms())
for atom in residue.atoms():
if atom.element in (None, hydrogen) or atom.name not in standardAtoms:
deleteAtoms.append(atom)
# Delete atoms queued to be deleted.
modeller = app.Modeller(self.topology, self.positions)
modeller.delete(deleteAtoms)
self.topology = modeller.topology
self.positions = modeller.positions
def findMissingAtoms(self):
"""Find heavy atoms that are missing from the structure.
The results are stored into two fields: missingAtoms and missingTerminals. Each of these is a dict whose keys
are Residue objects and whose values are lists of atom names. missingAtoms contains standard atoms that should
be present in any residue of that type. missingTerminals contains terminal atoms that should be present at the
start or end of a chain.
Notes
-----
You must have first called findMissingResidues().
Examples
--------
Find missing heavy atoms in Abl kinase structure.
>>> fixer = PDBFixer(pdbid='2F4J')
>>> fixer.findMissingResidues()
>>> fixer.findMissingAtoms()
>>> # Retrieve missing atoms.
>>> missingAtoms = fixer.missingAtoms
>>> # Retrieve missing terminal atoms.
>>> missingTerminals = fixer.missingTerminals
"""
missingAtoms = {}
missingTerminals = {}
# Loop over residues.
for chain in self.topology.chains():
chainResidues = list(chain.residues())
for residue in chain.residues():
if residue.name in self.templates:
template = self.templates[residue.name]
atomNames = set(atom.name for atom in residue.atoms())
templateAtoms = list(template.topology.atoms())
if residue == chainResidues[0] and (chain.index, 0) not in self.missingResidues:
templateAtoms = [atom for atom in templateAtoms if atom.name not in ('P', 'OP1', 'OP2')]
# Add atoms from the template that are missing.
missing = []
for atom in templateAtoms:
if atom.name not in atomNames:
missing.append(atom)
if len(missing) > 0:
missingAtoms[residue] = missing
# Add missing terminal atoms.
terminals = []
if residue == chainResidues[-1] and (chain.index, len(chainResidues)) not in self.missingResidues:
templateNames = set(atom.name for atom in template.topology.atoms())
if 'OXT' not in atomNames and all(name in templateNames for name in ['C', 'O', 'CA']):
terminals.append('OXT')
if len(terminals) > 0:
missingTerminals[residue] = terminals
self.missingAtoms = missingAtoms
self.missingTerminals = missingTerminals
def addMissingAtoms(self, seed=None):
"""Add all missing heavy atoms, as specified by the missingAtoms, missingTerminals, and missingResidues fields.
Parameters
----------
seed : int
Integer to set the random seed number of the integrator used in the minimization of the
coordinates of the newly-added atoms.
Notes
-----
You must already have called findMissingAtoms() to have identified atoms to be added.
Examples
--------
Find missing heavy atoms in Abl kinase structure.
>>> fixer = PDBFixer(pdbid='2F4J')
>>> fixer.findMissingResidues()
>>> fixer.findMissingAtoms()
>>> fixer.addMissingAtoms()
"""
# Create a Topology that 1) adds missing atoms, 2) removes all hydrogens, and 3) removes unknown molecules.
(newTopology, newPositions, newAtoms, existingAtomMap) = self._addAtomsToTopology(True, True)
if len(newAtoms) == 0:
# No atoms were added, but new bonds might have been created.
newBonds = set(newTopology.bonds())
for atom1, atom2 in self.topology.bonds():
if atom1 in existingAtomMap and atom2 in existingAtomMap:
a1 = existingAtomMap[atom1]
a2 = existingAtomMap[atom2]
if (a1, a2) in newBonds:
newBonds.remove((a1, a2))
elif (a2, a1) in newBonds:
newBonds.remove((a2, a1))
# Add the new bonds to the original Topology.
inverseAtomMap = dict((y,x) for (x,y) in existingAtomMap.items())
for atom1, atom2 in newBonds:
self.topology.addBond(inverseAtomMap[atom1], inverseAtomMap[atom2])
else:
# Create a System for energy minimizing it.
forcefield = self._createForceField(newTopology, False)
system = forcefield.createSystem(newTopology)
# Set any previously existing atoms to be massless, so they won't move.
for atom in existingAtomMap.values():
system.setParticleMass(atom.index, 0.0)
# If any heavy atoms were omitted, add them back to avoid steric clashes.
nonbonded = [f for f in system.getForces() if isinstance(f, mm.CustomNonbondedForce)][0]
for atom in self.topology.atoms():
if atom.element not in (None, hydrogen) and atom not in existingAtomMap:
system.addParticle(0.0)
nonbonded.addParticle([])
newPositions.append(self.positions[atom.index])
# For efficiency, only compute interactions that involve a new atom.
nonbonded.addInteractionGroup([atom.index for atom in newAtoms], range(system.getNumParticles()))
# Do an energy minimization.
integrator = mm.LangevinIntegrator(300*unit.kelvin, 10/unit.picosecond, 5*unit.femtosecond)
if seed is not None:
integrator.setRandomNumberSeed(seed)
context = mm.Context(system, integrator)
context.setPositions(newPositions)
mm.LocalEnergyMinimizer.minimize(context)
state = context.getState(getPositions=True)
if newTopology.getNumResidues() > 1:
# When looking for pairs of atoms that are too close to each other, exclude pairs that
# are in the same residue or are directly bonded to each other.
exclusions = dict((atom, {a.index for a in atom.residue.atoms()}) for atom in newAtoms)
for a1, a2 in newTopology.bonds():
if a1 in exclusions:
exclusions[a1].add(a2.index)
if a2 in exclusions:
exclusions[a2].add(a1.index)
cutoff = 0.13
nearest = self._findNearestDistance(context, newAtoms, cutoff, exclusions)
if nearest < cutoff:
# Some atoms are very close together. Run some dynamics while slowly increasing the strength of the
# repulsive interaction to try to improve the result.
for i in range(10):
context.setParameter('C', 0.15*(i+1))
integrator.step(200)
d = self._findNearestDistance(context, newAtoms, cutoff, exclusions)
if d > nearest:
nearest = d
state = context.getState(getPositions=True)
if nearest >= cutoff:
break
context.setState(state)
context.setParameter('C', 1.0)
mm.LocalEnergyMinimizer.minimize(context)
state = context.getState(getPositions=True)
# Now create a new Topology, including all atoms from the original one and adding the missing atoms.
(newTopology2, newPositions2, newAtoms2, existingAtomMap2) = self._addAtomsToTopology(False, False)
# Copy over the minimized positions for the new atoms.
for a1, a2 in zip(newAtoms, newAtoms2):
newPositions2[a2.index] = state.getPositions()[a1.index]
self.topology = newTopology2
self.positions = newPositions2
def removeHeterogens(self, keepWater=True):
"""Remove all heterogens from the structure.
Parameters
----------
keepWater : bool, optional, default=True
If True, water molecules will not be removed.
Examples
--------
Remove heterogens in Abl structure complexed with imatinib.
>>> fixer = PDBFixer(pdbid='2F4J')
>>> fixer.removeHeterogens(keepWater=False)
"""
keep = set(proteinResidues).union(dnaResidues).union(rnaResidues)
keep.add('N')
keep.add('UNK')
if keepWater:
keep.add('HOH')
toDelete = []
for residue in self.topology.residues():
if residue.name not in keep:
toDelete.append(residue)
modeller = app.Modeller(self.topology, self.positions)
modeller.delete(toDelete)
self.topology = modeller.topology
self.positions = modeller.positions
def addMissingHydrogens(self, pH=7.0, forcefield=None):
"""Add missing hydrogen atoms to the structure.
Parameters
----------
pH : float, optional, default=7.0
The pH based on which to select hydrogens.
forcefield : ForceField, optional, default=None
The forcefield used when adding and minimizing hydrogens. If None, a default forcefield is used.
Notes
-----
No extensive electrostatic analysis is performed; only default residue pKas are used.
Examples
--------
Add missing hydrogens appropriate for pH 8.
>>> fixer = PDBFixer(pdbid='1VII')
>>> fixer.addMissingHydrogens(pH=8.0)
"""
modeller = app.Modeller(self.topology, self.positions)
modeller.addHydrogens(pH=pH, forcefield=forcefield)
self.topology = modeller.topology
self.positions = modeller.positions
def addSolvent(self, boxSize=None, padding=None, boxVectors=None, positiveIon='Na+', negativeIon='Cl-', ionicStrength=0*unit.molar, boxShape='cube'):
"""Add a solvent box surrounding the structure.
Parameters
----------
boxSize : openmm.Vec3, optional, default=None
The size of the box to fill with water. If specified, padding and boxVectors must not be specified.
padding : openmm.unit.Quantity compatible with nanometers, optional, default=None
Padding around macromolecule for filling box with water. If specified, boxSize and boxVectors must not be specified.
boxVectors : 3-tuple of openmm.Vec3, optional, default=None
Three vectors specifying the geometry of the box. If specified, padding and boxSize must not be specified.
positiveIon : str, optional, default='Na+'
The type of positive ion to add. Allowed values are 'Cs+', 'K+', 'Li+', 'Na+', and 'Rb+'.
negativeIon : str, optional, default='Cl-'
The type of negative ion to add. Allowed values are 'Cl-', 'Br-', 'F-', and 'I-'.
ionicStrength : openmm.unit.Quantity with units compatible with molar, optional, default=0*molar
The total concentration of ions (both positive and negative) to add. This does not include ions that are added to neutralize the system.
boxShape: str='cube'
the box shape to use. Allowed values are 'cube', 'dodecahedron', and 'octahedron'. If padding is None, this is ignored.
Examples
--------
Add missing residues, heavy atoms, and hydrogens, and then solvate with 10 A padding.
>>> fixer = PDBFixer(pdbid='1VII')
>>> fixer.findMissingResidues()
>>> fixer.findMissingAtoms()
>>> fixer.addMissingAtoms()
>>> fixer.addMissingHydrogens(pH=8.0)
>>> fixer.addSolvent(padding=10*unit.angstrom, ionicStrength=0.050*unit.molar)
"""
modeller = app.Modeller(self.topology, self.positions)
forcefield = self._createForceField(self.topology, True)
modeller.addSolvent(forcefield, padding=padding, boxSize=boxSize, boxVectors=boxVectors, boxShape=boxShape, positiveIon=positiveIon, negativeIon=negativeIon, ionicStrength=ionicStrength)
chains = list(modeller.topology.chains())
if len(chains) == 1:
chains[0].id = 'A'
else:
chains[-1].id = chr(ord(chains[-2].id)+1)
self.topology = modeller.topology
self.positions = modeller.positions
def addMembrane(self, lipidType='POPC', membraneCenterZ=0*unit.nanometer, minimumPadding=1*unit.nanometer, positiveIon='Na+', negativeIon='Cl-', ionicStrength=0*unit.molar):
"""Add a lipid membrane to the structure.
This method adds both lipids and water, so you should call either addSolvent() or addMembrane(),
but not both. See Modeller.addMembrane() for more details.
Parameters
----------
lipidType : string='POPC'
the type of lipid to use. Supported values are 'POPC', 'POPE', 'DLPC', 'DLPE', 'DMPC', 'DOPC', and 'DPPC'.
membraneCenterZ: distance=0*nanometer
the position along the Z axis of the center of the membrane
minimumPadding : distance=1*nanometer
the padding distance to use
positiveIon : str, optional, default='Na+'
The type of positive ion to add. Allowed values are 'Cs+', 'K+', 'Li+', 'Na+', and 'Rb+'.
negativeIon : str, optional, default='Cl-'
The type of negative ion to add. Allowed values are 'Cl-', 'Br-', 'F-', and 'I-'.
ionicStrength : openmm.unit.Quantity with units compatible with molar, optional, default=0*molar
The total concentration of ions (both positive and negative) to add. This does not include ions that are added to neutralize the system.
"""
modeller = app.Modeller(self.topology, self.positions)
forcefield = self._createForceField(self.topology, True)
modeller.addMembrane(forcefield, lipidType=lipidType, minimumPadding=minimumPadding, positiveIon=positiveIon, negativeIon=negativeIon, ionicStrength=ionicStrength)
chains = list(modeller.topology.chains())
if len(chains) == 1:
chains[0].id = 'A'
else:
chains[-1].id = chr(ord(chains[-2].id)+1)
self.topology = modeller.topology
self.positions = modeller.positions
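# Hedged usage sketch for addMembrane(), in the same style as the addSolvent()
# example above (the PDB id is a placeholder for a membrane-protein entry):
#
#   fixer = PDBFixer(pdbid='XXXX')
#   fixer.findMissingResidues()
#   fixer.findMissingAtoms()
#   fixer.addMissingAtoms()
#   fixer.addMissingHydrogens(7.0)
#   fixer.addMembrane(lipidType='POPC', minimumPadding=1*unit.nanometer)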
def _createForceField(self, newTopology, water):
"""Create a force field to use for optimizing the positions of newly added atoms."""
if water:
forcefield = app.ForceField('amber14-all.xml', 'amber14/tip3p.xml')
nonbonded = [f for f in forcefield._forces if isinstance(f, NonbondedGenerator)][0]
radii = {'H':0.198, 'Li':0.203, 'C':0.340, 'N':0.325, 'O':0.299, 'F':0.312, 'Na':0.333, 'Mg':0.141,
'P':0.374, 'S':0.356, 'Cl':0.347, 'K':0.474, 'Br':0.396, 'Rb':0.527, 'I':0.419, 'Cs':0.605}
else:
forcefield = app.ForceField(os.path.join(os.path.dirname(__file__), 'soft.xml'))
# The Topology may contain residues for which the ForceField does not have a template.
# If so, we need to create new templates for them.
atomTypes = {}
bondedToAtom = []
for atom in newTopology.atoms():
bondedToAtom.append(set())
for atom1, atom2 in newTopology.bonds():
bondedToAtom[atom1.index].add(atom2.index)
bondedToAtom[atom2.index].add(atom1.index)
for residue in newTopology.residues():
# Make sure the ForceField has a template for this residue.
signature = app.forcefield._createResidueSignature([atom.element for atom in residue.atoms()])
if signature in forcefield._templateSignatures:
if any(matchResidue(residue, t, bondedToAtom) is not None for t in forcefield._templateSignatures[signature]):
continue
# Create a new template.
resName = "extra_"+residue.name
template = app.ForceField._TemplateData(resName)
forcefield._templates[resName] = template
indexInResidue = {}
for atom in residue.atoms():
element = atom.element
typeName = 'extra_'+element.symbol
if element not in atomTypes:
atomTypes[element] = app.ForceField._AtomType(typeName, '', 0.0, element)
forcefield._atomTypes[typeName] = atomTypes[element]
if water:
# Select a reasonable vdW radius for this atom type.
if element.symbol in radii:
sigma = radii[element.symbol]
else:
sigma = 0.5
nonbonded.registerAtom({'type':typeName, 'charge':'0', 'sigma':str(sigma), 'epsilon':'0'})
indexInResidue[atom.index] = len(template.atoms)
template.atoms.append(app.ForceField._TemplateAtomData(atom.name, typeName, element))
for atom in residue.atoms():
for bondedTo in bondedToAtom[atom.index]:
if bondedTo in indexInResidue:
b = (indexInResidue[atom.index], indexInResidue[bondedTo])
if b[0] < b[1]:
template.bonds.append(b)
template.atoms[b[0]].bondedTo.append(b[1])
template.atoms[b[1]].bondedTo.append(b[0])
else:
b = indexInResidue[atom.index]
template.externalBonds.append(b)
template.atoms[b].externalBonds += 1
if signature in forcefield._templateSignatures:
forcefield._templateSignatures[signature].append(template)
else:
forcefield._templateSignatures[signature] = [template]
return forcefield
def _findNearestDistance(self, context, newAtoms, cutoff, exclusions):
"""Given a set of newly added atoms, find the closest distance between one of those atoms and another atom."""
positions = context.getState(getPositions=True).getPositions(asNumpy=True).value_in_unit(unit.nanometer)
boxSize = np.max(positions, axis=0)-np.min(positions, axis=0)
boxVectors = [(boxSize[0], 0, 0), (0, boxSize[1], 0), (0, 0, boxSize[2])]
cells = app.modeller._CellList(positions, cutoff, boxVectors, False)
nearest_squared = sys.float_info.max
for atom in newAtoms:
excluded = exclusions[atom]
for i in cells.neighbors(positions[atom.index]):
if i not in excluded:
p = positions[atom.index]-positions[i]
dist_squared = np.dot(p, p)
if dist_squared < nearest_squared:
nearest_squared = dist_squared
return np.sqrt(nearest_squared)
def main():
if len(sys.argv) < 2:
# Display the UI.
from . import ui
ui.launchUI()
else:
# Run in command line mode.
from optparse import OptionParser
parser = OptionParser(usage="Usage: %prog\n %prog filename [options] \n\nWhen run with no arguments, it launches the user interface. If any arguments are specified, it runs in command line mode.")
parser.add_option('--pdbid', default=None, dest='pdbid', metavar='PDBID', help='PDB id to retrieve from RCSB [default: None]')
parser.add_option('--url', default=None, dest='url', metavar='URL', help='URL to retrieve PDB from [default: None]')
parser.add_option('--output', default='output.pdb', dest='output', metavar='FILENAME', help='output pdb file [default: output.pdb]')
parser.add_option('--add-atoms', default='all', dest='atoms', choices=('all', 'heavy', 'hydrogen', 'none'), help='which missing atoms to add: all, heavy, hydrogen, or none [default: all]')
parser.add_option('--keep-heterogens', default='all', dest='heterogens', choices=('all', 'water', 'none'), metavar='OPTION', help='which heterogens to keep: all, water, or none [default: all]')
parser.add_option('--replace-nonstandard', action='store_true', default=False, dest='nonstandard', help='replace nonstandard residues with standard equivalents')
parser.add_option('--add-residues', action='store_true', default=False, dest='residues', help='add missing residues')
parser.add_option('--water-box', dest='box', type='float', nargs=3, metavar='X Y Z', help='add a water box. The value is the box dimensions in nm [example: --water-box=2.5 2.4 3.0]')
parser.add_option('--ph', type='float', default=7.0, dest='ph', help='the pH to use for adding missing hydrogens [default: 7.0]')
parser.add_option('--positive-ion', default='Na+', dest='positiveIon', choices=('Cs+', 'K+', 'Li+', 'Na+', 'Rb+'), metavar='ION', help='positive ion to include in the water box: Cs+, K+, Li+, Na+, or Rb+ [default: Na+]')
parser.add_option('--negative-ion', default='Cl-', dest='negativeIon', choices=('Cl-', 'Br-', 'F-', 'I-'), metavar='ION', help='negative ion to include in the water box: Cl-, Br-, F-, or I- [default: Cl-]')
parser.add_option('--ionic-strength', type='float', default=0.0, dest='ionic', metavar='STRENGTH', help='molar concentration of ions to add to the water box [default: 0.0]')
parser.add_option('--verbose', default=False, action='store_true', dest='verbose', metavar='VERBOSE', help='Print verbose output')
(options, args) = parser.parse_args()
if (len(args) == 0) and (options.pdbid==None) and (options.url==None):
parser.error('No filename specified')
if len(args) > 1:
parser.error('Must specify a single filename or --pdbid or --url')
if options.pdbid != None:
if options.verbose: print('Retrieving PDB "' + options.pdbid + '" from RCSB...')
fixer = PDBFixer(pdbid=options.pdbid)
elif options.url != None:
if options.verbose: print('Retrieving PDB from URL "' + options.url + '"...')
fixer = PDBFixer(url=options.url)
else:
fixer = PDBFixer(filename=sys.argv[1])
if options.residues:
if options.verbose: print('Finding missing residues...')
fixer.findMissingResidues()
else:
fixer.missingResidues = {}
if options.nonstandard:
if options.verbose: print('Finding nonstandard residues...')
fixer.findNonstandardResidues()
if options.verbose: print('Replacing nonstandard residues...')
fixer.replaceNonstandardResidues()
if options.heterogens == 'none':
fixer.removeHeterogens(False)
elif options.heterogens == 'water':
fixer.removeHeterogens(True)
if options.verbose: print('Finding missing atoms...')
fixer.findMissingAtoms()
if options.atoms not in ('all', 'heavy'):
fixer.missingAtoms = {}
fixer.missingTerminals = {}
if options.verbose: print('Adding missing atoms...')
fixer.addMissingAtoms()
if options.atoms in ('all', 'hydrogen'):
if options.verbose: print('Adding missing hydrogens...')
fixer.addMissingHydrogens(options.ph)
if options.box is not None:
if options.verbose: print('Adding solvent...')
fixer.addSolvent(boxSize=options.box*unit.nanometer, positiveIon=options.positiveIon,
negativeIon=options.negativeIon, ionicStrength=options.ionic*unit.molar)
with open(options.output, 'w') as f:
if options.verbose: print('Writing output...')
if fixer.source is not None:
f.write("REMARK 1 PDBFIXER FROM: %s\n" % fixer.source)
app.PDBFile.writeFile(fixer.topology, fixer.positions, f, True)
if options.verbose: print('Done.')
if __name__ == '__main__':
main()
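# A hedged, scripted equivalent of the command-line path through main() above, for
# driving PDBFixer directly from Python (the file names are placeholders):
#
#   from pdbfixer import PDBFixer
#   import openmm.app as app
#
#   fixer = PDBFixer(filename='input.pdb')
#   fixer.findMissingResidues()
#   fixer.findNonstandardResidues()
#   fixer.replaceNonstandardResidues()
#   fixer.removeHeterogens(keepWater=True)
#   fixer.findMissingAtoms()
#   fixer.addMissingAtoms()
#   fixer.addMissingHydrogens(7.0)
#   with open('output.pdb', 'w') as out:
#       app.PDBFile.writeFile(fixer.topology, fixer.positions, out, True)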
| 60,255 | 45.350769 | 228 |
py
|
pdbfixer
|
pdbfixer-master/pdbfixer/__init__.py
|
from __future__ import absolute_import
from .pdbfixer import PDBFixer
| 71 | 17 | 38 |
py
|
pdbfixer
|
pdbfixer-master/pdbfixer/tests/test_mutate.py
|
import openmm.app as app
import pdbfixer
import tempfile
from pytest import raises
def test_mutate_1():
fixer = pdbfixer.PDBFixer(pdbid='1VII')
fixer.applyMutations(["ALA-57-GLY"], "A")
fixer.findMissingResidues()
fixer.findMissingAtoms()
fixer.addMissingAtoms()
fixer.addMissingHydrogens(7.0)
with tempfile.NamedTemporaryFile(mode='w+') as temp_pdb:
app.PDBFile.writeFile(fixer.topology, fixer.positions, temp_pdb)
temp_pdb.flush()
pdb = app.PDBFile(temp_pdb.name)
new_residue57 = list(fixer.topology.residues())[16]
assert new_residue57.name == "GLY", "Name of mutated residue did not change correctly!"
assert len(list(new_residue57.atoms())) == 7, "Should have 7 atoms in GLY 56"
atom_names = set([atom.name for atom in new_residue57.atoms()])
desired_atom_names = set(["N", "CA", "C", "O", "H", "HA3", "HA2"])
assert atom_names == desired_atom_names, "Atom Names did not match for GLY 56"
def test_mutate_2():
fixer = pdbfixer.PDBFixer(pdbid='1VII')
fixer.applyMutations(["ALA-57-LEU", "SER-56-ALA"], "A")
fixer.findMissingResidues()
fixer.findMissingAtoms()
fixer.addMissingAtoms()
fixer.addMissingHydrogens(7.0)
temp_pdb = tempfile.NamedTemporaryFile(mode='w+')
new_residue57 = list(fixer.topology.residues())[16]
new_residue56 = list(fixer.topology.residues())[15]
assert new_residue57.name == "LEU", "Name of mutated residue did not change correctly!"
assert new_residue56.name == "ALA", "Name of mutated residue did not change correctly!"
assert len(list(new_residue56.atoms())) == 10, "Should have 10 atoms in ALA 56"
assert len(list(new_residue57.atoms())) == 19, "Should have 19 atoms in LEU 57"
atom_names = set([atom.name for atom in new_residue56.atoms()])
desired_atom_names = set(["N", "CA", "CB", "C", "O", "H", "HA", "HB1", "HB2", "HB3"])
assert atom_names == desired_atom_names, "Atom Names did not match for ALA 56"
atom_names = set([atom.name for atom in new_residue57.atoms()])
desired_atom_names = set(["C", "N", "CA", "CB", "CG", "CD1", "CD2", "O", "H", "HA", "HB2", "HB3", "HD11", "HD12", "HD13", "HD21", "HD22", "HD23", "HG"])
assert atom_names == desired_atom_names, "Atom Names did not match for LEU 57"
def test_mutate_3_fails():
with raises(ValueError):
fixer = pdbfixer.PDBFixer(pdbid='1VII')
fixer.applyMutations(["ALA-57-GLY", "SER-57-ALA"], "A")
def test_mutate_4_fails():
with raises(KeyError):
fixer = pdbfixer.PDBFixer(pdbid='1VII')
fixer.applyMutations(["ALA-57-WTF", "SER-56-ALA"], "A")
def test_mutate_5_fails():
with raises(KeyError):
fixer = pdbfixer.PDBFixer(pdbid='1VII')
fixer.applyMutations(["ALA-1000-GLY", "SER-56-ALA"], "A")
def test_mutate_multiple_copies_of_chain_A():
fixer = pdbfixer.PDBFixer(pdbid='1OL5')
fixer.applyMutations(['TPO-287-THR','TPO-288-THR'], "A")
| 2,999 | 41.253521 | 156 |
py
|
pdbfixer
|
pdbfixer-master/pdbfixer/tests/test_cli.py
|
#!/usr/bin/python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test command-line interface.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import os
import subprocess
from subprocess import CalledProcessError
import pytest
#=============================================================================================
# UNIT TESTS
#=============================================================================================
def run_cli(arguments, expected_output=None):
try:
output = subprocess.check_output('pdbfixer ' + arguments, shell=True)
except CalledProcessError as e:
message = "An error return value (%s) was obtained:\n" % str(e.returncode)
message += "\n"
message += str(e.output)
message += "\n"
raise Exception(message)
if expected_output:
if output != expected_output:
message = "Output differs from expected output.\n"
message += "\n"
message += "Expected output:\n"
message += expected_output
message += "\n"
message += "Actual output:\n"
message += output
message += "\n"
raise Exception(message)
def test_help():
run_cli('--help')
def test_pdbid():
run_cli('--pdbid 1LE1')
@pytest.mark.skipif(os.getenv("GITHUB_ACTION") is not None, reason="Cannot download during CI")
def test_url():
run_cli('--url "http://www.rcsb.org/pdb/download/downloadFile.do?fileFormat=pdb&compression=NO&structureId=1LE1"')
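# Hedged examples of additional command lines that could be exercised through
# run_cli(); the flags are the ones defined by pdbfixer.main(), and the output file
# name is a placeholder:
#
#   run_cli('--pdbid 1LE1 --add-atoms all --keep-heterogens water --output fixed.pdb')
#   run_cli('--pdbid 1LE1 --replace-nonstandard --add-residues --ph 7.4')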
| 1,855 | 31.561404 | 118 |
py
|
pdbfixer
|
pdbfixer-master/pdbfixer/tests/test_build_and_simulate.py
|
from __future__ import print_function
import pdbfixer
import openmm
import os
import os.path
import sys
import numpy
import tempfile
from threading import Timer
#from a solution on stackoverflow
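# Watchdog is intended to raise itself when a build/simulate step exceeds `timeout`
# seconds. Note that, as written, the Timer is never started (neither __init__ nor
# reset() calls self.timer.start()), so the timeout callback never actually fires.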
class Watchdog(BaseException):
def __init__(self, timeout, userHandler=None): # timeout in seconds
self.timeout = timeout
self.handler = userHandler if userHandler is not None else self.defaultHandler
self.timer = Timer(self.timeout, self.handler)
def reset(self):
self.timer.cancel()
self.timer = Timer(self.timeout, self.handler)
def stop(self):
self.timer.cancel()
def defaultHandler(self):
raise self
def simulate(pdbcode, pdb_filename):
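    """Sanity-check a fixed PDB with OpenMM: build a no-cutoff system from the
    amber10 force field, check and minimize the energy, advance 500 MD steps, and
    raise if the potential energy is ever NaN.
    """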
from openmm import app
import openmm.openmm as mm
from openmm import unit
from sys import stdout
# Load the PDB file.
pdb = app.PDBFile(pdb_filename)
# Set up implicit solvent forcefield.
#forcefield = app.ForceField('amber99sbildn.xml')
forcefield = app.ForceField('amber10.xml')
# Create the system.
system = forcefield.createSystem(pdb.topology, nonbondedMethod=app.NoCutoff, constraints=app.HBonds)
# Create an integrator.
integrator = mm.LangevinIntegrator(300*unit.kelvin, 91.0/unit.picoseconds, 1.0*unit.femtoseconds)
# Create a context.
context = mm.Context(system, integrator)
context.setPositions(pdb.positions)
# Check to make sure energy is finite.
state = context.getState(getEnergy=True)
potential = state.getPotentialEnergy() / unit.kilocalories_per_mole
if numpy.isnan(potential):
raise Exception("Initial energy for %s is NaN." % pdbcode)
# Minimize.
tolerance = 1.0 * unit.kilocalories_per_mole / unit.angstroms
maxIterations = 50
mm.LocalEnergyMinimizer.minimize(context, tolerance, maxIterations)
# Check to make sure energy is finite.
state = context.getState(getEnergy=True)
potential = state.getPotentialEnergy() / unit.kilocalories_per_mole
if numpy.isnan(potential):
raise Exception("Energy for %s is NaN after minimization." % pdbcode)
# Simulate.
nsteps = 500
integrator.step(nsteps)
# Check to make sure energy is finite.
state = context.getState(getEnergy=True)
potential = state.getPotentialEnergy() / unit.kilocalories_per_mole
if numpy.isnan(potential):
raise Exception("Energy for %s is NaN after simulation." % pdbcode)
del context, integrator
print("Simulation completed: potential = %.3f kcal/mol" % potential)
return
def test_build_and_simulate():
# DEBUG: These are tough PDB codes from http://www.umass.edu/microbio/chime/pe_beta/pe/protexpl/badpdbs.htm
pdbcodes_to_build = ['1AS5', '1CBN', '1DPO', '1IGY', '1HAG', '1IAO', '4CPA', '1QCQ']
# DEBUG: Small test cases.
pdbcodes_to_build = ['110D', '116D', '117D', '118D', '134D', '135D', '136D', '138D', '143D', '148D', '151D', '152D', '159D', '177D', '17RA', '183D', '184D', '186D', '187D', '188D', '189D', '1A11', '1A13', '1A1P', '1A3P', '1A51', '1A60', '1A83', '1A9L', '1AAF', '1AB1', '1ABZ', '1AC7', '1ACW', '1AD7', '1ADX', '1AFP', '1AFT', '1AFX', '1AG7', '1AGG', '1AGL', '1AGT', '1AHL', '1AIE', '1AJ1', '1AJF', '1AJJ', '1AJU', '1AKG', '1AKX', '1AL1', '1ALE', '1ALF', '1ALG', '1AM0', '1AMB', '1AMC', '1AML', '1ANP', '1ANR', '1ANS', '1AO9', '1AOO', '1APF', '1APO', '1APQ', '1AQG', '1AQO', '1AQQ', '1AQR', '1AQS', '1ARD', '1ARE', '1ARF', '1ARJ', '1ARK', '1AS5', '1AT4', '1ATO', '1ATV', '1ATW', '1ATX', '1AV3', '1AW4', '1AW6', '1AWY', '1AXH', '1AY3', '1AYJ', '1AZ6', '1AZH', '1AZJ', '1AZK', '1B03', '1B0Q', '1B13', '1B1V', '1B2J', '1B36', '1B45', '1B4G', '1B4I', '1B4Y', '1B5N', '1B8W', '1B9G', '1B9P', '1B9Q', '1B9U', '1BA4', '1BA5', '1BA6', '1BAH', '1BAL', '1BBA', '1BBG', '1BBL', '1BBO', '1BCV', '1BD1', '1BDC', '1BDD', '1BDE', '1BDK', '1BDS', '1BE7', '1BEI', '1BF0', '1BF9', '1BFW', '1BFY', '1BFZ', '1BGK', '1BGZ', '1BH0', '1BH1', '1BH4', '1BH7', '1BHI', '1BHP', '1BIG', '1BJB', '1BJC', '1BJH', '1BK2', '1BK8', '1BKT', '1BKU', '1BL1', '1BM4', '1BMX', '1BN0', '1BNB', '1BNX', '1BOE', '1BOR', '1BPI', '1BPT', '1BQ8', '1BQ9', '1BQF', '1BRF', '1BRV', '1BRZ', '1BTI', '1BTQ', '1BTR', '1BTS', '1BTT', '1BUB', '1BUS', '1BVJ', '1BW6', '1BWX', '1BX7', '1BX8', '1BY0', '1BY6', '1BYJ', '1BYV', '1BYY', '1BZ2', '1BZ3', '1BZB', '1BZG', '1BZK', '1BZT', '1BZU', '1C0O', '1C26', '1C2U', '1C32', '1C34', '1C35', '1C38', '1C49', '1C4B', '1C4E', '1C4S', '1C55', '1C56', '1C6W', '1C98', '1C9A', '1C9Z', '1CAA', '1CAD', '1CAP', '1CB3', '1CB9', '1CBH', '1CBN', '1CCF', '1CCM', '1CCN', '1CCQ', '1CCV', '1CE3', '1CE4', '1CEK', '1CEU', '1CFG', '1CFH', '1CFI', '1CHL', '1CHV', '1CIX', '1CKW', '1CKX', '1CKY', '1CKZ', '1CL4', '1CLF', '1CMR', '1CNL', '1CNN', '1CNR', '1CO4', '1COI', '1CQ0', '1CQ5', '1CQL', '1CQU', '1CR8', '1CRE', '1CRF', '1CRN', '1CS9', '1CSA', '1CT6', '1CTI', '1CV9', '1CVQ', '1CW5', '1CW6', '1CW8', '1CWX', '1CWZ', '1CXN', '1CXO', '1CXR', '1CXW', '1CYA', '1CYB', '1CZ6', '1D0R', '1D0T', '1D0U', '1D0W', '1D10', '1D11', '1D12', '1D13', '1D14', '1D15', '1D16', '1D17', '1D1E', '1D1F', '1D1H', '1D26', '1D2D', '1D2J', '1D2L', '1D33', '1D35', '1D36', '1D37', '1D38', '1D54', '1D58', '1D5Q', '1D61', '1D62', '1D67', '1D6B', '1D6X', '1D78', '1D79', '1D7N', '1D7T', '1D7Z', '1D82', '1D8G', '1D93', '1D9J', '1D9L', '1D9M', '1D9O', '1D9P', '1DA0', '1DA9', '1DB6', '1DEC', '1DEM', '1DEN', '1DEP', '1DF6', '1DFE', '1DFS', '1DFT', '1DFW', '1DFY', '1DFZ']
# impossible cases
pdbcodes_to_build = [
'1AO9', # contains residue DOP, which is not resolved in the ATOM records and does not appear to have a machine-readable definition anywhere
]
# DEBUG: A few small test cases.
pdbcodes_to_build = ['110D', '116D', '117D', '118D', '134D', '135D', '136D', '138D', '143D', '148D', '151D', '152D', '159D', '177D', '17RA', '183D', '184D', '186D', '187D', '188D', '189D', '1A11', '1A13', '1A1P', '1A3P', '1A51', '1A60', '1A83', '1A9L', '1AAF', '1AB1', '1ABZ', '1AC7', '1ACW', '1AD7', '1ADX', '1AFP', '1AFT', '1AFX', '1AG7', '1AGG', '1AGL', '1AGT', '1AHL', '1AIE', '1AJ1', '1AJF', '1AJJ', '1AJU', '1AKG', '1AKX', '1AL1', '1ALE', '1ALF', '1ALG', '1AM0', '1AMB', '1AMC', '1AML', '1ANP', '1ANR', '1ANS', '1AOO']
# Don't simulate any.
pdbcodes_to_simulate = []
# Keep track of list of failures.
failures = list()
for pdbcode in pdbcodes_to_build:
print("------------------------------------------------")
print(pdbcode)
output_pdb_filename = 'output.pdb'
# PDB setup parameters.
# TODO: Try several combinations?
from openmm import unit
pH = 7.0
ionic = 50.0 * unit.millimolar
box = 10.0 * unit.angstrom
positiveIon = 'Na+'
negativeIon = 'Cl-'
outfile = tempfile.NamedTemporaryFile(mode='w', delete=False)
output_pdb_filename = outfile.name
timeout_seconds = 30
watchdog = Watchdog(timeout_seconds)
build_successful = False
try:
from pdbfixer.pdbfixer import PDBFixer
from openmm import app
stage = "Creating PDBFixer..."
fixer = PDBFixer(pdbid=pdbcode)
stage = "Finding missing residues..."
fixer.findMissingResidues()
stage = "Finding nonstandard residues..."
fixer.findNonstandardResidues()
stage = "Replacing nonstandard residues..."
fixer.replaceNonstandardResidues()
stage = "Removing heterogens..."
fixer.removeHeterogens(False)
stage = "Finding missing atoms..."
fixer.findMissingAtoms()
stage = "Adding missing atoms..."
fixer.addMissingAtoms()
stage = "Adding missing hydrogens..."
fixer.addMissingHydrogens(pH)
stage = "Writing PDB file..."
app.PDBFile.writeFile(fixer.topology, fixer.positions, outfile)
stage = "Done."
outfile.close()
build_successful = True
except Watchdog:
message = "timed out in stage %s" % stage
print(message)
failures.append((pdbcode, Exception(message)))
except Exception as e:
print("EXCEPTION DURING BUILD")
#import traceback
#print traceback.print_exc()
print(str(e))
failures.append((pdbcode, e))
watchdog.stop()
del watchdog
# Test simulating this with OpenMM.
if (pdbcode in pdbcodes_to_simulate) and (build_successful):
watchdog = Watchdog(timeout_seconds)
try:
simulate(pdbcode, output_pdb_filename)
except Watchdog:
message = "timed out in simulation"
print(message)
failures.append((pdbcode, Exception(message)))
except Exception as e:
print("EXCEPTION DURING SIMULATE")
#import traceback
#print traceback.print_exc()
print(str(e))
failures.append((pdbcode, e))
watchdog.stop()
del watchdog
# Clean up.
os.remove(output_pdb_filename)
print("------------------------------------------------")
if len(failures) != 0:
print("")
print("SUMMARY OF FAILURES:")
print("")
for failure in failures:
(pdbcode, exception) = failure
print("%6s : %s" % (pdbcode, str(exception)))
print("")
raise Exception("Build test failed on one or more PDB files.")
else:
print("All tests succeeded.")
if __name__ == '__main__':
test_build_and_simulate()
| 9,799 | 45.445498 | 2,624 |
py
|
pdbfixer
|
pdbfixer-master/pdbfixer/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
pdbfixer
|
pdbfixer-master/pdbfixer/tests/test_removechains.py
|
import openmm.app as app
import pdbfixer
import tempfile
import time
from pathlib import Path
from urllib.request import urlopen
from io import StringIO
import pytest
@pytest.fixture(scope="module")
def file_content():
return (Path(__file__).parent / "data" / "4JSV.pdb").read_text()
def remove_chains_and_verify(file_content, expected_chain_ids_remaining, **kws):
# Create a PDBFixer instance for the given pdbid
fixer = pdbfixer.PDBFixer(pdbfile=StringIO(file_content))
# Remove specified chains.
fixer.removeChains(**kws)
# Check to make sure asserted chains remain.
chain_ids_remaining = [c.id for c in fixer.topology.chains()]
assert expected_chain_ids_remaining == chain_ids_remaining
def test_removechain_ids(file_content):
remove_chains_and_verify(file_content, ['B', 'D', 'A', 'C', 'B', 'A'], chainIds=[])
remove_chains_and_verify(file_content, ['A', 'C', 'A'], chainIds=['B', 'D'])
remove_chains_and_verify(file_content, ['B', 'D', 'B'], chainIds=['A', 'C'])
remove_chains_and_verify(file_content, ['D', 'C'], chainIds=['B', 'A'])
remove_chains_and_verify(file_content, [], chainIds=['B', 'D', 'A', 'C'])
def test_removechain_indices(file_content):
remove_chains_and_verify(file_content, ['B', 'D', 'A', 'C', 'B', 'A'], chainIndices=[])
remove_chains_and_verify(file_content, ['A', 'C', 'B', 'A'], chainIndices=[0, 1])
remove_chains_and_verify(file_content, ['B', 'D', 'B', 'A'], chainIndices=[2, 3])
remove_chains_and_verify(file_content, ['D', 'C', 'B', 'A'], chainIndices=[0, 2])
remove_chains_and_verify(file_content, [], chainIndices=[0, 1, 2, 3, 4, 5])
| 1,647 | 42.368421 | 91 |
py
|
SGA
|
SGA-main/setup.py
|
#!/usr/bin/env python
# Supports:
# - python setup.py install
#
# Does not support:
# - python setup.py test
# - python setup.py version
import os, glob
from setuptools import setup, find_packages
def _get_version():
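    # Derive the package version from `git describe`; this assumes the source tree is
    # a git checkout (it will raise CalledProcessError when built from a plain tarball).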
import subprocess
version = subprocess.check_output('git describe', shell=True)
version = version.decode('utf-8').replace('\n', '')
return version
version = _get_version()
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup_kwargs=dict(
name='SGA',
url='https://github.com/moustakas/SGA',
version=version,
author='John Moustakas',
author_email='[email protected]',
#packages=[],
license=license,
description='Siena Galaxy Atlas',
long_description=readme,
)
#- What to install
setup_kwargs['packages'] = find_packages('py')
setup_kwargs['package_dir'] = {'':'py'}
#- Treat everything in bin/ as a script to be installed
setup_kwargs['scripts'] = glob.glob(os.path.join('bin', '*'))
#- Data to include
# setup_kwargs['package_data'] = {
# 'legacyhalos': ['data/*',],
# 'legacyhalos.test': ['data/*',],
# }
#- Testing
#setup_kwargs['test_suite'] = 'SGA.test.test_suite'
#- Go!
setup(**setup_kwargs)
| 1,244 | 20.842105 | 65 |
py
|
SGA
|
SGA-main/science/SGA2020/sga-bgspv-targets.py
|
#!/usr/bin/env python
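# Collect DESI main-survey targets that are SGA large galaxies (REF_CAT == 'L3')
# selected as BGS_ANY and/or peculiar-velocity (PV) secondary targets, tag their
# origin in a TARG column, drop duplicate TARGETIDs, and write the result to
# sga-bgspv-targets.fits.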
import os, pdb
import numpy as np
import fitsio
from glob import glob
from astropy.table import Table, vstack, join
from desitarget.targetmask import desi_mask, bgs_mask, scnd_mask
pvnames = ['PV_BRIGHT_HIGH', 'PV_BRIGHT_MEDIUM', 'PV_BRIGHT_LOW',
'PV_DARK_HIGH', 'PV_DARK_MEDIUM', 'PV_DARK_LOW']
cols = ['RA', 'DEC', 'REF_CAT', 'REF_ID', 'DESI_TARGET', 'BGS_TARGET', 'SCND_TARGET', 'TARGETID']
out = []
for survey in ['bright', 'dark']:
print(f'Working on {survey}')
tfiles = glob(os.getenv('DESI_TARGET')+f'/catalogs/dr9/1.1.1/targets/main/resolve/{survey}/targets-{survey}-*.fits')
for tfile in tfiles:#[:2]:
tt = Table(fitsio.read(tfile, 'TARGETS', columns=cols))
I = np.where((desi_mask.mask('BGS_ANY') & tt['DESI_TARGET'] != 0) * (tt['REF_CAT'] == 'L3'))[0]
tbgs = []
if len(I) > 0:
tbgs = tt[I]
tbgs['TARG'] = 'BGS'
J = []
for pvname in pvnames:
J.append(np.where(scnd_mask.mask(pvname) & tt['SCND_TARGET'] != 0)[0])
J = np.hstack(J)
tpv = []
if len(J) > 0:
tpv = tt[J]
tpv['TARG'] = 'PV'
_out = []
if len(tbgs) > 0 and len(tpv) > 0:
_out = join(tbgs, tpv, keys=cols)
_, uindx = np.unique(_out['TARGETID'], return_index=True)
_out = _out[uindx]
if 'TARG_1' in _out.colnames:
_out['TARG'] = [t1+'-'+t2 for t1, t2 in zip(_out['TARG_1'].data, _out['TARG_2'].data)]
_out.remove_columns(['TARG_1', 'TARG_2'])
elif len(tbgs) > 0 and len(tpv) == 0:
_out = tbgs
elif len(tbgs) == 0 and len(tpv) > 0:
_out = tpv
if len(_out) > 0:
_out['FILENAME'] = os.path.basename(tfile)
_out['SURVEY'] = survey
out.append(_out)
out = vstack(out)
# ignores bright-dark overlap
_, uindx = np.unique(out['TARGETID'], return_index=True)
out = out[uindx]
print(len(out))
out.write('sga-bgspv-targets.fits', overwrite=True)
| 2,087 | 33.229508 | 120 |
py
|
SGA
|
SGA-main/py/SGA/galex.py
|
"""
SGA.galex
=========
Code to generate GALEX custom coadds / mosaics.
"""
import os, pdb
import numpy as np
from astrometry.util.util import Tan
from astrometry.util.fits import fits_table
import SGA.misc
def _ra_ranges_overlap(ralo, rahi, ra1, ra2):
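    # Map each RA onto a unit vector in the x-y plane and use 2-D cross products to
    # test whether the [ra1, ra2] interval overlaps [ralo, rahi]; this sidesteps
    # explicit wrap-around handling at 0/360 degrees.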
import numpy as np
x1 = np.cos(np.deg2rad(ralo))
y1 = np.sin(np.deg2rad(ralo))
x2 = np.cos(np.deg2rad(rahi))
y2 = np.sin(np.deg2rad(rahi))
x3 = np.cos(np.deg2rad(ra1))
y3 = np.sin(np.deg2rad(ra1))
x4 = np.cos(np.deg2rad(ra2))
y4 = np.sin(np.deg2rad(ra2))
cw32 = x2*y3 - x3*y2
cw41 = x1*y4 - x4*y1
return np.logical_and(cw32 <= 0, cw41 >= 0)
def _galex_rgb_dstn(imgs, **kwargs):
nuv,fuv = imgs
h,w = nuv.shape
myrgb = np.zeros((h,w,3), np.float32)
lo,hi = -0.0005, 0.01
myrgb[:,:,0] = myrgb[:,:,1] = np.clip((nuv - lo) / (hi - lo), 0., 1.)
lo,hi = -0.00015, 0.003
myrgb[:,:,2] = np.clip((fuv - lo) / (hi - lo), 0., 1.)
myrgb[:,:,1] = np.clip((myrgb[:,:,0] + myrgb[:,:,2]*0.2), 0., 1.)
return myrgb
def _galex_rgb_official(imgs, **kwargs):
    from scipy.ndimage import uniform_filter, gaussian_filter  # scipy.ndimage.filters is deprecated
nuv,fuv = imgs
h,w = nuv.shape
red = nuv * 0.206 * 2297
blue = fuv * 1.4 * 1525
#blue = uniform_filter(blue, 3)
blue = gaussian_filter(blue, 1.)
green = (0.2*blue + 0.8*red)
red *= 0.085
green *= 0.095
blue *= 0.08
nonlinearity = 2.5
radius = red + green + blue
val = np.arcsinh(radius * nonlinearity) / nonlinearity
with np.errstate(divide='ignore', invalid='ignore'):
red = red * val / radius
green = green * val / radius
blue = blue * val / radius
mx = np.maximum(red, np.maximum(green, blue))
mx = np.maximum(1., mx)
red /= mx
green /= mx
blue /= mx
rgb = np.clip(np.dstack((red, green, blue)), 0., 1.)
return rgb
def _galex_rgb_moustakas(imgs, **kwargs):
#from scipy.ndimage.filters import uniform_filter, gaussian_filter
nuv,fuv = imgs
h,w = nuv.shape
red = nuv * 0.206 * 2297
blue = fuv * 1.4 * 1525
#blue = uniform_filter(blue, 3)
#blue = gaussian_filter(blue, 1.)
green = (0.2*blue + 0.8*red)
red *= 0.085
green *= 0.095
blue *= 0.08
nonlinearity = 0.5 # 1.0 # 2.5
radius = red + green + blue
val = np.arcsinh(radius * nonlinearity) / nonlinearity
with np.errstate(divide='ignore', invalid='ignore'):
red = red * val / radius
green = green * val / radius
blue = blue * val / radius
mx = np.maximum(red, np.maximum(green, blue))
mx = np.maximum(1., mx)
lo = -0.1
red = (red - lo) / (mx - lo)
green = (green - lo) / (mx - lo)
blue = (blue - lo) / (mx - lo)
#red /= mx
#green /= mx
#blue /= mx
rgb = np.clip(np.dstack((red, green, blue)), 0., 1.)
return rgb
def _read_galex_tiles(targetwcs, galex_dir, log=None, verbose=False):
"""Find and read the overlapping GALEX FUV/NUV tiles."""
H, W = targetwcs.shape
ralo, declo = targetwcs.pixelxy2radec(W, 1)
rahi, dechi = targetwcs.pixelxy2radec(1, H)
#print('RA', ralo,rahi)
#print('Dec', declo,dechi)
fn = os.path.join(galex_dir, 'galex-images.fits')
#print('Reading', fn)
# galex "bricks" (actually just GALEX tiles)
galex_tiles = fits_table(fn)
galex_tiles.rename('ra_cent', 'ra')
galex_tiles.rename('dec_cent', 'dec')
galex_tiles.rename('have_n', 'has_n')
galex_tiles.rename('have_f', 'has_f')
cosd = np.cos(np.deg2rad(galex_tiles.dec))
galex_tiles.ra1 = galex_tiles.ra - 3840*1.5/3600./2./cosd
galex_tiles.ra2 = galex_tiles.ra + 3840*1.5/3600./2./cosd
galex_tiles.dec1 = galex_tiles.dec - 3840*1.5/3600./2.
galex_tiles.dec2 = galex_tiles.dec + 3840*1.5/3600./2.
bricknames = []
for tile, subvis in zip(galex_tiles.tilename, galex_tiles.subvis):
if subvis == -999:
bricknames.append(tile.strip())
else:
bricknames.append('%s_sg%02i' % (tile.strip(), subvis))
galex_tiles.brickname = np.array(bricknames)
# bricks_touching_radec_box(self, ralo, rahi, declo, dechi, scale=None):
I, = np.nonzero((galex_tiles.dec1 <= dechi) * (galex_tiles.dec2 >= declo))
ok = _ra_ranges_overlap(ralo, rahi, galex_tiles.ra1[I], galex_tiles.ra2[I])
I = I[ok]
galex_tiles.cut(I)
if verbose:
print('-> bricks', galex_tiles.brickname, flush=True, file=log)
return galex_tiles
def galex_coadds(onegal, galaxy=None, radius_mosaic=30, radius_mask=None,
pixscale=1.5, ref_pixscale=0.262, output_dir=None, galex_dir=None,
log=None, centrals=True, verbose=False):
'''Generate custom GALEX cutouts.
radius_mosaic and radius_mask in arcsec
pixscale: GALEX pixel scale in arcsec/pixel.
'''
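    # Minimal usage sketch (hypothetical values; assumes GALEX_DIR points at the GALEX
    # tile directory and that '{galaxy}-tractor.fits' already exists in output_dir):
    #   onegal = {'RA': 185.0287, 'DEC': 29.2765}
    #   galex_coadds(onegal, galaxy='NGC4321', radius_mosaic=180.0,
    #                output_dir='./NGC4321', verbose=True)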
import fitsio
import matplotlib.pyplot as plt
from astrometry.libkd.spherematch import match_radec
from astrometry.util.resample import resample_with_wcs, OverlapError
from tractor import (Tractor, NanoMaggies, Image, LinearPhotoCal,
NCircularGaussianPSF, ConstantFitsWcs, ConstantSky)
from legacypipe.survey import imsave_jpeg
from legacypipe.catalog import read_fits_catalog
if galaxy is None:
galaxy = 'galaxy'
if galex_dir is None:
galex_dir = os.environ.get('GALEX_DIR')
if output_dir is None:
output_dir = '.'
if radius_mask is None:
radius_mask = radius_mosaic
radius_search = 5.0 # [arcsec]
else:
radius_search = radius_mask
W = H = np.ceil(2 * radius_mosaic / pixscale).astype('int') # [pixels]
targetwcs = Tan(onegal['RA'], onegal['DEC'], (W+1) / 2.0, (H+1) / 2.0,
-pixscale / 3600.0, 0.0, 0.0, pixscale / 3600.0, float(W), float(H))
# Read the custom Tractor catalog
tractorfile = os.path.join(output_dir, '{}-tractor.fits'.format(galaxy))
if not os.path.isfile(tractorfile):
print('Missing Tractor catalog {}'.format(tractorfile))
return 0
cat = fits_table(tractorfile)
print('Read {} sources from {}'.format(len(cat), tractorfile), flush=True, file=log)
keep = np.ones(len(cat)).astype(bool)
if centrals:
# Find the large central galaxy and mask out (ignore) all the models
# which are within its elliptical mask.
# This algorithm will have to change for mosaics not centered on large
# galaxies, e.g., in galaxy groups.
m1, m2, d12 = match_radec(cat.ra, cat.dec, onegal['RA'], onegal['DEC'],
radius_search/3600.0, nearest=False)
if len(m1) == 0:
print('No central galaxies found at the central coordinates!', flush=True, file=log)
else:
pixfactor = ref_pixscale / pixscale # shift the optical Tractor positions
for mm in m1:
morphtype = cat.type[mm].strip()
if morphtype == 'EXP' or morphtype == 'COMP':
e1, e2, r50 = cat.shapeexp_e1[mm], cat.shapeexp_e2[mm], cat.shapeexp_r[mm] # [arcsec]
elif morphtype == 'DEV' or morphtype == 'COMP':
e1, e2, r50 = cat.shapedev_e1[mm], cat.shapedev_e2[mm], cat.shapedev_r[mm] # [arcsec]
else:
r50 = None
if r50:
majoraxis = r50 * 5 / pixscale # [pixels]
ba, phi = SGA.misc.convert_tractor_e1e2(e1, e2)
these = SGA.misc.ellipse_mask(W / 2, W / 2, majoraxis, ba * majoraxis,
np.radians(phi), cat.bx*pixfactor, cat.by*pixfactor)
if np.sum(these) > 0:
#keep[these] = False
pass
print('Hack!')
keep[mm] = False
#srcs = read_fits_catalog(cat)
#_srcs = np.array(srcs)[~keep].tolist()
#mod = SGA.misc.srcs2image(_srcs, ConstantFitsWcs(targetwcs), psf_sigma=3.0)
#import matplotlib.pyplot as plt
##plt.imshow(mod, origin='lower') ; plt.savefig('junk.png')
#plt.imshow(np.log10(mod), origin='lower') ; plt.savefig('junk.png')
#pdb.set_trace()
srcs = read_fits_catalog(cat)
for src in srcs:
src.freezeAllBut('brightness')
#srcs_nocentral = np.array(srcs)[keep].tolist()
# Find all overlapping GALEX tiles and then read the tims.
galex_tiles = _read_galex_tiles(targetwcs, galex_dir, log=log, verbose=verbose)
gbands = ['n','f']
nicegbands = ['NUV', 'FUV']
zps = dict(n=20.08, f=18.82)
coimgs, comods, coresids, coimgs_central, comods_nocentral = [], [], [], [], []
for niceband, band in zip(nicegbands, gbands):
J = np.flatnonzero(galex_tiles.get('has_'+band))
print(len(J), 'GALEX tiles have coverage in band', band)
coimg = np.zeros((H, W), np.float32)
comod = np.zeros((H, W), np.float32)
cowt = np.zeros((H, W), np.float32)
comod_nocentral = np.zeros((H, W), np.float32)
for src in srcs:
src.setBrightness(NanoMaggies(**{band: 1}))
for j in J:
brick = galex_tiles[j]
fn = os.path.join(galex_dir, brick.tilename.strip(),
'%s-%sd-intbgsub.fits.gz' % (brick.brickname, band))
#print(fn)
gwcs = Tan(*[float(f) for f in
[brick.crval1, brick.crval2, brick.crpix1, brick.crpix2,
brick.cdelt1, 0., 0., brick.cdelt2, 3840., 3840.]])
img = fitsio.read(fn)
#print('Read', img.shape)
try:
Yo, Xo, Yi, Xi, nil = resample_with_wcs(targetwcs, gwcs, [], 3)
except OverlapError:
continue
K = np.flatnonzero(img[Yi, Xi] != 0.)
if len(K) == 0:
continue
Yo, Xo, Yi, Xi = Yo[K], Xo[K], Yi[K], Xi[K]
wt = brick.get(band + 'exptime')
coimg[Yo, Xo] += wt * img[Yi, Xi]
cowt [Yo, Xo] += wt
x0, x1, y0, y1 = min(Xi), max(Xi), min(Yi), max(Yi)
subwcs = gwcs.get_subimage(x0, y0, x1-x0+1, y1-y0+1)
twcs = ConstantFitsWcs(subwcs)
timg = img[y0:y1+1, x0:x1+1]
tie = np.ones_like(timg) ## HACK!
#hdr = fitsio.read_header(fn)
#zp = hdr['']
zp = zps[band]
photocal = LinearPhotoCal( NanoMaggies.zeropointToScale(zp), band=band)
tsky = ConstantSky(0.0)
# HACK -- circular Gaussian PSF of fixed size...
# in arcsec
#fwhms = dict(NUV=6.0, FUV=6.0)
# -> sigma in pixels
#sig = fwhms[band] / 2.35 / twcs.pixel_scale()
sig = 6.0 / np.sqrt(8 * np.log(2)) / twcs.pixel_scale()
tpsf = NCircularGaussianPSF([sig], [1.])
tim = Image(data=timg, inverr=tie, psf=tpsf, wcs=twcs, sky=tsky,
photocal=photocal, name='GALEX ' + band + brick.brickname)
## Build the model image with and without the central galaxy model.
tractor = Tractor([tim], srcs)
mod = tractor.getModelImage(0)
tractor.freezeParam('images')
tractor.optimize_forced_photometry(priors=False, shared_params=False)
mod = tractor.getModelImage(0)
srcs_nocentral = np.array(srcs)[keep].tolist()
#srcs_nocentral = np.array(srcs)[nocentral].tolist()
tractor_nocentral = Tractor([tim], srcs_nocentral)
mod_nocentral = tractor_nocentral.getModelImage(0)
comod[Yo, Xo] += wt * mod[Yi-y0, Xi-x0]
comod_nocentral[Yo, Xo] += wt * mod_nocentral[Yi-y0, Xi-x0]
coimg /= np.maximum(cowt, 1e-18)
comod /= np.maximum(cowt, 1e-18)
comod_nocentral /= np.maximum(cowt, 1e-18)
coresid = coimg - comod
# Subtract the model image which excludes the central (comod_nocentral)
# from the data (coimg) to isolate the light of the central
# (coimg_central).
coimg_central = coimg - comod_nocentral
coimgs.append(coimg)
comods.append(comod)
coresids.append(coresid)
comods_nocentral.append(comod_nocentral)
coimgs_central.append(coimg_central)
# Write out the final images with and without the central, making sure
# to apply the zeropoint to go from counts/s to AB nanomaggies.
# https://asd.gsfc.nasa.gov/archive/galex/FAQ/counts_background.html
for thisimg, imtype in zip( (coimg, comod, comod_nocentral),
('image', 'model', 'model-nocentral') ):
fitsfile = os.path.join(output_dir, '{}-{}-{}.fits'.format(galaxy, imtype, niceband))
if verbose:
print('Writing {}'.format(fitsfile))
fitsio.write(fitsfile, thisimg * 10**(-0.4 * (zp - 22.5)), clobber=True)
# Build a color mosaic (but note that the images here are in units of
# background-subtracted counts/s).
#_galex_rgb = _galex_rgb_moustakas
#_galex_rgb = _galex_rgb_dstn
_galex_rgb = _galex_rgb_official
for imgs, imtype in zip( (coimgs, comods, coresids, comods_nocentral, coimgs_central),
('image', 'model', 'resid', 'model-nocentral', 'image-central') ):
rgb = _galex_rgb(imgs)
jpgfile = os.path.join(output_dir, '{}-{}-FUVNUV.jpg'.format(galaxy, imtype))
if verbose:
print('Writing {}'.format(jpgfile))
imsave_jpeg(jpgfile, rgb, origin='lower')
return 1
| 13,777 | 36.237838 | 105 |
py
|
SGA
|
SGA-main/py/SGA/html.py
|
"""
SGA.html
========
Code to generate HTML output for the various stages of the SGA analysis.
"""
import os
import multiprocessing
import numpy as np
from PIL import Image, ImageDraw, ImageFont
# Note: several module-level names used below (SGAdir, jpgdir, PIXSCALE, nproc,
# cutouturl, viewerurl, fonttype, barlen, ...) are assumed to be defined elsewhere;
# they are not set in this file.
def get_layer(onegal):
if onegal['DR'] == 'dr6':
layer = 'mzls+bass-dr6'
elif onegal['DR'] == 'dr7':
layer = 'decals-dr5'
else:
print('Unrecognized data release {}!'.format(onegal['DR']))
raise ValueError
return layer
def _get_cutouts_one(args):
"""Wrapper function for the multiprocessing."""
return get_cutouts_one(*args)
def get_cutouts_one(group, clobber=False):
"""Get viewer cutouts for a single galaxy."""
layer = get_layer(group)
groupname = get_groupname(group)
diam = group_diameter(group) # [arcmin]
size = np.ceil(diam * 60 / PIXSCALE).astype('int') # [pixels]
imageurl = '{}/?ra={:.8f}&dec={:.8f}&pixscale={:.3f}&size={:g}&layer={}'.format(
cutouturl, group['ra'], group['dec'], PIXSCALE, size, layer)
jpgfile = os.path.join(jpgdir, '{}.jpg'.format(groupname))
cmd = 'wget --continue -O {:s} "{:s}"' .format(jpgfile, imageurl)
if os.path.isfile(jpgfile) and not clobber:
print('File {} exists...skipping.'.format(jpgfile))
else:
if os.path.isfile(jpgfile):
os.remove(jpgfile)
print(cmd)
os.system(cmd)
def get_cutouts(groupsample, use_nproc=nproc, clobber=False):
"""Get viewer cutouts of the whole sample."""
cutoutargs = list()
for gg in groupsample:
cutoutargs.append( (gg, clobber) )
if use_nproc > 1:
p = multiprocessing.Pool(nproc)
p.map(_get_cutouts_one, cutoutargs)
p.close()
else:
for args in cutoutargs:
_get_cutouts_one(args)
return
def _add_labels_one(args):
"""Wrapper function for the multiprocessing."""
return add_labels_one(*args)
def add_labels_one(group, sample, clobber=False, nothumb=False):
jpgdir = os.path.join(SGAdir, 'cutouts', 'jpg')
pngdir = os.path.join(SGAdir, 'cutouts', 'png')
if not os.path.isdir(pngdir):
os.mkdir(pngdir)
groupname = get_groupname(group)
galaxy = get_galaxy(group, sample, html=True)
jpgfile = os.path.join(jpgdir, '{}.jpg'.format(groupname))
pngfile = os.path.join(pngdir, '{}.png'.format(groupname))
thumbfile = os.path.join(pngdir, 'thumb-{}.png'.format(groupname))
if os.path.isfile(jpgfile):
if os.path.isfile(pngfile) and not clobber:
print('File {} exists...skipping.'.format(pngfile))
else:
im = Image.open(jpgfile)
sz = im.size
fntsize = np.round(sz[0]/28).astype('int')
width = np.round(sz[0]/175).astype('int')
font = ImageFont.truetype(fonttype, size=fntsize)
draw = ImageDraw.Draw(im)
# Label the group--
draw.text((0+fntsize*2, 0+fntsize*2), galaxy, font=font)
# Add a scale bar--
x0, x1, yy = sz[1]-fntsize*2-barlen, sz[1]-fntsize*2, sz[0]-fntsize*2
draw.line((x0, yy, x1, yy), fill='white', width=width)
im.save(pngfile)
# Generate a thumbnail
if not nothumb:
cmd = 'convert -thumbnail 300x300 {} {}'.format(pngfile, thumbfile)
os.system(cmd)
def add_labels(groupsample, sample, clobber=False):
labelargs = list()
for group in groupsample:
labelargs.append((group, sample, clobber))
if nproc > 1:
p = multiprocessing.Pool(nproc)
res = p.map(_add_labels_one, labelargs)
p.close()
else:
for args in labelargs:
res = _add_labels_one(args)
def html_rows(html, _groupkeep, sample, nperrow=4):
    # `html` is the open file handle being written to (see make_html below).
# Not all objects may have been analyzed.
these = [os.path.isfile(os.path.join(SGAdir, 'cutouts', 'png', '{}.png'.format(
get_groupname(gg)))) for gg in _groupkeep]
groupkeep = _groupkeep[these]
nrow = np.ceil(len(groupkeep) / nperrow).astype('int')
groupsplit = list()
for ii in range(nrow):
i1 = nperrow*ii
i2 = nperrow*(ii+1)
if i2 > len(groupkeep):
i2 = len(groupkeep)
groupsplit.append(groupkeep[i1:i2])
print('Splitting the sample into {} rows with {} mosaics per row.'.format(nrow, nperrow))
html.write('<table class="ls-gallery">\n')
html.write('<tbody>\n')
for grouprow in groupsplit:
html.write('<tr>\n')
for group in grouprow:
groupname = get_groupname(group)
galaxy = get_galaxy(group, sample, html=True)
pngfile = os.path.join('cutouts', 'png', '{}.png'.format(groupname))
thumbfile = os.path.join('cutouts', 'png', 'thumb-{}.png'.format(groupname))
img = 'src="{}" alt="{}"'.format(thumbfile, galaxy)
#img = 'class="ls-gallery" src="{}" alt="{}"'.format(thumbfile, nicename)
html.write('<td><a href="{}"><img {}></a></td>\n'.format(pngfile, img))
html.write('</tr>\n')
html.write('<tr>\n')
for group in grouprow:
groupname = get_groupname(group)
galaxy = '{}: {}'.format(groupname.upper(), get_galaxy(group, sample, html=True))
layer = get_layer(group)
href = '{}/?layer={}&ra={:.8f}&dec={:.8f}&zoom=12'.format(viewerurl, layer, group['ra'], group['dec'])
html.write('<td><a href="{}" target="_blank">{}</a></td>\n'.format(href, galaxy))
html.write('</tr>\n')
html.write('</tbody>\n')
html.write('</table>\n')
def make_plots(sample, analysisdir=None, htmldir='.', refband='r',
band=('g', 'r', 'z'), clobber=False, verbose=True):
"""Make QA plots.
"""
sample_trends(sample, htmldir, analysisdir=analysisdir, verbose=verbose)
for gal in sample:
objid, objdir = get_objid(gal, analysisdir=analysisdir)
htmlobjdir = os.path.join(htmldir, '{}'.format(objid))
if not os.path.isdir(htmlobjdir):
os.makedirs(htmlobjdir, exist_ok=True)
# Build the ellipse plots.
qa_ellipse_results(objid, objdir, htmlobjdir, band=band,
clobber=clobber, verbose=verbose)
qa_sersic_results(objid, objdir, htmlobjdir, band=band,
clobber=clobber, verbose=verbose)
# Build the montage coadds.
qa_montage_coadds(objid, objdir, htmlobjdir, clobber=clobber, verbose=verbose)
# Build the MGE plots.
#qa_mge_results(objid, objdir, htmlobjdir, refband='r', band=band,
# clobber=clobber, verbose=verbose)
def _javastring():
"""Return a string that embeds a date in a webpage."""
import textwrap
js = textwrap.dedent("""
<SCRIPT LANGUAGE="JavaScript">
var months = new Array(13);
months[1] = "January";
months[2] = "February";
months[3] = "March";
months[4] = "April";
months[5] = "May";
months[6] = "June";
months[7] = "July";
months[8] = "August";
months[9] = "September";
months[10] = "October";
months[11] = "November";
months[12] = "December";
var dateObj = new Date(document.lastModified)
var lmonth = months[dateObj.getMonth() + 1]
var date = dateObj.getDate()
var fyear = dateObj.getYear()
if (fyear < 2000)
fyear = fyear + 1900
document.write(" " + fyear + " " + lmonth + " " + date)
</SCRIPT>
""")
return js
def make_html(sample=None, htmldir=None, dr='dr6-dr7', makeplots=True, clobber=False,
verbose=True):
"""Make the HTML pages.
"""
import SGA.io
if htmldir is None:
htmldir = SGA.io.html_dir()
    if sample is None:
        sample = SGA.io.read_parent(dr=dr)
objid, objdir = legacyhalos.io.get_objid(sample)
reject = []
toss = np.zeros(len(groupsample), dtype=bool)
for ii, gg in enumerate(groupsample['groupid']):
for rej in np.atleast_1d(reject):
toss[ii] = rej in gg.lower()
if toss[ii]:
break
print('Rejecting {} groups.'.format(np.sum(toss)))
groupkeep = groupsample[~toss]
if np.sum(toss) > 0:
grouprej = groupsample[toss]
else:
grouprej = []
# Write the last-updated date to a webpage.
js = _javastring()
# Get the viewer link
def _viewer_link(gal, dr):
baseurl = 'http://legacysurvey.org/viewer/'
width = 2 * cutout_radius_150kpc(redshift=gal['z'], pixscale=0.262) # [pixels]
if width > 400:
zoom = 14
else:
zoom = 15
viewer = '{}?ra={:.6f}&dec={:.6f}&zoom={:g}&layer=decals-{}'.format(
baseurl, gal['ra'], gal['dec'], zoom, dr)
return viewer
homehtml = 'index.html'
# Build the home (index.html) page--
if not os.path.exists(htmldir):
os.makedirs(htmldir)
htmlfile = os.path.join(htmldir, homehtml)
with open(htmlfile, 'w') as html:
html.write('<html><head>\n')
html.write('<style type="text/css">\n')
html.write('table.ls-gallery {width: 90%;}\n')
html.write('p.ls-gallery {width: 80%;}\n')
html.write('</style>\n')
html.write('</head><body>\n')
html.write('<h1>Siena Galaxy Atlas 2020 (SGA-2020)</h1>\n')
html.write("""<p class="ls-gallery">Each thumbnail links to a larger image while the galaxy
name below each thumbnail links to the <a href="http://legacysurvey.org/viewer">Sky Viewer</a>.
For reference, the horizontal white bar in the lower-right corner of each image represents
one arcminute.</p>\n""")
        html_rows(html, groupkeep, sample)
html.write('<br /><br />\n')
html.write('<b><i>Last updated {}</b></i>\n'.format(js))
html.write('</body></html>\n')
if makeplots:
make_plots(sample, analysisdir=analysisdir, htmldir=htmldir, refband=refband,
band=band, clobber=clobber, verbose=verbose)
| 9,966 | 33.607639 | 114 |
py
|
SGA
|
SGA-main/py/SGA/unwise.py
|
"""
SGA.unwise
==========
Code to generate unWISE custom coadds / mosaics.
"""
import os, pdb
import numpy as np
import SGA.misc
def _unwise_to_rgb(imgs, bands=[1,2], mn=-1, mx=100, arcsinh=1.0):
"""Support routine to generate color unWISE images.
Note that the input images need to be in *Vega* nanomaggies!
"""
img = imgs[0]
H, W = img.shape
## FIXME
w1,w2 = imgs
rgb = np.zeros((H, W, 3), np.uint8)
scale1 = 50.
scale2 = 50.
#mn,mx = -3.,30.
#arcsinh = None
img1 = w1 / scale1
img2 = w2 / scale2
#print('W1 99th', np.percentile(img1, 99))
#print('W2 99th', np.percentile(img2, 99))
if arcsinh is not None:
def nlmap(x):
return np.arcsinh(x * arcsinh) / np.sqrt(arcsinh)
#img1 = nlmap(img1)
#img2 = nlmap(img2)
mean = (img1 + img2) / 2.
I = nlmap(mean)
img1 = img1 / mean * I
img2 = img2 / mean * I
mn = nlmap(mn)
mx = nlmap(mx)
img1 = (img1 - mn) / (mx - mn)
img2 = (img2 - mn) / (mx - mn)
rgb[:,:,2] = (np.clip(img1, 0., 1.) * 255).astype(np.uint8)
rgb[:,:,0] = (np.clip(img2, 0., 1.) * 255).astype(np.uint8)
rgb[:,:,1] = rgb[:, :, 0] / 2 + rgb[:, :, 2] / 2
return rgb
def unwise_coadds(onegal, galaxy=None, radius_mosaic=30, radius_mask=None,
pixscale=2.75, ref_pixscale=0.262, output_dir=None,
unwise_dir=None, verbose=False, log=None, centrals=True):
'''Generate custom unWISE cutouts.
radius_mosaic and radius_mask in arcsec
pixscale: WISE pixel scale in arcsec/pixel; make this smaller than 2.75
to oversample.
'''
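    # Minimal usage sketch (hypothetical values; assumes UNWISE_COADDS_DIR is set and
    # that '{galaxy}-tractor.fits' already exists in output_dir):
    #   onegal = {'RA': 185.0287, 'DEC': 29.2765}
    #   unwise_coadds(onegal, galaxy='NGC4321', radius_mosaic=180.0,
    #                 output_dir='./NGC4321', verbose=True)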
import fitsio
import matplotlib.pyplot as plt
from astrometry.util.util import Tan
from astrometry.util.fits import fits_table
from astrometry.libkd.spherematch import match_radec
from astrometry.util.resample import resample_with_wcs, ResampleError
from wise.forcedphot import unwise_tiles_touching_wcs
from wise.unwise import get_unwise_tractor_image
from tractor import Tractor, Image, NanoMaggies
from legacypipe.survey import imsave_jpeg
from legacypipe.catalog import read_fits_catalog
if galaxy is None:
galaxy = 'galaxy'
if output_dir is None:
output_dir = '.'
if unwise_dir is None:
unwise_dir = os.environ.get('UNWISE_COADDS_DIR')
if radius_mask is None:
radius_mask = radius_mosaic
radius_search = 5.0 # [arcsec]
else:
radius_search = radius_mask
# Initialize the WCS object.
W = H = np.ceil(2 * radius_mosaic / pixscale).astype('int') # [pixels]
targetwcs = Tan(onegal['RA'], onegal['DEC'], (W + 1) / 2.0, (H + 1) / 2.0,
-pixscale / 3600.0, 0.0, 0.0, pixscale / 3600.0, float(W), float(H))
# Read the custom Tractor catalog.
tractorfile = os.path.join(output_dir, '{}-tractor.fits'.format(galaxy))
if not os.path.isfile(tractorfile):
print('Missing Tractor catalog {}'.format(tractorfile), flush=True, file=log)
return 0
primhdr = fitsio.read_header(tractorfile)
cat = fits_table(tractorfile)
print('Read {} sources from {}'.format(len(cat), tractorfile), flush=True, file=log)
keep = np.ones(len(cat)).astype(bool)
if centrals:
# Find the large central galaxy and mask out (ignore) all the models
# which are within its elliptical mask.
# This algorithm will have to change for mosaics not centered on large
# galaxies, e.g., in galaxy groups.
m1, m2, d12 = match_radec(cat.ra, cat.dec, onegal['RA'], onegal['DEC'],
radius_search/3600.0, nearest=False)
if len(m1) == 0:
print('No central galaxies found at the central coordinates!', flush=True, file=log)
else:
pixfactor = ref_pixscale / pixscale # shift the optical Tractor positions
for mm in m1:
morphtype = cat.type[mm].strip()
if morphtype == 'EXP' or morphtype == 'COMP':
e1, e2, r50 = cat.shapeexp_e1[mm], cat.shapeexp_e2[mm], cat.shapeexp_r[mm] # [arcsec]
elif morphtype == 'DEV' or morphtype == 'COMP':
e1, e2, r50 = cat.shapedev_e1[mm], cat.shapedev_e2[mm], cat.shapedev_r[mm] # [arcsec]
else:
r50 = None
if r50:
majoraxis = r50 * 5 / pixscale # [pixels]
ba, phi = SGA.misc.convert_tractor_e1e2(e1, e2)
these = SGA.misc.ellipse_mask(W / 2, W / 2, majoraxis, ba * majoraxis,
np.radians(phi), cat.bx*pixfactor, cat.by*pixfactor)
if np.sum(these) > 0:
#keep[these] = False
pass
print('Hack!')
keep[mm] = False
#srcs = read_fits_catalog(cat)
#_srcs = np.array(srcs)[~keep].tolist()
#mod = SGA.misc.srcs2image(_srcs, ConstantFitsWcs(targetwcs), psf_sigma=3.0)
#import matplotlib.pyplot as plt
##plt.imshow(mod, origin='lower') ; plt.savefig('junk.png')
#plt.imshow(np.log10(mod), origin='lower') ; plt.savefig('junk.png')
#pdb.set_trace()
srcs = read_fits_catalog(cat)
for src in srcs:
src.freezeAllBut('brightness')
#srcs_nocentral = np.array(srcs)[keep].tolist()
cat_nocentral = cat[keep]
## Find and remove all the objects within XX arcsec of the target
## coordinates.
#m1, m2, d12 = match_radec(T.ra, T.dec, onegal['RA'], onegal['DEC'], 5/3600.0, nearest=False)
#if len(d12) == 0:
# print('No matching galaxies found -- probably not what you wanted.')
# #raise ValueError
# nocentral = np.ones(len(T)).astype(bool)
#else:
# nocentral = ~np.isin(T.objid, T[m1].objid)
#T_nocentral = T[nocentral]
# Find and read the overlapping unWISE tiles. Assume the targetwcs is
# axis-aligned and that the edge midpoints yield the RA, Dec limits (true
# for TAN). Note: the way the roiradec box is used, the min/max order
# doesn't matter.
r, d = targetwcs.pixelxy2radec(np.array([1, W, W/2, W/2]),
np.array([H/2, H/2, 1, H ]))
roiradec = [r[0], r[1], d[2], d[3]]
tiles = unwise_tiles_touching_wcs(targetwcs)
wbands = [1, 2, 3, 4]
wanyband = 'w'
vega_to_ab = dict(w1=2.699, w2=3.339, w3=5.174, w4=6.620)
# Convert the AB WISE fluxes in the Tractor catalog to Vega nanomaggies so
# they're consistent with the coadds, below.
for band in wbands:
f = cat.get('flux_w{}'.format(band))
e = cat.get('flux_ivar_w{}'.format(band))
print('Setting negative fluxes equal to zero!')
f[f < 0] = 0
#f[f/e < 3] = 0
f *= 10**(0.4 * vega_to_ab['w{}'.format(band)])
coimgs = [np.zeros((H, W), np.float32) for b in wbands]
comods = [np.zeros((H, W), np.float32) for b in wbands]
comods_nocentral = [np.zeros((H, W), np.float32) for b in wbands]
con = [np.zeros((H, W), np.uint8) for b in wbands]
for iband, band in enumerate(wbands):
for ii, src in enumerate(srcs):
src.setBrightness( NanoMaggies(**{wanyband: cat.get('flux_w{}'.format(band) )[ii]}) )
srcs_nocentral = np.array(srcs)[keep].tolist()
#srcs_nocentral = np.array(srcs)[nocentral].tolist()
# The tiles have some overlap, so for each source, keep the fit in the
# tile whose center is closest to the source.
for tile in tiles:
#print('Reading tile {}'.format(tile.coadd_id))
tim = get_unwise_tractor_image(unwise_dir, tile.coadd_id, band,
bandname=wanyband, roiradecbox=roiradec)
if tim is None:
print('Actually, no overlap with tile {}'.format(tile.coadd_id))
continue
print('Read image {} with shape {}'.format(tile.coadd_id, tim.shape))
def _unwise_mod(tim, use_cat, use_srcs, margin=10):
# Select sources in play.
wisewcs = tim.wcs.wcs
timH, timW = tim.shape
ok, x, y = wisewcs.radec2pixelxy(use_cat.ra, use_cat.dec)
x = (x - 1.).astype(np.float32)
y = (y - 1.).astype(np.float32)
I = np.flatnonzero((x >= -margin) * (x < timW + margin) *
(y >= -margin) * (y < timH + margin))
#print('Found {} sources within the image + margin = {} pixels'.format(len(I), margin))
subcat = [use_srcs[i] for i in I]
tractor = Tractor([tim], subcat)
mod = tractor.getModelImage(0)
return mod
mod = _unwise_mod(tim, cat, srcs)
mod_nocentral = _unwise_mod(tim, cat_nocentral, srcs_nocentral)
try:
Yo, Xo, Yi, Xi, nil = resample_with_wcs(targetwcs, tim.wcs.wcs)
except ResampleError:
continue
if len(Yo) == 0:
continue
            # Both the tims and the models are in Vega nanomaggies at this point (the
            # catalog fluxes were converted from AB to Vega above); they are converted
            # back to AB only when the coadds are written out below.
coimgs[iband][Yo, Xo] += tim.getImage()[Yi, Xi]
comods[iband][Yo, Xo] += mod[Yi, Xi]
comods_nocentral[iband][Yo, Xo] += mod_nocentral[Yi, Xi]
con [iband][Yo, Xo] += 1
## Convert back to nanomaggies.
#vega2ab = vega_to_ab['w{}'.format(band)]
#coimgs[iband] *= 10**(-0.4 * vega2ab)
#comods[iband] *= 10**(-0.4 * vega2ab)
#comods_nocentral[iband] *= 10**(-0.4 * vega2ab)
for img, mod, mod_nocentral, n in zip(coimgs, comods, comods_nocentral, con):
img /= np.maximum(n, 1)
mod /= np.maximum(n, 1)
mod_nocentral /= np.maximum(n, 1)
coresids = [img-mod for img, mod in list(zip(coimgs, comods))]
# Subtract the model image which excludes the central (comod_nocentral)
# from the data (coimg) to isolate the light of the central
# (coimg_central).
coimgs_central = [img-mod for img, mod in list(zip(coimgs, comods_nocentral))]
# Write out the final images with and without the central and converted into
# AB nanomaggies.
for coadd, imtype in zip( (coimgs, comods, comods_nocentral),
('image', 'model', 'model-nocentral') ):
for img, band in zip(coadd, wbands):
vega2ab = vega_to_ab['w{}'.format(band)]
fitsfile = os.path.join(output_dir, '{}-{}-W{}.fits'.format(galaxy, imtype, band))
if verbose:
print('Writing {}'.format(fitsfile))
fitsio.write(fitsfile, img * 10**(-0.4 * vega2ab), clobber=True)
# Generate color WISE images.
kwa = dict(mn=-1, mx=100, arcsinh=0.5)
#kwa = dict(mn=-0.05, mx=1., arcsinh=0.5)
#kwa = dict(mn=-0.1, mx=2., arcsinh=None)
for imgs, imtype in zip( (coimgs, comods, coresids, comods_nocentral, coimgs_central),
('image', 'model', 'resid', 'model-nocentral', 'image-central') ):
rgb = _unwise_to_rgb(imgs[:2], **kwa) # W1, W2
jpgfile = os.path.join(output_dir, '{}-{}-W1W2.jpg'.format(galaxy, imtype))
if verbose:
print('Writing {}'.format(jpgfile))
imsave_jpeg(jpgfile, rgb, origin='lower')
return 1
| 11,596 | 38.580205 | 105 |
py
|
SGA
|
SGA-main/py/SGA/qa.py
|
"""
SGA.qa
======
Code to do produce various QA (quality assurance) plots.
"""
import os, pdb
import warnings
import time, subprocess
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import SGA.misc
#import seaborn as sns
#sns.set(style='ticks', font_scale=1.4, palette='Set2')
sns, _ = SGA.misc.plot_style()
def _sbprofile_colors():
"""Return an iterator of colors good for the surface brightness profile plots.
https://seaborn.pydata.org/generated/seaborn.color_palette.html#seaborn.color_palette
"""
_colors = sns.color_palette('Set1', n_colors=11, desat=0.75)
colors = iter([ _colors[1], _colors[2], _colors[0], _colors[3], _colors[4],
_colors[5], _colors[6], _colors[7], _colors[8],
_colors[9], _colors[10]])
return colors
def qa_binned_radec(cat, nside=64, png=None):
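    # Bin the catalog onto a HEALPix map and plot the surface density of galaxies per
    # deg^2 over the sky, weighting each pixel by the inverse DESI fractional coverage.
    # `cat` is assumed to provide 'ra' and 'dec' columns in degrees.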
import warnings
import healpy as hp
import desimodel.io
import desimodel.footprint
from desiutil.plots import init_sky, plot_sky_binned
ra, dec = cat['ra'].data, cat['dec'].data
hpix = desimodel.footprint.radec2pix(nside, ra, dec)
fig, ax = plt.subplots(figsize=(9, 5))
with warnings.catch_warnings():
pixweight = desimodel.io.load_pixweight(nside)
fracarea = pixweight[hpix]
weight = 1 / fracarea
warnings.simplefilter('ignore')
basemap = init_sky(galactic_plane_color='k', ax=ax);
plot_sky_binned(ra, dec, weights=weight,
max_bin_area=hp.nside2pixarea(nside, degrees=True),
verbose=False, clip_lo='!1', cmap='viridis',
plot_type='healpix', basemap=basemap,
label=r'$N$(Large Galaxies) / deg$^2$')
#plt.suptitle('Parent Sample')
if png:
fig.savefig(png)
def qa_multiwavelength_coadds(galaxy, galaxydir, htmlgalaxydir, clobber=False,
verbose=True):
"""Montage the multiwavelength coadds into a nice QAplot."""
# Show the data (GALEX, LS, unWISE from left to right).
montagefile = os.path.join(htmlgalaxydir, '{}-multiwavelength-data.png'.format(galaxy))
if not os.path.isfile(montagefile) or clobber:
# Make sure all the files exist.
check = True
jpgfile = []
for suffix in ('image-FUVNUV', 'custom-image-grz', 'image-W1W2'):
_jpgfile = os.path.join(galaxydir, '{}-{}.jpg'.format(galaxy, suffix))
jpgfile.append(_jpgfile)
if not os.path.isfile(_jpgfile):
print('File {} not found!'.format(_jpgfile))
check = False
if check:
cmd = 'montage -bordercolor white -borderwidth 1 -tile 3x1 -geometry +0+0 -resize 512 '
cmd = cmd+' '.join(ff for ff in jpgfile)
cmd = cmd+' {}'.format(montagefile)
if verbose:
print('Writing {}'.format(montagefile))
subprocess.call(cmd.split())
# Now make a 3x3 montage which has the data, model (no central), residual
# (just central) from left to right and GALEX, LS, unWISE from top to
# bottom.
montagefile = os.path.join(htmlgalaxydir, '{}-multiwavelength-models.png'.format(galaxy))
if not os.path.isfile(montagefile) or clobber:
# Make sure all the files exist.
check = True
jpgfile = []
for suffix in ('image-FUVNUV', 'model-nocentral-FUVNUV', 'image-central-FUVNUV',
'custom-image-grz', 'custom-model-nocentral-grz', 'custom-image-central-grz',
'image-W1W2', 'model-nocentral-W1W2', 'image-central-W1W2'):
_jpgfile = os.path.join(galaxydir, '{}-{}.jpg'.format(galaxy, suffix))
jpgfile.append(_jpgfile)
if not os.path.isfile(_jpgfile):
print('File {} not found!'.format(_jpgfile))
check = False
if check:
cmd = 'montage -bordercolor white -borderwidth 1 -tile 3x3 -geometry +0+0 -resize 512 '
cmd = cmd+' '.join(ff for ff in jpgfile)
cmd = cmd+' {}'.format(montagefile)
if verbose:
print('Writing {}'.format(montagefile))
subprocess.call(cmd.split())
def ellipse_sbprofile(ellipsefit, minerr=0.0):
"""Convert ellipse-fitting results to a magnitude, color, and surface brightness
profiles.
"""
band, refband = ellipsefit['band'], ellipsefit['refband']
pixscale, redshift = ellipsefit['pixscale'], ellipsefit['redshift']
indx = np.ones(len(ellipsefit[refband]), dtype=bool)
sbprofile = dict()
for filt in band:
sbprofile['psfsigma_{}'.format(filt)] = ellipsefit['psfsigma_{}'.format(filt)]
sbprofile['redshift'] = redshift
sbprofile['minerr'] = minerr
sbprofile['smaunit'] = 'arcsec'
    sbprofile['sma'] = ellipsefit[refband].sma[indx] * pixscale # [arcsec]
# Create a pixel scale mapping to accommodate GALEX and unWISE imaging.
#filt2pixscalefactor = {'g': 1.0, 'r': 1.0, 'z': 1.0}
#if 'NUV' in band:
# sbprofile['sma_galex'] = ellipsefit['r'].sma * ellipsefit['galex_pixscale'] / pixscale # [arcsec]
#if 'W1' in band:
# sbprofile['sma_unwise'] = ellipsefit['r'].sma * ellipsefit['unwise_pixscale'] / pixscale # [arcsec]
with np.errstate(invalid='ignore'):
for filt in band:
#area = ellipsefit[filt].sarea[indx] * pixscale**2
sbprofile['mu_{}'.format(filt)] = 22.5 - 2.5 * np.log10(ellipsefit[filt].intens[indx])
#sbprofile[filt] = 22.5 - 2.5 * np.log10(ellipsefit[filt].intens[indx])
sbprofile['mu_{}_err'.format(filt)] = 2.5 * ellipsefit[filt].int_err[indx] / \
ellipsefit[filt].intens[indx] / np.log(10)
sbprofile['mu_{}_err'.format(filt)] = np.sqrt(sbprofile['mu_{}_err'.format(filt)]**2 + minerr**2)
# Just for the plot use a minimum uncertainty
#sbprofile['{}_err'.format(filt)][sbprofile['{}_err'.format(filt)] < minerr] = minerr
if 'mu_g' in sbprofile.keys() and 'mu_r' in sbprofile.keys():
sbprofile['gr'] = sbprofile['mu_g'] - sbprofile['mu_r']
sbprofile['gr_err'] = np.sqrt(sbprofile['mu_g_err']**2 + sbprofile['mu_r_err']**2)
if 'mu_r' in sbprofile.keys() and 'mu_z' in sbprofile.keys():
sbprofile['rz'] = sbprofile['mu_r'] - sbprofile['mu_z']
sbprofile['rz_err'] = np.sqrt(sbprofile['mu_r_err']**2 + sbprofile['mu_z_err']**2)
if 'mu_r' in sbprofile.keys() and 'mu_i' in sbprofile.keys():
sbprofile['ri'] = sbprofile['mu_r'] - sbprofile['mu_i']
sbprofile['ri_err'] = np.sqrt(sbprofile['mu_r_err']**2 + sbprofile['mu_i_err']**2)
return sbprofile
def display_ellipse_sbprofile(ellipsefit, skyellipsefit={}, minerr=0.0,
smascale=None, png=None, verbose=True):
"""Display the multiwavelength surface brightness profile.
"""
import astropy.stats
#from legacyhalos.ellipse import ellipse_sbprofile
if ellipsefit['success']:
sbprofile = ellipse_sbprofile(ellipsefit, minerr=minerr)
band, refband = ellipsefit['band'], ellipsefit['refband']
redshift, pixscale = ellipsefit['redshift'], ellipsefit['pixscale']
if smascale is None:
smascale = SGA.misc.arcsec2kpc(redshift) # [kpc/arcsec]
#if png:
# sbfile = png.replace('.png', '.txt')
# legacyhalos.io.write_sbprofile(sbprofile, smascale, sbfile)
yminmax = [40, 0]
xminmax = [0, 0]
colors = _sbprofile_colors()
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 8), sharex=True,
gridspec_kw = {'height_ratios':[1, 0.5]})
for filt in band:
sma = sbprofile['sma']
mu = sbprofile['mu_{}'.format(filt)]
muerr = sbprofile['mu_{}_err'.format(filt)]
#good = (ellipsefit[filt].stop_code < 4)
#bad = ~good
#with np.errstate(invalid='ignore'):
# good = np.isfinite(mu) * (mu / muerr > 2)
good = np.isfinite(mu) * (muerr < 0.5)
sma = sma[good]
mu = mu[good]
muerr = muerr[good]
col = next(colors)
ax1.fill_between(sma, mu-muerr, mu+muerr, label=r'${}$'.format(filt), color=col,
alpha=0.75, edgecolor='k', lw=2)
if np.nanmin(mu-muerr) < yminmax[0]:
yminmax[0] = np.nanmin(mu-muerr)
if np.nanmax(mu+muerr) > yminmax[1]:
yminmax[1] = np.nanmax(mu+muerr)
if np.nanmax(sma) > xminmax[1]:
xminmax[1] = np.nanmax(sma)
if bool(skyellipsefit):
skysma = skyellipsefit['sma'] * pixscale
with warnings.catch_warnings():
warnings.simplefilter('ignore')
sky = astropy.stats.mad_std(skyellipsefit[filt], axis=1, ignore_nan=True)
# sky = np.nanstd(skyellipsefit[filt], axis=1) # / np.sqrt(skyellipsefit[
skygood = np.isfinite(sky)
skysma = skysma[skygood]
skymu = 22.5 - 2.5 * np.log10(sky[skygood])
ax1.plot( skysma, skymu , color=col, ls='--', alpha=0.75)
if skymu.max() > yminmax[1]:
yminmax[1] = skymu.max()
ax1.text(0.05, 0.04, 'Sky Variance', ha='left', va='center',
transform=ax1.transAxes, fontsize=12)
#ax1.axhline(y=ellipsefit['mu_{}_sky'.format(filt)], color=col, ls='--')
#if filt == refband:
# ysky = ellipsefit['mu_{}_sky'.format(filt)] - 2.5 * np.log10(0.1) # 10% of sky
# ax1.axhline(y=ysky, color=col, ls='--')
ax1.set_ylabel(r'$\mu(a)$ (mag arcsec$^{-2}$)')
#ax1.set_ylabel(r'Surface Brightness $\mu(a)$ (mag arcsec$^{-2}$)')
#ax1.set_ylabel(r'Surface Brightness $\mu(r)$ (mag arcsec$^{-2}$)')
ylim = [yminmax[0]-0.5, yminmax[1]+0.75]
if ylim[0] < 17:
ylim[0] = 10 # 17
if ylim[1] > 32.5:
ylim[1] = 35 # 32.5
ax1.set_ylim(ylim)
ax1.invert_yaxis()
xlim = [xminmax[0], xminmax[1]*1.01]
#ax1.set_xlim(xmin=0)
#ax1.margins(xmargin=0)
#ax1.set_ylabel(r'$\mu$ (mag arcsec$^{-2}$)')
#ax1.set_ylim(31.99, 18)
ax1_twin = ax1.twiny()
ax1_twin.set_xlim( (xlim[0]*smascale, xlim[1]*smascale) )
ax1_twin.set_xlabel('Semi-major Axis $a$ (kpc)')
ax1.legend(loc='upper right', ncol=1)
# color vs semi-major axis
ax2.fill_between(sbprofile['sma'],
sbprofile['gr'] - sbprofile['gr_err'],
sbprofile['gr'] + sbprofile['gr_err'],
label=r'$g - r$', color=next(colors), alpha=0.75,
edgecolor='k', lw=2)
if 'rz' in sbprofile.keys():
ax2.fill_between(sbprofile['sma'],
sbprofile['rz'] - sbprofile['rz_err'],
sbprofile['rz'] + sbprofile['rz_err'],
label=r'$r - z$', color=next(colors), alpha=0.75,
edgecolor='k', lw=2)
elif 'ri' in sbprofile.keys():
ax2.fill_between(sbprofile['sma'],
sbprofile['ri'] - sbprofile['ri_err'],
sbprofile['ri'] + sbprofile['ri_err'],
label=r'$r - i$', color=next(colors), alpha=0.75,
edgecolor='k', lw=2)
ax2.set_xlabel(r'Semi-major Axis $a$ (arcsec)')
#ax2.set_xlabel(r'Galactocentric radius $r$ (arcsec)')
#ax2.legend(loc='upper left')
ax2.legend(bbox_to_anchor=(0.25, 0.99))
ax2.set_ylabel('Color (mag)')
ax2.set_ylim(-0.5, 2.8)
for xx in (ax1, ax2):
xx.set_xlim(xlim)
ylim = xx.get_ylim()
xx.fill_between([0, 3*ellipsefit['psfsigma_r']*ellipsefit['pixscale']], [ylim[0], ylim[0]],
[ylim[1], ylim[1]], color='grey', alpha=0.1)
        ax2.text(0.03, 0.09, 'PSF\n(3$\\sigma$)', ha='center', va='center',
transform=ax2.transAxes, fontsize=10)
fig.subplots_adjust(hspace=0.0)
if png:
if verbose:
print('Writing {}'.format(png))
fig.savefig(png)
plt.close(fig)
else:
plt.show()
def display_ellipsefit(ellipsefit, xlog=False, png=None, verbose=True):
"""Display the isophote fitting results."""
from matplotlib.ticker import FormatStrFormatter, ScalarFormatter
colors = iter(sns.color_palette())
if ellipsefit['success']:
band, refband = ellipsefit['band'], ellipsefit['refband']
pixscale, redshift = ellipsefit['pixscale'], ellipsefit['redshift']
        smascale = SGA.misc.arcsec2kpc(redshift) # [kpc/arcsec]
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12, 9), sharex=True)
good = (ellipsefit[refband].stop_code < 4)
bad = ~good
ax1.fill_between(ellipsefit[refband].sma[good] * pixscale,
ellipsefit[refband].eps[good]-ellipsefit[refband].ellip_err[good],
ellipsefit[refband].eps[good]+ellipsefit[refband].ellip_err[good])#,
#edgecolor='k', lw=2)
if np.count_nonzero(bad) > 0:
ax1.scatter(ellipsefit[refband].sma[bad] * pixscale, ellipsefit[refband].eps[bad],
marker='s', s=40, edgecolor='k', lw=2, alpha=0.75)
#ax1.errorbar(ellipsefit[refband].sma[good] * smascale,
# ellipsefit[refband].eps[good],
# ellipsefit[refband].ellip_err[good], fmt='o',
# markersize=4)#, color=color[refband])
#ax1.set_ylim(0, 0.5)
ax1.xaxis.set_major_formatter(ScalarFormatter())
ax2.fill_between(ellipsefit[refband].sma[good] * pixscale,
np.degrees(ellipsefit[refband].pa[good]-ellipsefit[refband].pa_err[good]),
np.degrees(ellipsefit[refband].pa[good]+ellipsefit[refband].pa_err[good]))#,
#edgecolor='k', lw=2)
if np.count_nonzero(bad) > 0:
ax2.scatter(ellipsefit[refband].sma[bad] * pixscale, np.degrees(ellipsefit[refband].pa[bad]),
marker='s', s=40, edgecolor='k', lw=2, alpha=0.75)
#ax2.errorbar(ellipsefit[refband].sma[good] * smascale,
# np.degrees(ellipsefit[refband].pa[good]),
# np.degrees(ellipsefit[refband].pa_err[good]), fmt='o',
# markersize=4)#, color=color[refband])
#ax2.set_ylim(0, 180)
ax3.fill_between(ellipsefit[refband].sma[good] * pixscale,
ellipsefit[refband].x0[good]-ellipsefit[refband].x0_err[good],
ellipsefit[refband].x0[good]+ellipsefit[refband].x0_err[good])#,
#edgecolor='k', lw=2)
if np.count_nonzero(bad) > 0:
ax3.scatter(ellipsefit[refband].sma[bad] * pixscale, ellipsefit[refband].x0[bad],
marker='s', s=40, edgecolor='k', lw=2, alpha=0.75)
#ax3.errorbar(ellipsefit[refband].sma[good] * smascale, ellipsefit[refband].x0[good],
# ellipsefit[refband].x0_err[good], fmt='o',
# markersize=4)#, color=color[refband])
ax3.xaxis.set_major_formatter(ScalarFormatter())
ax3.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax4.fill_between(ellipsefit[refband].sma[good] * pixscale,
ellipsefit[refband].y0[good]-ellipsefit[refband].y0_err[good],
ellipsefit[refband].y0[good]+ellipsefit[refband].y0_err[good])#,
#edgecolor='k', lw=2)
if np.count_nonzero(bad) > 0:
ax4.scatter(ellipsefit[refband].sma[bad] * pixscale, ellipsefit[refband].y0[bad],
marker='s', s=40, edgecolor='k', lw=2, alpha=0.75)
#ax4.errorbar(ellipsefit[refband].sma[good] * smascale, ellipsefit[refband].y0[good],
# ellipsefit[refband].y0_err[good], fmt='o',
# markersize=4)#, color=color[refband])
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
ax2.yaxis.set_major_formatter(ScalarFormatter())
ax4.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax4.yaxis.tick_right()
ax4.yaxis.set_label_position('right')
ax4.xaxis.set_major_formatter(ScalarFormatter())
ax4.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
for xx in (ax1, ax2, ax3, ax4):
xx.set_xlim(xmin=0)
xlim = ax1.get_xlim()
ax1_twin = ax1.twiny()
ax1_twin.set_xlim( (xlim[0]*smascale, xlim[1]*smascale) )
ax1_twin.set_xlabel('Galactocentric radius (kpc)')
ax2_twin = ax2.twiny()
ax2_twin.set_xlim( (xlim[0]*smascale, xlim[1]*smascale) )
ax2_twin.set_xlabel('Galactocentric radius (kpc)')
ax1.set_ylabel(r'Ellipticity $\epsilon$')
ax2.set_ylabel('Position Angle (deg)')
ax3.set_xlabel(r'Galactocentric radius $r$ (arcsec)')
ax3.set_ylabel(r'$x$ Center')
ax4.set_xlabel(r'Galactocentric radius $r$ (arcsec)')
ax4.set_ylabel(r'$y$ Center')
if xlog:
for xx in (ax1, ax2, ax3, ax4):
xx.set_xscale('log')
fig.subplots_adjust(hspace=0.03, wspace=0.03, bottom=0.15, right=0.85, left=0.15)
if png:
if verbose:
print('Writing {}'.format(png))
fig.savefig(png)
plt.close(fig)
else:
plt.show()
def qa_curveofgrowth(ellipsefit, png=None, verbose=True):
"""Plot up the curve of growth versus semi-major axis.
"""
fig, ax = plt.subplots(figsize=(9, 7))
band, refband, redshift = ellipsefit['band'], ellipsefit['refband'], ellipsefit['redshift']
maxsma = ellipsefit['apphot_sma_{}'.format(refband)].max()
smascale = SGA.misc.arcsec2kpc(redshift) # [kpc/arcsec]
yfaint, ybright = 0, 50
for filt in band:
flux = ellipsefit['apphot_mag_{}'.format(filt)]
good = np.where( np.isfinite(flux) * (flux > 0) )[0]
sma = ellipsefit['apphot_sma_{}'.format(filt)][good]
mag = 22.5-2.5*np.log10(flux[good])
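        # Hedged note: the 22.5 zeropoint assumes the aperture photometry is
        # stored in nanomaggies (the Legacy Surveys flux convention), so this
        # is the cumulative AB magnitude inside each aperture.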
ax.plot(sma, mag, label=filt)
#print(filt, np.mean(mag[-5:]))
#print(filt, mag[-5:], np.mean(mag[-5:])
#print(filt, np.min(mag))
if mag.max() > yfaint:
yfaint = mag.max()
if mag.min() < ybright:
ybright = mag.min()
ax.set_xlabel(r'Semi-major Axis $a$ (arcsec)')
ax.set_ylabel('Cumulative Brightness (AB mag)')
ax.set_xlim(0, maxsma)
ax_twin = ax.twiny()
ax_twin.set_xlim( (0, maxsma * smascale) )
ax_twin.set_xlabel('Semi-major Axis $a$ (kpc)')
yfaint += 0.5
    ybright -= 0.5
ax.set_ylim(yfaint, ybright)
ax_twin = ax.twinx()
ax_twin.set_ylim(yfaint, ybright)
ax_twin.set_ylabel('Cumulative Brightness (AB mag)')#, rotation=-90)
ax.legend(loc='lower right', fontsize=14, ncol=3)
fig.subplots_adjust(left=0.12, bottom=0.15, top=0.85, right=0.88)
    if png:
        if verbose:
            print('Writing {}'.format(png))
        fig.savefig(png)
        plt.close(fig)
    else:
        plt.show()
def display_multiband(data, geometry=None, mgefit=None, ellipsefit=None, indx=None,
magrange=10, inchperband=3, contours=False, png=None,
verbose=True, vertical=False):
"""Display the multi-band images and, optionally, the isophotal fits based on
either MGE and/or Ellipse.
vertical -- for talks...
"""
from astropy.visualization import AsinhStretch as Stretch
from astropy.visualization import ImageNormalize
band = data['band']
nband = len(band)
#cmap = 'RdBu_r'
#from astropy.visualization import PercentileInterval as Interval
#interval = Interval(0.9)
cmap = 'viridis'
from astropy.visualization import ZScaleInterval as Interval
interval = Interval(contrast=0.9)
#cmap = {'g': 'winter_r', 'r': 'summer', 'z': 'autumn_r'}
#cmap = {'g': 'Blues', 'r': 'Greens', 'z': 'Reds'}
stretch = Stretch(a=0.95)
if vertical:
fig, ax = plt.subplots(3, 1, figsize=(nband, inchperband*nband))
else:
fig, ax = plt.subplots(1, 3, figsize=(inchperband*nband, nband))
for filt, ax1 in zip(band, ax):
img = data['{}_masked'.format(filt)]
#img = data[filt]
norm = ImageNormalize(img, interval=interval, stretch=stretch)
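        # Descriptive comment: ZScaleInterval sets the display limits from the
        # pixel distribution, while the asinh stretch compresses the bright
        # core so the faint outskirts of the galaxy remain visible.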
im = ax1.imshow(img, origin='lower', norm=norm, cmap=cmap, #cmap=cmap[filt],
interpolation='nearest')
plt.text(0.1, 0.9, filt, transform=ax1.transAxes, #fontweight='bold',
ha='center', va='center', color='k', fontsize=16)
if mgefit:
from mge.mge_print_contours import _multi_gauss, _gauss2d_mge
sigmapsf = np.atleast_1d(0)
normpsf = np.atleast_1d(1)
            _magrange = 10**(-0.4*np.arange(0, magrange, 1)[::-1]) # 1 mag/arcsec^2 steps
#_magrange = 10**(-0.4*np.arange(0, magrange, 0.5)[::-1]) # 0.5 mag/arcsec^2 steps
model = _multi_gauss(mgefit[filt].sol, img, sigmapsf, normpsf,
mgefit['xpeak'], mgefit['ypeak'],
mgefit['pa'])
peak = data[filt][mgefit['xpeak'], mgefit['ypeak']]
levels = peak * _magrange
s = img.shape
extent = [0, s[1], 0, s[0]]
ax1.contour(model, levels, colors='k', linestyles='solid',
extent=extent, alpha=0.5, lw=1)
if geometry:
from photutils import EllipticalAperture
ellaper = EllipticalAperture((geometry.x0, geometry.y0), geometry.sma,
geometry.sma*(1 - geometry.eps), geometry.pa)
ellaper.plot(color='k', lw=1, ax=ax1, alpha=0.75)
if ellipsefit:
if ellipsefit['success']:
if len(ellipsefit[filt]) > 0:
if indx is None:
indx = np.ones(len(ellipsefit[filt]), dtype=bool)
nfit = len(indx) # len(ellipsefit[filt])
nplot = np.rint(0.5*nfit).astype('int')
smas = np.linspace(0, ellipsefit[filt].sma[indx].max(), nplot)
for sma in smas:
efit = ellipsefit[filt].get_closest(sma)
x, y, = efit.sampled_coordinates()
ax1.plot(x, y, color='k', lw=1, alpha=0.5)
else:
from photutils import EllipticalAperture
geometry = ellipsefit['geometry']
ellaper = EllipticalAperture((geometry.x0, geometry.y0), geometry.sma,
geometry.sma*(1 - geometry.eps), geometry.pa)
ellaper.plot(color='k', lw=1, ax=ax1, alpha=0.5)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
ax1.axis('off')
#ax1.set_adjustable('box-forced')
ax1.autoscale(False)
if vertical:
fig.subplots_adjust(hspace=0.02, top=0.98, bottom=0.02, left=0.02, right=0.98)
else:
fig.subplots_adjust(wspace=0.02, top=0.98, bottom=0.02, left=0.02, right=0.98)
if png:
if verbose:
print('Writing {}'.format(png))
fig.savefig(png, bbox_inches='tight', pad_inches=0)
plt.close(fig)
else:
plt.show()
def display_ccdpos(onegal, ccds, radius=None, pixscale=0.262,
png=None, verbose=False):
"""Visualize the position of all the CCDs contributing to the image stack of a
single galaxy.
"""
if radius is None:
radius = 100 # [pixels]
wcs = SGA.misc.simple_wcs(onegal, radius=radius, pixscale=pixscale)
width, height = wcs.get_width() * pixscale / 3600, wcs.get_height() * pixscale / 3600 # [degrees]
bb, bbcc = wcs.radec_bounds(), wcs.radec_center() # [degrees]
pad = 0.2 # [degrees]
fig, allax = plt.subplots(1, 3, figsize=(12, 5), sharey=True, sharex=True)
for ax, band in zip(allax, ('g', 'r', 'z')):
ax.set_aspect('equal')
ax.set_xlim(bb[0]+width+pad, bb[0]-pad)
ax.set_ylim(bb[2]-pad, bb[2]+height+pad)
ax.set_xlabel('RA (deg)')
ax.text(0.9, 0.05, band, ha='center', va='bottom',
transform=ax.transAxes, fontsize=18)
if band == 'g':
ax.set_ylabel('Dec (deg)')
ax.get_xaxis().get_major_formatter().set_useOffset(False)
#ax.add_patch(patches.Rectangle((bb[0], bb[2]), bb[1]-bb[0], bb[3]-bb[2],
# fill=False, edgecolor='black', lw=3, ls='--'))
ax.add_patch(patches.Circle((bbcc[0], bbcc[1]), radius * pixscale / 3600,
fill=False, edgecolor='black', lw=2))
ax.add_patch(patches.Circle((bbcc[0], bbcc[1]), 2*radius * pixscale / 3600, # inner sky annulus
fill=False, edgecolor='black', lw=1))
ax.add_patch(patches.Circle((bbcc[0], bbcc[1]), 5*radius * pixscale / 3600, # outer sky annulus
fill=False, edgecolor='black', lw=1))
these = np.where(ccds.filter == band)[0]
col = plt.cm.Set1(np.linspace(0, 1, len(ccds)))
for ii, ccd in enumerate(ccds[these]):
#print(ccd.expnum, ccd.ccdname, ccd.filter)
W, H, ccdwcs = SGA.misc.ccdwcs(ccd)
cc = ccdwcs.radec_bounds()
ax.add_patch(patches.Rectangle((cc[0], cc[2]), cc[1]-cc[0],
cc[3]-cc[2], fill=False, lw=2,
edgecolor=col[these[ii]],
label='ccd{:02d}'.format(these[ii])))
ax.legend(ncol=2, frameon=False, loc='upper left', fontsize=10)
plt.subplots_adjust(bottom=0.15, wspace=0.05, left=0.1, right=0.97, top=0.95)
if png:
if verbose:
print('Writing {}'.format(png))
fig.savefig(png)
plt.close(fig)
else:
plt.show()
| 26,737 | 40.198767 | 109 |
py
|
SGA
|
SGA-main/py/SGA/io.py
|
"""
SGA.io
======
Code to read and write the various SGA files.
"""
import os, warnings
import pickle, pdb
import numpy as np
import numpy.ma as ma
from glob import glob
import fitsio
from astropy.table import Table, Column, hstack
from astropy.io import fits
def custom_brickname(ra, dec):
brickname = '{:08d}{}{:07d}'.format(
int(100000*ra), 'm' if dec < 0 else 'p',
int(100000*np.abs(dec)))
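    # Illustrative example (values chosen to be exact in floating point):
    # custom_brickname(150.0, -1.25) -> '15000000m0125000', i.e. 1e5 times each
    # coordinate, zero-padded, with 'p'/'m' encoding the sign of Dec.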
#brickname = '{:06d}{}{:05d}'.format(
# int(1000*ra), 'm' if dec < 0 else 'p',
# int(1000*np.abs(dec)))
return brickname
def get_raslice(ra):
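    # e.g. get_raslice(23.7) -> '023': the integer part of RA, zero-padded to
    # three digits, presumably used to shard outputs by RA slice.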
return '{:03d}'.format(int(ra))
def SGA_dir():
if 'SGA_DIR' not in os.environ:
        print('Required $SGA_DIR environment variable not set.')
raise EnvironmentError
return os.path.abspath(os.getenv('SGA_DIR'))
def analysis_dir():
adir = os.path.join(SGA_dir(), 'analysis')
if not os.path.isdir(adir):
os.makedirs(adir, exist_ok=True)
return adir
def sample_dir(version=None):
sdir = os.path.join(SGA_dir(), 'sample')
if not os.path.isdir(sdir):
os.makedirs(sdir, exist_ok=True)
if version:
sdir = os.path.join(SGA_dir(), 'sample', version)
if not os.path.isdir(sdir):
os.makedirs(sdir, exist_ok=True)
return sdir
def paper1_dir(figures=True):
pdir = os.path.join(SGA_dir(), 'science', 'paper1')
    if not os.path.isdir(pdir):
os.makedirs(pdir, exist_ok=True)
if figures:
pdir = os.path.join(pdir, 'figures')
        if not os.path.isdir(pdir):
os.makedirs(pdir, exist_ok=True)
return pdir
def html_dir():
#if 'NERSC_HOST' in os.environ:
# htmldir = '/global/project/projectdirs/cosmo/www/temp/ioannis/SGA'
#else:
# htmldir = os.path.join(SGA_dir(), 'html')
htmldir = os.path.join(SGA_dir(), 'html')
if not os.path.isdir(htmldir):
os.makedirs(htmldir, exist_ok=True)
return htmldir
def parent_version(version=None):
"""Version of the parent catalog.
These are the archived versions. For DR9 we reset the counter to start at v3.0!
#version = 'v1.0' # 18may13
#version = 'v2.0' # 18nov14
#version = 'v3.0' # 19sep26
#version = 'v4.0' # 19dec23
#version = 'v5.0' # 20jan30 (dr9e)
#version = 'v6.0' # 20feb25 (DR9-SV)
version = 'v7.0' # 20apr18 (DR9)
"""
if version is None:
#version = 'v1.0' # 18may13
#version = 'v2.0' # DR8 (18nov14)
version = 'v3.0' # DR9
return version
def get_parentfile(version=None, kd=False):
if kd:
suffix = 'kd.fits'
else:
suffix = 'fits'
parentfile = os.path.join(sample_dir(version=version), 'SGA-parent-{}.{}'.format(version, suffix))
return parentfile
def read_parent(columns=None, verbose=False, first=None, last=None,
version=None, chaos=False):
"""Read the SGA parent catalog.
"""
if version is None:
version = parent_version()
parentfile = get_parentfile(version=version)
if first and last:
if first > last:
print('Index first cannot be greater than index last, {} > {}'.format(first, last))
raise ValueError()
ext = 1
info = fitsio.FITS(parentfile)
nrows = info[ext].get_nrows()
rows = None
# Read the CHAOS sample.
if chaos:
allgals = info[1].read(columns='GALAXY')
rows = np.hstack( [np.where(np.isin(allgals, chaosgal.encode('utf-8')))[0]
for chaosgal in ('NGC0628', 'NGC5194', 'NGC5457', 'NGC3184')] )
rows = np.sort(rows)
nrows = len(rows)
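    # Note on slicing (descriptive comment added here): when `last` is None the
    # range is exclusive, arange(first, last); when `last` is given explicitly
    # the range includes `last`, arange(first, last+1).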
if first is None:
first = 0
if last is None:
last = nrows
if rows is None:
rows = np.arange(first, last)
else:
rows = rows[np.arange(first, last)]
else:
if last >= nrows:
print('Index last cannot be greater than the number of rows, {} >= {}'.format(last, nrows))
raise ValueError()
if rows is None:
rows = np.arange(first, last+1)
else:
rows = rows[np.arange(first, last+1)]
parent = Table(info[ext].read(rows=rows, upper=True, columns=columns))
if verbose:
if len(rows) == 1:
print('Read galaxy index {} from {}'.format(first, parentfile))
else:
print('Read galaxy indices {} through {} (N={}) from {}'.format(
first, last, len(parent), parentfile))
## Temporary hack to add the data release number, PSF size, and distance.
#if chaos:
# parent.add_column(Column(name='DR', dtype='S3', length=len(parent)))
# gal2dr = {'NGC0628': 'DR7', 'NGC5194': 'DR6', 'NGC5457': 'DR6', 'NGC3184': 'DR6'}
# for ii, gal in enumerate(np.atleast_1d(parent['GALAXY'])):
# if gal in gal2dr.keys():
# parent['DR'][ii] = gal2dr[gal]
return parent
def read_desi_tiles(verbose=False):
"""Read the latest DESI tile file.
"""
tilefile = os.path.join(sample_dir(), 'catalogs', 'desi-tiles.fits')
tiles = Table(fitsio.read(tilefile, ext=1, upper=True))
tiles = tiles[tiles['IN_DESI'] > 0]
if verbose:
print('Read {} DESI tiles from {}'.format(len(tiles), tilefile))
return tiles
def read_tycho(magcut=99, verbose=False):
"""Read the Tycho 2 catalog.
"""
tycho2 = os.path.join(sample_dir(), 'catalogs', 'tycho2.kd.fits')
tycho = Table(fitsio.read(tycho2, ext=1, upper=True))
tycho = tycho[np.logical_and(tycho['ISGALAXY'] == 0, tycho['MAG_BT'] <= magcut)]
if verbose:
print('Read {} Tycho-2 stars with B<{:.1f}.'.format(len(tycho), magcut), flush=True)
# Radius of influence; see eq. 9 of https://arxiv.org/pdf/1203.6594.pdf
#tycho['RADIUS'] = (0.0802*(tycho['MAG_BT'])**2 - 1.860*tycho['MAG_BT'] + 11.625) / 60 # [degree]
# From https://github.com/legacysurvey/legacypipe/blob/large-gals-only/py/legacypipe/runbrick.py#L1668
# Note that the factor of 0.262 has nothing to do with the DECam pixel scale!
tycho['RADIUS'] = np.minimum(1800., 150. * 2.5**((11. - tycho['MAG_BT']) / 4) ) * 0.262 / 3600
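    # In words (added comment): brighter stars get larger masking radii,
    # 150 * 2.5**((11 - B)/4), capped at 1800; the cap corresponds to
    # 1800 * 0.262 / 3600 ~ 0.13 deg.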
#import matplotlib.pyplot as plt
#oldrad = (0.0802*(tycho['MAG_BT'])**2 - 1.860*tycho['MAG_BT'] + 11.625) / 60 # [degree]
#plt.scatter(tycho['MAG_BT'], oldrad*60, s=1) ; plt.scatter(tycho['MAG_BT'], tycho['RADIUS']*60, s=1) ; plt.show()
#pdb.set_trace()
return tycho
def read_hyperleda(verbose=False, allwise=False, version=None):
"""Read the Hyperleda catalog.
These are the archived versions. For DR9 we reset the counter to start at v3.0!
if version == 'v1.0':
hyperfile = 'hyperleda-d25min10-18may13.fits'
elif version == 'v2.0':
hyperfile = 'hyperleda-d25min10-18nov14.fits'
elif version == 'v3.0':
hyperfile = 'hyperleda-d25min10-18nov14.fits'
elif version == 'v4.0':
hyperfile = 'hyperleda-d25min10-18nov14.fits'
elif version == 'v5.0':
hyperfile = 'hyperleda-d25min10-18nov14.fits'
elif version == 'v6.0':
hyperfile = 'hyperleda-d25min10-18nov14.fits'
elif version == 'v7.0':
hyperfile = 'hyperleda-d25min10-18nov14.fits'
else:
print('Unknown version!')
raise ValueError
"""
if version is None:
version = parent_version()
if version == 'v1.0':
hyperfile = 'hyperleda-d25min10-18may13.fits'
ref = 'LEDA-20180513'
elif version == 'v2.0':
hyperfile = 'hyperleda-d25min10-18nov14.fits'
ref = 'LEDA-20181114'
elif version == 'v3.0':
hyperfile = 'hyperleda-d25min10-18nov14.fits'
ref = 'LEDA-20181114'
else:
print('Unknown version!')
raise ValueError
hyperledafile = os.path.join(sample_dir(), 'hyperleda', hyperfile)
allwisefile = hyperledafile.replace('.fits', '-allwise.fits')
leda = Table(fitsio.read(hyperledafile, ext=1, upper=True))
#leda.add_column(Column(name='GROUPID', dtype='i8', length=len(leda)))
if verbose:
print('Read {} objects from {}'.format(len(leda), hyperledafile), flush=True)
if allwise:
wise = Table(fitsio.read(allwisefile, ext=1, upper=True))
if verbose:
print('Read {} objects from {}'.format(len(wise), allwisefile), flush=True)
# Merge the tables
wise.rename_column('RA', 'WISE_RA')
wise.rename_column('DEC', 'WISE_DEC')
leda = hstack( (leda, wise) )
        leda.add_column(Column(name='IN_ALLWISE', data=np.zeros(len(leda)).astype(bool)))
haswise = np.where(wise['CNTR'] != -1)[0]
#nowise = np.where(wise['CNTR'] == 0)[0]
#print('unWISE match: {}/{} ({:.2f}%) galaxies.'.format(len(haswise), len(leda)))
#print('EXT_FLG summary:')
#for flg in sorted(set(leda['EXT_FLG'][haswise])):
# nn = np.sum(flg == leda['EXT_FLG'][haswise])
# print(' {}: {}/{} ({:.2f}%)'.format(flg, nn, len(haswise), 100*nn/len(haswise)))
#print('Need to think this through a bit more; look at:')
#print(' http://wise2.ipac.caltech.edu/docs/release/allsky/expsup/sec4_4c.html#xsc')
#leda['INWISE'] = (np.array(['NULL' not in dd for dd in wise['DESIGNATION']]) *
# np.isfinite(wise['W1SIGM']) * np.isfinite(wise['W2SIGM']) )
leda['IN_ALLWISE'][haswise] = True
print(' Identified {}/{} ({:.2f}%) objects with AllWISE photometry.'.format(
np.sum(leda['IN_ALLWISE']), len(leda), 100*np.sum(leda['IN_ALLWISE'])/len(leda) ))
# Assign a unique ID and also fix infinite PA and B/A.
leda.add_column(Column(name='SGA_ID', length=len(leda), dtype='i8'), index=0)
leda['SGA_ID'] = np.arange(len(leda))
leda['BYHAND'] = np.zeros(len(leda), bool)
leda['REF'] = ref
fix = np.isnan(leda['PA'])
if np.sum(fix) > 0:
leda['PA'][fix] = 0.0
fix = np.isnan(leda['BA'])
if np.sum(fix) > 0:
leda['BA'][fix] = 1.0
fix = np.isnan(leda['Z'])
if np.sum(fix) > 0:
leda['Z'][fix] = -99.0
return leda
def read_localgroup_dwarfs():
"""Read the sample generated by bin/SGA-localgroup-dwarfs.
"""
dwarfsfile = os.path.join(sample_dir(), 'catalogs', 'SGA-dwarfs.fits')
dwarfs = Table(fitsio.read(dwarfsfile, upper=True))
print('Read {} Local Group dwarfs from {}'.format(len(dwarfs), dwarfsfile))
return dwarfs
#def in_footprint(parent, verbose=False):
# """Find all galaxies in the DESI footprint.
#
# """
# import time
# import healpy as hp
# import legacyhalos.misc
#
# tiles = read_desi_tiles(verbose=verbose)
# indesi = SGA.misc.is_point_in_desi(tiles, parent['RA'], parent['DEC']).astype(bool)
#
# t0 = time.time()
#
# return parent
def in_footprint(parent, nside=2048, dr='dr9'):
"""Find all galaxies in the DESI footprint.
"""
import time
import healpy as hp
import legacyhalos.misc
#tiles = SGA.io.read_desi_tiles(verbose=verbose)
#indesi = SGA.misc.is_point_in_desi(tiles, parent['RA'], parent['DEC']).astype(bool)
parentpix = legacyhalos.misc.radec2pix(nside, parent['RA'], parent['DEC'])
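    # Strategy (descriptive comment): galaxies and CCDs are both mapped to
    # nside healpixels (2048 by default) and matched with np.isin below; the
    # CCD pixel list is padded with its neighbours so galaxies near a
    # healpixel edge are not missed.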
#parentpix = np.hstack((parentpix, hp.pixelfunc.get_all_neighbours(nside, parentpix, nest=True).flatten()))
drdir = os.path.join(sample_dir(), dr)
bands = ('g', 'r', 'z')
camera = ('90prime', 'mosaic', 'decam')
indesi = dict()
for cam in camera:
for band in bands:
indesi.update({'{}_{}'.format(cam, band): np.zeros(len(parent), dtype=bool)})
#indesi = np.zeros(len(parent), dtype=bool)
t0 = time.time()
for cam, radius in zip(camera, (0.44, 0.21, 0.17)):
if False:
from astrometry.libkd.spherematch import trees_match, tree_open
kdccds = tree_open(os.path.join(drdir, 'survey-ccds-{}-{}.kd.fits'.format(cam, dr)))
I, J, dd = trees_match(kdparent, kdccds, np.radians(radius))#, nearest=True)
else:
ccdsfile = os.path.join(drdir, 'survey-ccds-{}-{}.kd.fits'.format(cam, dr))
ccds = fitsio.read(ccdsfile)
ccds = ccds[ccds['ccd_cuts'] == 0]
print('Read {} CCDs from {}'.format(len(ccds), ccdsfile))
for band in bands:
ww = ccds['filter'] == band
if np.sum(ww) > 0:
# add the neighboring healpixels to protect against edge effects
ccdpix = legacyhalos.misc.radec2pix(nside, ccds['ra'][ww], ccds['dec'][ww])
ccdpix = np.hstack((ccdpix, hp.pixelfunc.get_all_neighbours(nside, ccdpix, nest=True).flatten()))
if np.sum(ccdpix == -1) > 0: # remove the "no neighbors" healpixel, if it exists
ccdpix = np.delete(ccdpix, np.where(ccdpix == -1)[0])
I = np.isin(parentpix, ccdpix)
indesi['{}_{}'.format(cam, band)][I] = True
else:
I = [False]
#print('Found {} galaxies in {} {} footprint in {:.1f} sec'.format(np.sum(I), cam, time.time() - t0))
print(' Found {} galaxies in {} {} footprint.'.format(np.sum(I), cam, band))
print('Total time to find galaxies in footprint = {:.1f} sec'.format(time.time() - t0))
parent['IN_FOOTPRINT_NORTH'] = indesi['90prime_g'] | indesi['90prime_r'] | indesi['mosaic_z']
parent['IN_FOOTPRINT_NORTH_GRZ'] = indesi['90prime_g'] & indesi['90prime_r'] & indesi['mosaic_z']
parent['IN_FOOTPRINT_SOUTH'] = indesi['decam_g'] | indesi['decam_r'] | indesi['decam_z']
parent['IN_FOOTPRINT_SOUTH_GRZ'] = indesi['decam_g'] & indesi['decam_r'] & indesi['decam_z']
parent['IN_FOOTPRINT'] = parent['IN_FOOTPRINT_NORTH'] | parent['IN_FOOTPRINT_SOUTH']
parent['IN_FOOTPRINT_GRZ'] = parent['IN_FOOTPRINT_NORTH_GRZ'] | parent['IN_FOOTPRINT_SOUTH_GRZ']
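    # The *_GRZ flags require coverage in all of g, r and z (&), while the
    # plain flags only require any single band (|); north (90prime/mosaic) and
    # south (decam) are tracked separately before being combined.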
#plt.scatter(parent['RA'], parent['DEC'], s=1)
#plt.scatter(parent['RA'][indesi], parent['DEC'][indesi], s=1)
#plt.xlim(360, 0)
#plt.show()
#bb = parent[parent['IN_FOOTPRINT_NORTH_GRZ'] & parent['IN_FOOTPRINT_SOUTH_GRZ']]
#plt.scatter(bb['RA'], bb['DEC'], s=1)
#plt.xlim(300, 90) ; plt.ylim(30, 36)
#plt.axhline(y=32.375, color='k')
#plt.xlabel('RA') ; plt.ylabel('Dec')
#plt.show()
print(' Identified {}/{} ({:.2f}%) galaxies inside and {}/{} ({:.2f}%) galaxies outside the DESI footprint.'.format(
np.sum(parent['IN_FOOTPRINT']), len(parent), 100*np.sum(parent['IN_FOOTPRINT'])/len(parent), np.sum(~parent['IN_FOOTPRINT']),
len(parent), 100*np.sum(~parent['IN_FOOTPRINT'])/len(parent)))
return parent
| 14,765 | 35.369458 | 133 |
py
|
SGA
|
SGA-main/py/SGA/webapp/settings.py
|
#!/usr/bin/env python
"""Django settings for SGA project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
#BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# This is the "webapp" dir.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'eok&i&(7=!8u%9lr48%pks9x7pfp7b=a6^p)^ldscte+t&_tz+')
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
DEBUG = bool( os.environ.get('DJANGO_DEBUG', True) )
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1',
'testserver',
'lb.cosmo-sga.development.svc.spin.nersc.org',
'sga.legacysurvey.org']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#'SGA.webapp.sample.app.SgaApp',
'SGA.webapp.sample',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'SGA.webapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
)
WSGI_APPLICATION = 'SGA.webapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db', 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
#python manage.py check --deploy recommended settings
#SECURE_HSTS_SECONDS =
#SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
#SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
#X_FRAME_OPTIONS = 'DENY'
| 3,973 | 25.317881 | 102 |
py
|
SGA
|
SGA-main/py/SGA/webapp/wsgi.py
|
"""
WSGI config for SGA webapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SGA.webapp.settings")
application = get_wsgi_application()
| 397 | 22.411765 | 78 |
py
|
SGA
|
SGA-main/py/SGA/webapp/load.py
|
#!/usr/bin/env python
"""Load the input sample into a database table.
"""
import os
import numpy as np
import fitsio
import django
from astropy.table import Table, hstack
DATADIR = '/global/cfs/cdirs/cosmo/data/sga/2020'
#DATADIR = '/global/cfs/cdirs/cosmo/work/legacysurvey/sga/2020'
def main():
from astrometry.util.starutil_numpy import radectoxyz
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SGA.webapp.settings")
django.setup()
from SGA.webapp.sample.models import Sample
sgafile = os.path.join(DATADIR, 'SGA-2020.fits')
sga_columns = ['sga_id', 'galaxy', 'morphtype',
'ra', 'dec',
'ra_leda', 'dec_leda',
'd25_leda', 'pa_leda', 'ba_leda', 'pgc',
'ra_moment', 'dec_moment', 'd26', 'pa', 'ba', 'sma_moment',
'group_id', 'group_name', 'group_ra', 'group_dec', 'group_diameter', 'group_primary',
'g_sma50', 'r_sma50', 'z_sma50',
'sma_sb24', 'sma_sb25', 'sma_sb26',
'g_mag_sb24', 'g_mag_sb25', 'g_mag_sb26',
'r_mag_sb24', 'r_mag_sb25', 'r_mag_sb26',
'z_mag_sb24', 'z_mag_sb25', 'z_mag_sb26',
'g_cog_params_mtot', 'r_cog_params_mtot', 'z_cog_params_mtot',
]
tractor_cols = ['type', 'sersic', 'shape_r', 'shape_e1', 'shape_e2',
'flux_g', 'flux_r', 'flux_z', 'flux_ivar_g', 'flux_ivar_r', 'flux_ivar_z']
sga = Table(fitsio.read(sgafile, ext='ELLIPSE', columns=sga_columns))
sga_tractor = Table(fitsio.read(sgafile, ext='TRACTOR', columns=tractor_cols))
sga = hstack((sga, sga_tractor))
print('Read {} rows from {}'.format(len(sga), sgafile))
sga.rename_column('TYPE', 'TRACTORTYPE')
sga['NICE_GROUP_NAME'] = [gname.replace('_GROUP', ' Group') for gname in sga['GROUP_NAME']]
print(sga.colnames)
xyz = radectoxyz(sga['RA'], sga['DEC'])
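    # radectoxyz returns unit vectors on the celestial sphere; ux/uy/uz are
    # stored per row, presumably so the webapp can run fast cone searches
    # without recomputing trigonometry (assumption based on the column names).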
#xyz = radectoxyz(sga['RA_MOMENT'], sga['DEC_MOMENT'])
#xyz = radectoxyz(sga['RA_LEDA'], sga['DEC_LEDA'])
objs = []
nextpow = 1024
for ii, onegal in enumerate(sga):
if ii == nextpow:
print('Row', ii)
nextpow *= 2
sam = Sample()
sam.row_index = ii
sam.ux = xyz[ii, 0]
sam.uy = xyz[ii, 1]
sam.uz = xyz[ii, 2]
for col in sga.colnames:
val = onegal[col]
            if isinstance(val, (str, np.str_)):
                val = val.strip()
setattr(sam, col.lower(), val)
objs.append(sam)
print('Bulk creating the database.')
Sample.objects.bulk_create(objs)
if __name__ == '__main__':
main()
| 2,737 | 32.390244 | 104 |
py
|
SGA
|
SGA-main/py/SGA/webapp/manage.py
|
#!/usr/bin/env python
"""
Used to run the server for this Django project
"""
import os, sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SGA.webapp.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 593 | 28.7 | 74 |
py
|