repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
inFairness
|
inFairness-main/examples/sentiment-analysis/data.py
|
import torch
import re
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
sns.set_context(rc={'figure.figsize': (9, 9)}, font_scale=2.)
TOKEN_RE = re.compile(r"\w.*?\b")
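# A rough sketch of what this tokenizer produces (the input string is illustrative):
# TOKEN_RE.findall("Don't worry!") gives ['Don', 't', 'worry'] -- each match starts
# at a word character and stops at the next word boundary, so punctuation is dropped
# and contractions are split before casefolding happens later in text_to_sentiment.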
def load_embeddings(filename):
"""
Load a DataFrame from the generalized text format used by word2vec, GloVe,
fastText, and ConceptNet Numberbatch. The main point where they differ is
whether there is an initial line with the dimensions of the matrix.
"""
labels = []
rows = []
with open(filename, encoding='utf-8') as infile:
for i, line in enumerate(infile):
items = line.rstrip().split(' ')
if len(items) == 2:
# This is a header row giving the shape of the matrix
continue
labels.append(items[0])
values = np.array([float(x) for x in items[1:]], 'f')
rows.append(values)
arr = np.vstack(rows)
return pd.DataFrame(arr, index=labels, dtype='f')
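# A minimal usage sketch; the file name is illustrative and stands in for any
# GloVe-style text file with one word followed by its vector per line:
#
#     embeddings = load_embeddings('glove.42B.300d.txt')
#     embeddings.shape        # (vocabulary size, embedding dimension)
#     embeddings.loc['good']  # the embedding vector for the word 'good'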
def load_lexicon(filepath):
"""
    Load a file from Bing Liu's sentiment lexicon, containing
    English words in Latin-1 encoding.
    One file contains a list of positive words, and the other
    contains a list of negative words. The files contain comment
    lines starting with ';' and blank lines, which should be skipped.
"""
lexicon = []
with open(filepath, encoding='latin-1') as infile:
for line in infile:
line = line.rstrip()
if line and not line.startswith(';'):
lexicon.append(line)
return lexicon
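# For orientation, the lexicon files look roughly like this (the ';' comment
# lines and blank lines are exactly what load_lexicon skips):
#
#     ; Opinion Lexicon: Positive
#     ;
#     a+
#     abound
#     abounds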
def load_data(data_path, embeddings_path, state=0):
pos_words = load_lexicon(data_path + '/positive-words.txt')
neg_words = load_lexicon(data_path + '/negative-words.txt')
embeddings = load_embeddings(embeddings_path)
# filter words that do not appear in the embedding index
pos_words = [word for word in pos_words if word in embeddings.index]
neg_words = [word for word in neg_words if word in embeddings.index]
pos_vectors = embeddings.loc[pos_words].dropna()
neg_vectors = embeddings.loc[neg_words].dropna()
vectors = pd.concat([pos_vectors, neg_vectors])
targets = np.array([1 for entry in pos_vectors.index] + [-1 for entry in neg_vectors.index])
labels = list(pos_vectors.index) + list(neg_vectors.index)
train_vectors, test_vectors, train_targets, test_targets, train_vocab, test_vocab = \
train_test_split(vectors, targets, labels, test_size=0.1, random_state=state)
## Data
X_train = train_vectors.values
X_test = test_vectors.values
y_train = train_targets
y_train[y_train == -1] = 0
y_test = test_targets
y_test[y_test == -1] = 0
return embeddings, X_train, X_test, y_train, y_test, train_vocab, test_vocab
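# A hedged usage sketch; the paths are placeholders for wherever the lexicon
# files and the embedding file actually live:
#
#     embeddings, X_train, X_test, y_train, y_test, train_vocab, test_vocab = \
#         load_data('data', 'embeddings/glove.42B.300d.txt', state=0)
#
# X_* hold embedding vectors, y_* hold 0/1 sentiment labels, and *_vocab list
# the words behind each row of the corresponding split.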
def load_test_names(embeddings):
NAMES_BY_ETHNICITY = {
        # These name lists are from the Caliskan et al. appendix describing the
        # Word Embedding Association Test.
'White': [
'Adam', 'Chip', 'Harry', 'Josh', 'Roger', 'Alan', 'Frank', 'Ian', 'Justin',
'Ryan', 'Andrew', 'Fred', 'Jack', 'Matthew', 'Stephen', 'Brad', 'Greg', 'Jed',
'Paul', 'Todd', 'Brandon', 'Hank', 'Jonathan', 'Peter', 'Wilbur', 'Amanda',
'Courtney', 'Heather', 'Melanie', 'Sara', 'Amber', 'Crystal', 'Katie',
'Meredith', 'Shannon', 'Betsy', 'Donna', 'Kristin', 'Nancy', 'Stephanie',
'Bobbie-Sue', 'Ellen', 'Lauren', 'Peggy', 'Sue-Ellen', 'Colleen', 'Emily',
'Megan', 'Rachel', 'Wendy'
],
'Black': [
'Alonzo', 'Jamel', 'Lerone', 'Percell', 'Theo', 'Alphonse', 'Jerome',
'Leroy', 'Rasaan', 'Torrance', 'Darnell', 'Lamar', 'Lionel', 'Rashaun',
'Tyree', 'Deion', 'Lamont', 'Malik', 'Terrence', 'Tyrone', 'Everol',
'Lavon', 'Marcellus', 'Terryl', 'Wardell', 'Aiesha', 'Lashelle', 'Nichelle',
'Shereen', 'Temeka', 'Ebony', 'Latisha', 'Shaniqua', 'Tameisha', 'Teretha',
'Jasmine', 'Latonya', 'Shanise', 'Tanisha', 'Tia', 'Lakisha', 'Latoya',
'Sharise', 'Tashika', 'Yolanda', 'Lashandra', 'Malika', 'Shavonn',
'Tawanda', 'Yvette'
]
}
NAMES_BY_ETHNICITY['White'] = [n.lower() for n in NAMES_BY_ETHNICITY['White'] if n.lower() in embeddings.index]
NAMES_BY_ETHNICITY['Black'] = [n.lower() for n in NAMES_BY_ETHNICITY['Black'] if n.lower() in embeddings.index]
white_female_start = NAMES_BY_ETHNICITY['White'].index('amanda')
black_female_start = NAMES_BY_ETHNICITY['Black'].index('aiesha')
test_gender = white_female_start*['Male'] + (len(NAMES_BY_ETHNICITY['White']) - white_female_start)*['Female']
test_gender += black_female_start*['Male'] + (len(NAMES_BY_ETHNICITY['Black']) - black_female_start)*['Female']
test_df = pd.DataFrame({'name':NAMES_BY_ETHNICITY['White'] + NAMES_BY_ETHNICITY['Black'],
'race':len(NAMES_BY_ETHNICITY['White'])*['White'] + len(NAMES_BY_ETHNICITY['Black'])*['Black'],
'gender':test_gender})
test_names_embed = embeddings.loc[test_df['name']].values
return test_df, test_names_embed
def load_nyc_names(names_path, embeddings):
names_df = pd.read_csv(names_path)
ethnicity_fixed = []
for n in names_df['Ethnicity']:
if n.startswith('BLACK'):
ethnicity_fixed.append('Black')
if n.startswith('WHITE'):
ethnicity_fixed.append('White')
if n.startswith('ASIAN'):
ethnicity_fixed.append('Asian')
if n.startswith('HISPANIC'):
ethnicity_fixed.append('Hispanic')
names_df['Ethnicity'] = ethnicity_fixed
names_df = names_df[np.logical_or(names_df['Ethnicity']=='Black', names_df['Ethnicity']=='White')]
names_df['Child\'s First Name'] = [n.lower() for n in names_df['Child\'s First Name']]
names_from_df = names_df['Child\'s First Name'].values.tolist()
idx_keep = []
for i, n in enumerate(names_from_df):
if n in embeddings.index:
idx_keep.append(i)
names_df = names_df.iloc[idx_keep]
names_from_df = names_df['Child\'s First Name'].values.tolist()
names_embed = embeddings.loc[names_from_df].values
return names_embed
def print_summary(test_df, method_name, test_accuracy):
print(method_name + ' test accuracy %f' % test_accuracy)
mean_sentiments_race = []
for r in ['Black', 'White']:
mean_sent = test_df[method_name + '_logits'][test_df['race']==r].mean()
mean_sentiments_race.append(mean_sent)
print(method_name + ' %s mean sentiment is %f' %(r, mean_sent))
print(method_name + ' race mean sentiment difference is %f\n' % np.abs(mean_sentiments_race[0] - mean_sentiments_race[1]))
mean_sentiments_gender = []
for g in ['Female', 'Male']:
mean_sent = test_df[method_name + '_logits'][test_df['gender']==g].mean()
mean_sentiments_gender.append(mean_sent)
print(method_name + ' %s mean sentiment is %f' %(g, mean_sent))
print(method_name + ' gender mean sentiment difference is %f\n' % np.abs(mean_sentiments_gender[0] - mean_sentiments_gender[1]))
fig, axs = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(6*2, 6))
sns.boxplot(x='race', y=method_name + '_logits', data=test_df, ax=axs[0]).set_title(method_name, fontsize=20)
sns.boxplot(x='gender', y=method_name + '_logits', data=test_df, ax=axs[1]).set_title(method_name, fontsize=20)
axs[0].set_ylim([-0.1, 1.1])
axs[0].set_xlabel('Race', size=18)
axs[0].set_ylabel('Sentiment', size=18, labelpad=-5)
axs[1].set_ylim([-0.1, 1.1])
axs[1].set_xlabel('Gender', size=18)
axs[1].set_ylabel('Sentiment', size=18, labelpad=-5)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show()
return
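# Note on usage (a sketch; the column values are assumed to be produced by the
# caller): print_summary expects test_df to already contain a column named
# '<method_name>_logits' with one sentiment score per test name, e.g.
#
#     test_df['baseline_logits'] = baseline_scores
#     print_summary(test_df, 'baseline', test_accuracy)
#
# where 'baseline' and baseline_scores are purely illustrative names.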
def text_to_sentiment(text, network, embedding, device):
tokens = [token.casefold() for token in TOKEN_RE.findall(text)]
with torch.no_grad():
sentence_embeddings = []
for token in tokens:
vec = embedding.loc[token].dropna()
sentence_embeddings.append(torch.Tensor(vec).view(1, -1))
sentence_embeddings = torch.cat(sentence_embeddings, dim=0).mean(dim=0, keepdim=True).to(device)
sentiment = network(sentence_embeddings)
sentiment = torch.nn.functional.softmax(sentiment.mean(dim=0, keepdim=True), dim=-1)
mean_sentiment = sentiment.data.detach().cpu().numpy()[0]
return mean_sentiment
def format_sentiment_score(score):
    if score[0] > score[1]:
        return 'Negative with score ' + '{:.2f}%'.format(score[0]*100)
elif score[1] > score[0]:
return 'Positive with score ' + '{:.2f}%'.format(score[1]*100)
return 'Neutral with score ' + '{:.2f}%'.format(score[1]*100)
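# Putting the last two helpers together (a sketch; `network`, `embeddings` and
# `device` are assumed to come from the surrounding notebook):
#
#     score = text_to_sentiment('this movie was great', network, embeddings, device)
#     print(format_sentiment_score(score))   # e.g. 'Positive with score 97.12%'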
| 9,005 | 38.327511 | 132 |
py
|
inFairness
|
inFairness-main/examples/synthetic-data/trainer.py
|
class Trainer(object):
"""Main trainer class that orchestrates the entire learning routine
Use this class to start training a model using individual fairness routines
Args:
dataloader (torch.util.data.DataLoader): training data loader
model (inFairness.fairalgo): Individual fairness algorithm
optimizer (torch.optim): Model optimizer
max_iterations (int): Number of training steps
"""
def __init__(self, dataloader, model, optimizer, max_iterations):
self.dataloader = dataloader
self.model = model
self.optimizer = optimizer
self.max_iterations = max_iterations
self._dataloader_iter = iter(self.dataloader)
def run_step(self):
try:
data = next(self._dataloader_iter)
except StopIteration:
self._dataloader_iter = iter(self.dataloader)
data = next(self._dataloader_iter)
if isinstance(data, list) or isinstance(data, tuple):
model_output = self.model(*data)
elif isinstance(data, dict):
model_output = self.model(**data)
else:
raise AttributeError(
"Data format not recognized. Only `list`, `tuple`, and `dict` are recognized."
)
self.optimizer.zero_grad()
model_output.loss.backward()
self.optimizer.step()
def train(self):
self.model.train(True)
for step_count in range(self.max_iterations):
self.run_step()
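# A hedged usage sketch; the dataloader, fairness algorithm and optimizer are
# assumed to be built elsewhere (e.g. in the accompanying notebook):
#
#     trainer = Trainer(dataloader, fairalgo, optimizer, max_iterations=1000)
#     trainer.train()   # runs 1000 steps, re-cycling the dataloader when it is exhausted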
| 1,516 | 29.959184 | 94 |
py
|
inFairness
|
inFairness-main/examples/word-embedding-association-test/utils.py
|
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.utils.random import sample_without_replacement
from itertools import combinations
try: # SciPy >= 0.19
from scipy.special import comb
except ImportError:
from scipy.misc import comb
def statistics(X, Y, A, B):
score_x = np.matmul(X, A.T).mean(axis=1).sum() - np.matmul(X, B.T).mean(axis=1).sum()
score_y = np.matmul(Y, A.T).mean(axis=1).sum() - np.matmul(Y, B.T).mean(axis=1).sum()
return np.abs(score_x - score_y)
def effect_size(X, Y, A, B, Sigma=None):
mean_x = (np.matmul(X, A.T).mean(axis=1) - np.matmul(X, B.T).mean(axis=1)).mean()
mean_y = (np.matmul(Y, A.T).mean(axis=1) - np.matmul(Y, B.T).mean(axis=1)).mean()
XY = np.vstack((X,Y))
std_xy = (np.matmul(XY, A.T).mean(axis=1) - np.matmul(XY, B.T).mean(axis=1)).std()
return np.abs(mean_x - mean_y)/std_xy
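# For reference, with s(w, A, B) = mean_a <w, a> - mean_b <w, b> computed on the
# embeddings passed in (dot products, i.e. cosine similarities once the rows are
# normalized), the two helpers above compute the usual WEAT quantities:
#   statistics  -> | sum_{x in X} s(x, A, B) - sum_{y in Y} s(y, A, B) |
#   effect_size -> | mean_x s(x, A, B) - mean_y s(y, A, B) | / std_{w in X u Y} s(w, A, B)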
def normalize_list(embeds, Sigma=None, proj=None):
res = []
for e in embeds:
if proj is not None:
e = np.matmul(e, proj)
if Sigma is None:
res.append(normalize(e))
else:
sigma_norm = np.sqrt(np.einsum('ij,ij->i', np.matmul(e, Sigma), e))
res.append(e/sigma_norm.reshape(-1,1))
return res
def run_test(X, Y, A, B, Sigma=None, proj=None, n_combinations=50000):
X, Y, A, B = normalize_list([X, Y, A, B], Sigma=Sigma, proj=proj)
if Sigma is not None:
A = np.matmul(A, Sigma)
B = np.matmul(B, Sigma)
base_statistics = statistics(X, Y, A, B)
union_XY = np.vstack((X, Y))
xy_size = union_XY.shape[0]
x_size = X.shape[0]
count = 0
all_idx = set(range(xy_size))
    if comb(xy_size, x_size) > n_combinations:
        n_permutations = n_combinations
        for _ in range(n_combinations):
            group_1_idx = sample_without_replacement(xy_size, x_size)
            group_2_idx = list(all_idx.difference(group_1_idx))
            sample_stat = statistics(union_XY[group_1_idx], union_XY[group_2_idx], A, B)
            count += sample_stat > base_statistics
    else:
        # Exhaustive enumeration: divide by the number of splits actually evaluated,
        # not by n_combinations, so the p-value stays a proper proportion.
        n_permutations = 0
        for group_1_idx in combinations(range(xy_size), x_size):
            n_permutations += 1
            group_2_idx = list(all_idx.difference(group_1_idx))
            sample_stat = statistics(union_XY[list(group_1_idx)], union_XY[group_2_idx], A, B)
            count += sample_stat > base_statistics
    p_val = count / n_permutations
effect_val = effect_size(X, Y, A, B)
return p_val, effect_val
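# A minimal sketch of calling the permutation test on random data; the shapes are
# illustrative (in the notebooks X/Y are name embeddings, A/B attribute-word embeddings):
#
#     X, Y = np.random.rand(8, 50), np.random.rand(8, 50)
#     A, B = np.random.rand(10, 50), np.random.rand(10, 50)
#     p_val, effect = run_test(X, Y, A, B, n_combinations=2000)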
| 2,492 | 33.150685 | 94 |
py
|
inFairness
|
inFairness-main/examples/word-embedding-association-test/data.py
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils.random import sample_without_replacement
from sklearn.decomposition import TruncatedSVD
def load_embeddings(filename):
"""
Load a DataFrame from the generalized text format used by word2vec, GloVe,
fastText, and ConceptNet Numberbatch. The main point where they differ is
whether there is an initial line with the dimensions of the matrix.
"""
labels = []
rows = []
with open(filename, encoding='utf-8') as infile:
for i, line in enumerate(infile):
items = line.rstrip().split(' ')
if len(items) == 2:
# This is a header row giving the shape of the matrix
continue
labels.append(items[0])
values = np.array([float(x) for x in items[1:]], 'f')
rows.append(values)
arr = np.vstack(rows)
return pd.DataFrame(arr, index=labels, dtype='f')
def load_lexicon(filename):
"""
Load a file from Bing Liu's sentiment lexicon
(https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html), containing
English words in Latin-1 encoding.
One file contains a list of positive words, and the other contains
a list of negative words. The files contain comment lines starting
with ';' and blank lines, which should be skipped.
"""
lexicon = []
with open(filename, encoding='latin-1') as infile:
for line in infile:
line = line.rstrip()
if line and not line.startswith(';'):
lexicon.append(line)
return lexicon
def load_nyc_names(embeddings, names_path):
names_df = pd.read_csv(names_path + 'names.csv')
ethnicity_fixed = []
for n in names_df['Ethnicity']:
if n.startswith('BLACK'):
ethnicity_fixed.append('Black')
if n.startswith('WHITE'):
ethnicity_fixed.append('White')
if n.startswith('ASIAN'):
ethnicity_fixed.append('Asian')
if n.startswith('HISPANIC'):
ethnicity_fixed.append('Hispanic')
names_df['Ethnicity'] = ethnicity_fixed
names_df = names_df[np.logical_or(names_df['Ethnicity']=='Black', names_df['Ethnicity']=='White')]
names_df['Child\'s First Name'] = [n.lower() for n in names_df['Child\'s First Name']]
names_from_df = names_df['Child\'s First Name'].values.tolist()
idx_keep = []
for i, n in enumerate(names_from_df):
if n in embeddings.index:
idx_keep.append(i)
names_df = names_df.iloc[idx_keep]
names_from_df = names_df['Child\'s First Name'].values.tolist()
all_names_embed = embeddings.loc[names_from_df].values
return all_names_embed, names_from_df
def load_data(data_path, embeddings_path, state=None, names_path=None):
pos_words = load_lexicon(data_path + '/positive-words.txt')
neg_words = load_lexicon(data_path + '/negative-words.txt')
embeddings = load_embeddings(embeddings_path)
# filter words that do not appear in the embedding index
pos_words = [word for word in pos_words if word in embeddings.index]
neg_words = [word for word in neg_words if word in embeddings.index]
pos_vectors = embeddings.loc[pos_words].dropna()
neg_vectors = embeddings.loc[neg_words].dropna()
vectors = pd.concat([pos_vectors, neg_vectors])
targets = np.array([1 for entry in pos_vectors.index] + [-1 for entry in neg_vectors.index])
labels = list(pos_vectors.index) + list(neg_vectors.index)
if names_path is not None:
all_names_embed, names_from_df = load_nyc_names(embeddings, names_path)
else:
all_names_embed, names_from_df = None, None
if state is None:
X = vectors.values
return embeddings, X, targets, labels, all_names_embed, names_from_df
else:
train_vectors, test_vectors, train_targets, test_targets, train_vocab, test_vocab = \
train_test_split(vectors, targets, labels, test_size=0.1, random_state=state)
## Data
X_train = train_vectors.values
X_test = test_vectors.values
# Encoding y
one_hot = OneHotEncoder(sparse=False, categories='auto')
one_hot.fit(np.array(train_targets).reshape(-1,1))
y_train = one_hot.transform(np.array(train_targets).reshape(-1,1))
y_test = one_hot.transform(np.array(test_targets).reshape(-1,1))
return embeddings, X_train, X_test, y_train, y_test, train_vocab, test_vocab, all_names_embed, names_from_df
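# Usage sketch for the two calling modes (the paths are placeholders):
#
#     # state=None: no train/test split, targets stay in {+1, -1}
#     embeddings, X, targets, labels, names_embed, names = \
#         load_data('data', 'embeddings/glove.42B.300d.txt')
#
#     # integer state: 90/10 split with one-hot encoded labels, plus NYC names
#     embeddings, X_tr, X_te, y_tr, y_te, voc_tr, voc_te, names_embed, names = \
#         load_data('data', 'embeddings/glove.42B.300d.txt', state=0, names_path='data/')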
| 4,721 | 37.704918 | 116 |
py
|
inFairness
|
inFairness-main/tests/postprocessing/test_data_ds.py
|
import pytest
import torch
import numpy as np
from inFairness.distances import EuclideanDistance
from inFairness.postprocessing.data_ds import PostProcessingDataStore
def test_add_data():
ntries = 10
B, D = 10, 50
distance_x = EuclideanDistance()
data_ds = PostProcessingDataStore(distance_x)
counter = 0
for _ in range(ntries):
X = torch.rand(size=(B, D))
Y = torch.rand(size=(B,))
counter += B
data_ds.add_datapoints(X, Y)
assert data_ds.n_samples == counter
assert np.array_equal(
list(data_ds.distance_matrix.shape),
[counter, counter]
)
def test_reset_data():
B, D = 10, 50
distance_x = EuclideanDistance()
data_ds = PostProcessingDataStore(distance_x)
X = torch.rand(size=(B, D))
Y = torch.rand(size=(B,))
data_ds.add_datapoints(X, Y)
assert data_ds.n_samples == B
assert np.array_equal(list(data_ds.distance_matrix.shape), [B, B])
data_ds.reset()
assert data_ds.n_samples == 0
assert data_ds.distance_matrix is None
assert data_ds.data_X is None
assert data_ds.data_Y is None
| 1,153 | 22.08 | 70 |
py
|
inFairness
|
inFairness-main/tests/postprocessing/test_glif.py
|
import pytest
import torch
import torch.nn.functional as F
import numpy as np
from inFairness.distances import EuclideanDistance
from inFairness.postprocessing import GraphLaplacianIF
def test_postprocess_incorrectargs():
params = (1.0, 1.0, 100.0, True)
dist_x = EuclideanDistance()
pp = GraphLaplacianIF(dist_x, True)
with pytest.raises(AssertionError):
pp.postprocess(None, *params)
with pytest.raises(AssertionError):
pp.postprocess("coordinate-descent", *params)
@pytest.mark.parametrize(
"lambda_param,scale,threshold,normalize,dim,output_probas",
[
(1.0, 1.0, 100.0, True, 2, True),
(1.0, 1.0, 100.0, False, 2, True),
(1.0, 1.0, 100.0, True, 10, True),
(1.0, 1.0, 100.0, False, 10, True),
(1.0, 1.0, 100.0, True, 2, False),
(1.0, 1.0, 100.0, False, 2, False),
(1.0, 1.0, 100.0, True, 10, False),
(1.0, 1.0, 100.0, False, 10, False),
],
)
def test_postprocess_exact(lambda_param, scale, threshold, normalize, dim, output_probas):
B, E = 50, 100
X = torch.rand(size=(B, E))
Y = torch.rand(size=(B, dim))
if output_probas:
Y = F.softmax(Y, dim=-1)
dist_x = EuclideanDistance()
pp = GraphLaplacianIF(dist_x, is_output_probas=output_probas)
pp.add_datapoints(X, Y)
exact_solution = pp.postprocess("exact", lambda_param, scale, threshold, normalize)
assert np.array_equal(list(Y.shape), list(exact_solution.y_solution.shape))
coo_solution = pp.postprocess(
"coordinate-descent", lambda_param, scale,
threshold, normalize, batchsize=16, epochs=50
)
assert np.array_equal(list(Y.shape), list(coo_solution.y_solution.shape))
exact_obj = exact_solution.objective
coo_obj = coo_solution.objective
for key in ['y_dist', 'L_objective', 'overall_objective']:
assert abs(exact_obj[key] - coo_obj[key]) < 1e-3
| 1,926 | 29.109375 | 90 |
py
|
inFairness
|
inFairness-main/tests/distances/test_common_distances.py
|
import pytest
import math
import torch
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from inFairness import distances
def test_euclidean_distance():
dist = distances.EuclideanDistance()
X = torch.FloatTensor([[0.0, 0.0], [1.0, 1.0]])
Y = torch.FloatTensor([[1.0, 1.0], [1.0, 1.0]])
res = torch.FloatTensor([[math.sqrt(2)], [0.0]])
assert torch.all(dist(X, Y) == res)
def test_protected_euclidean_distance():
    protected_attrs = [1]  # Make the second dimension a protected attribute
num_attrs = 3
dist = distances.ProtectedEuclideanDistance()
dist.fit(protected_attrs, num_attrs)
X = torch.FloatTensor(
[
[0.0, 1.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 2.0, 1.0],
]
)
Y = torch.FloatTensor(
[
[1.0, 0.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 2.0, 1.0],
[1.0, 2.0, 1.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 1.0],
]
)
res = torch.FloatTensor(
[[math.sqrt(2)], [math.sqrt(2)], [math.sqrt(2)], [0.0], [0.0], [0.0]]
)
assert torch.all(dist(X, Y) == res), f"{dist(X, Y)} :: {res}"
@pytest.mark.parametrize(
"itemwise_dist",
[(False), (True)],
)
def test_svd_sensitive_subspace_distance(itemwise_dist):
n_samples = 10
n_features = 50
n_components = 10
X_train = torch.rand((100, n_features))
n_samples = 10
X1 = torch.rand((n_samples, n_features)).requires_grad_()
X2 = torch.rand((n_samples, n_features)).requires_grad_()
metric = distances.SVDSensitiveSubspaceDistance()
metric.fit(X_train, n_components)
dist = metric(X1, X2, itemwise_dist)
if itemwise_dist:
assert list(dist.shape) == [n_samples, 1]
assert dist.requires_grad == True
else:
assert list(dist.shape) == [1, n_samples, n_samples]
assert dist.requires_grad == True
if itemwise_dist:
dist = metric(X1, X1, itemwise_dist)
assert torch.all(dist == 0)
assert dist.requires_grad == True
@pytest.mark.parametrize(
"itemwise_dist",
[(False), (True)],
)
def test_svd_sensitive_subspace_distance_multiple_similar_data(itemwise_dist):
n_samples = 10
n_features = 50
n_components = 10
X_train = [torch.rand((100, n_features)) for _ in range(10)]
n_samples = 10
X1 = torch.rand((n_samples, n_features)).requires_grad_()
X2 = torch.rand((n_samples, n_features)).requires_grad_()
metric = distances.SVDSensitiveSubspaceDistance()
metric.fit(X_train, n_components)
dist = metric(X1, X2, itemwise_dist)
if itemwise_dist:
assert list(dist.shape) == [n_samples, 1]
assert dist.requires_grad == True
else:
assert list(dist.shape) == [1, n_samples, n_samples]
assert dist.requires_grad == True
if itemwise_dist:
dist = metric(X1, X1, itemwise_dist)
assert torch.all(dist == 0)
assert dist.requires_grad == True
def test_svd_sensitive_subspace_distance_raises_error():
n_components = 10
X_train = None
metric = distances.SVDSensitiveSubspaceDistance()
with pytest.raises(TypeError):
metric.fit(X_train, n_components)
@pytest.mark.parametrize(
"itemwise_dist",
[(False), (True)],
)
def test_explore_sensitive_subspace_distance(itemwise_dist):
n_features = 50
n_samples = 100
X1 = torch.rand((n_samples, n_features)).requires_grad_()
X2 = torch.rand((n_samples, n_features)).requires_grad_()
Y = torch.randint(low=0, high=2, size=(n_samples,))
metric = distances.EXPLOREDistance()
metric.fit(X1, X2, Y, iters=100, batchsize=8)
dist = metric(X1, X2, itemwise_dist)
if itemwise_dist:
assert list(dist.shape) == [n_samples, 1]
assert dist.requires_grad == True
else:
assert list(dist.shape) == [1, n_samples, n_samples]
assert dist.requires_grad == True
if itemwise_dist:
dist = metric(X1, X1, itemwise_dist)
assert torch.all(dist == 0)
assert dist.requires_grad == True
def test_squared_euclidean_distance():
x1 = 2 * torch.ones(2)
x2 = torch.zeros(2)
dist = distances.SquaredEuclideanDistance()
dist.fit(num_dims=2)
distx1x2 = dist(x1, x2, True)
assert distx1x2.item() == 8
distx1x1 = dist(x1, x1, True)
assert distx1x1 == 0
def test_logistic_reg_distance_protected_idx():
X_train = torch.rand(size=(100, 3))
mean = X_train.mean(dim=0, keepdim=True)
std = X_train.std(dim=0, keepdim=True)
X_train = (X_train - mean) / std
protected_attr = torch.randint(low=0, high=2, size=(100, 1))
X_train[:, 0:1] += protected_attr
X_train = torch.hstack((X_train, protected_attr))
dist = distances.LogisticRegSensitiveSubspace()
dist.fit(X_train, protected_idxs=[3])
assert dist.basis_vectors_.shape == (4, 2)
assert dist.basis_vectors_[0, 0] > dist.basis_vectors_[1, 0]
assert len(dist.logistic_regression_models) == 1
for model in dist.logistic_regression_models:
assert isinstance(model, LogisticRegression)
def test_logistic_reg_distance_no_protected_idx():
X_train = torch.rand(size=(100, 5))
protected_attr = torch.randint(low=0, high=2, size=(100, 2)).long()
dist = distances.LogisticRegSensitiveSubspace()
dist.fit(X_train, data_SensitiveAttrs=protected_attr)
assert dist.basis_vectors_.shape == (5, 2)
assert len(dist.logistic_regression_models) == 2
for model in dist.logistic_regression_models:
assert isinstance(model, LogisticRegression)
def test_logistic_reg_distance_raises_error():
X_train = torch.rand(size=(100, 5))
protected_attr = torch.randint(low=0, high=2, size=(100, 2)).long()
dist = distances.LogisticRegSensitiveSubspace()
with pytest.raises(AssertionError):
dist.fit(X_train, data_SensitiveAttrs=protected_attr, protected_idxs=[1, 2])
protected_attr = torch.randint(low=0, high=6, size=(100, 2)).long()
dist = distances.LogisticRegSensitiveSubspace()
with pytest.raises(AssertionError):
dist.fit(X_train, protected_attr)
def test_wasserstein_distance():
"""
    Uses the SquaredEuclidean special case of a Mahalanobis distance to minimize the
    Wasserstein distance between two batches of elements.
"""
squared_euclidean = distances.SquaredEuclideanDistance()
squared_euclidean.fit(num_dims=2)
sigma = squared_euclidean.sigma
wasserstein_dist = distances.WassersteinDistance()
wasserstein_dist.fit(sigma)
x1 = torch.randn(3, 10, 2)
x2 = torch.nn.Parameter(torch.ones_like(x1))
optimizer = torch.optim.Adam([x2], lr=0.01)
for i in range(1000):
optimizer.zero_grad()
loss = wasserstein_dist(x1, x2).sum()
loss.backward()
optimizer.step()
"""
    If the two sets are close in Euclidean space, the sums of their elements
    should add up to similar values.
"""
assert (torch.abs(x1.sum(dim=1).sum(dim=1) - x2.sum(dim=1).sum(dim=1)) < 3.0).all()
| 7,261 | 26.507576 | 112 |
py
|
inFairness
|
inFairness-main/tests/distances/test_distance_state.py
|
import pytest
import torch
from inFairness import distances
def test_mahalanobis_dist_state_buffer_set():
dist = distances.MahalanobisDistances()
sigma = torch.rand(size=(10, 10))
dist.fit(sigma)
state_dict = dist.state_dict()
assert "sigma" in state_dict
assert torch.all(state_dict["sigma"] == sigma)
sigma = torch.rand(size=(10, 10))
dist.fit(sigma)
state_dict = dist.state_dict()
assert "sigma" in state_dict
assert torch.all(state_dict["sigma"] == sigma)
def test_mahalanobis_dist_state_update():
dist = distances.MahalanobisDistances()
sigma = torch.rand(size=(10, 10))
dist.fit(sigma)
state_dict = dist.state_dict()
assert "sigma" in state_dict
assert torch.all(state_dict["sigma"] == sigma)
dist1 = distances.MahalanobisDistances()
dist1.load_state_dict(state_dict)
state_dict1 = dist1.state_dict()
assert "sigma" in state_dict1
assert torch.all(state_dict1["sigma"] == sigma)
def test_squared_euclidean_dist_state():
dist = distances.SquaredEuclideanDistance()
dist.fit(num_dims=5)
state_dict = dist.state_dict()
assert "sigma" in state_dict
assert torch.all(torch.eye(5) == state_dict["sigma"])
def test_protected_euclidean_dist_state():
protected_attrs = [1]
num_attrs = 3
dist = distances.ProtectedEuclideanDistance()
dist.fit(protected_attrs, num_attrs)
protected_vec = torch.ones(num_attrs)
protected_vec[protected_attrs] = 0.0
state_dict = dist.state_dict()
assert "protected_vector" in state_dict
assert torch.all(protected_vec == state_dict["protected_vector"])
def test_svd_distance_state():
n_features = 50
n_components = 10
X_train = torch.rand((100, n_features))
metric = distances.SVDSensitiveSubspaceDistance()
metric.fit(X_train, n_components)
state = metric.state_dict()
assert "sigma" in state
sigma = state["sigma"]
assert sigma.shape == (n_features, n_features)
metric_new = distances.SVDSensitiveSubspaceDistance()
metric_new.load_state_dict(state)
new_state = metric_new.state_dict()
assert torch.all(new_state["sigma"] == sigma)
def test_explore_distance_state():
n_features = 50
n_samples = 100
X1 = torch.rand((n_samples, n_features)).requires_grad_()
X2 = torch.rand((n_samples, n_features)).requires_grad_()
Y = torch.randint(low=0, high=2, size=(n_samples,))
metric = distances.EXPLOREDistance()
metric.fit(X1, X2, Y, iters=100, batchsize=8)
state = metric.state_dict()
assert "sigma" in state
sigma = state["sigma"]
assert sigma.shape == (n_features, n_features)
metric_new = distances.EXPLOREDistance()
metric_new.load_state_dict(state)
new_state = metric_new.state_dict()
assert torch.all(new_state["sigma"] == sigma)
def test_logreg_distance_state():
n_samples, n_features = 100, 3
X_train = torch.rand(size=(n_samples, n_features))
mean = X_train.mean(dim=0, keepdim=True)
std = X_train.std(dim=0, keepdim=True)
X_train = (X_train - mean) / std
protected_attr = torch.randint(low=0, high=2, size=(n_samples, 1))
X_train[:, 0:1] += protected_attr
X_train = torch.hstack((X_train, protected_attr))
metric = distances.LogisticRegSensitiveSubspace()
metric.fit(X_train, protected_idxs=[3])
state = metric.state_dict()
assert "sigma" in state
sigma = state["sigma"]
assert sigma.shape == (n_features+1, n_features+1)
    metric_new = distances.LogisticRegSensitiveSubspace()
metric_new.load_state_dict(state)
new_state = metric_new.state_dict()
assert torch.all(new_state["sigma"] == sigma)
def test_wasserstein_dist_state():
squared_euclidean = distances.SquaredEuclideanDistance()
squared_euclidean.fit(num_dims=2)
sigma = squared_euclidean.sigma
wasserstein_dist = distances.WassersteinDistance()
wasserstein_dist.fit(sigma)
state = wasserstein_dist.state_dict()
assert "sigma" in state
assert torch.all(state["sigma"] == sigma)
metric_new = distances.WassersteinDistance()
metric_new.load_state_dict(state)
new_state = metric_new.state_dict()
assert torch.all(new_state["sigma"] == sigma)
| 4,247 | 26.230769 | 70 |
py
|
inFairness
|
inFairness-main/tests/utils/test_datautils.py
|
import pytest
import numpy as np
from inFairness.utils import datautils
from inFairness.utils.datautils import include_exclude_terms
def test_datapair_generation_1data_random():
# Generate data pairs fewer than possible
data = np.random.random(size=(100, 5, 5))
npairs = 10
pair_idxs = datautils.generate_data_pairs(n_pairs=npairs, datasamples_1=data)
assert pair_idxs.shape == (npairs, 2)
# Generate data pairs same as possible
data = np.random.random(size=(10,))
npairs = 100
pair_idxs = datautils.generate_data_pairs(n_pairs=npairs, datasamples_1=data)
assert pair_idxs.shape == (npairs, 2)
    # Generate more data pairs than possible; this should raise an error
data = np.random.random(size=(10,))
npairs = 101
with pytest.raises(Exception):
pair_idxs = datautils.generate_data_pairs(n_pairs=npairs, datasamples_1=data)
def test_datapair_generation_2data_random():
# Generate data pairs fewer than possible
data1 = np.random.random(size=(100, 5, 5))
data2 = np.random.random(size=(200, 3))
npairs = 10
pair_idxs = datautils.generate_data_pairs(
n_pairs=npairs, datasamples_1=data1, datasamples_2=data2
)
assert pair_idxs.shape == (npairs, 2)
# Generate data pairs same as total possible
data1 = np.random.random(size=(10,))
data2 = np.random.random(size=(20, 1, 4))
npairs = 200
pair_idxs = datautils.generate_data_pairs(
n_pairs=npairs, datasamples_1=data1, datasamples_2=data2
)
assert pair_idxs.shape == (npairs, 2)
    # Generate more data pairs than possible; this should raise an error
data1 = np.random.random(size=(10, 6, 2))
data2 = np.random.random(size=(5, 2))
npairs = 51
with pytest.raises(Exception):
pair_idxs = datautils.generate_data_pairs(
n_pairs=npairs, datasamples_1=data1, datasamples_2=data2
)
def test_datapair_generation_1data_comparator():
# Generate data pairs fewer than possible
data = np.random.random(size=(100, 5, 5))
npairs = 10
comparator = lambda x, y: np.array_equal(x, y)
pair_idxs = datautils.generate_data_pairs(
n_pairs=npairs, datasamples_1=data, comparator=comparator
)
assert pair_idxs.shape == (npairs, 2)
    # Generate more data pairs than possible; this should raise an error
data = np.random.random(size=(10,))
npairs = 11
comparator = lambda x, y: np.array_equal(x, y)
with pytest.raises(Exception):
pair_idxs = datautils.generate_data_pairs(
n_pairs=npairs, datasamples_1=data, comparator=comparator
)
def test_datapair_generation_2data_comparator():
# Generate data pairs fewer than possible
data1 = np.random.random(size=(100, 5, 5))
data2 = np.random.random(size=(50, 5, 5))
npairs = 10
comparator = lambda x, y: not np.array_equal(x, y)
pair_idxs = datautils.generate_data_pairs(
n_pairs=npairs, datasamples_1=data1, datasamples_2=data2, comparator=comparator
)
assert pair_idxs.shape == (npairs, 2)
    # Generate more data pairs than possible; this should raise an error
data1 = np.random.random(size=(10, 5, 5))
data2 = data1 + 1.0
npairs = 1
comparator = lambda x, y: np.array_equal(x, y)
with pytest.raises(Exception):
pair_idxs = datautils.generate_data_pairs(
n_pairs=npairs,
datasamples_1=data1,
datasamples_2=data2,
comparator=comparator,
)
def test_include_exclude_terms():
data_terms = ["a", "c", "b"]
terms = include_exclude_terms(data_terms, include=["b", "c"])
assert terms == ["b", "c"]
terms = include_exclude_terms(data_terms, exclude=["a"])
assert terms == ["b", "c"]
terms = include_exclude_terms(data_terms)
assert terms == ["a", "b", "c"]
| 3,817 | 30.04065 | 87 |
py
|
inFairness
|
inFairness-main/tests/utils/test_normalized_discounted_cumulative_gain.py
|
import torch
import inFairness.utils.ndcg as ndcg
def test_normalized_discounted_cumulative_gain():
x = torch.tensor([10, 8.0, 1.0])
assert ndcg.normalized_discounted_cumulative_gain(x) == 1.0
x = torch.tensor([1.,2,3])
assert ndcg.normalized_discounted_cumulative_gain(x) - 0.7397 < 0.01
batch_x = torch.arange(8, dtype=torch.float).reshape(2,4)
assert (ndcg.vect_normalized_discounted_cumulative_gain(batch_x) - 0.6447 < 1e-2).all()
batch_x,_ = torch.sort(batch_x, descending=True, dim=1)
assert (ndcg.vect_normalized_discounted_cumulative_gain(batch_x) - 1. < 1e-2).all()
| 597 | 32.222222 | 89 |
py
|
inFairness
|
inFairness-main/tests/utils/test_plackett_luce.py
|
import torch
from torch.nn.parameter import Parameter
from functorch import vmap
from inFairness.utils import plackett_luce
from inFairness.utils.plackett_luce import PlackettLuce
from inFairness.utils.ndcg import vect_normalized_discounted_cumulative_gain as v_ndcg
vect_gather = vmap(torch.gather, in_dims=(None,None, 0))
batched_v_ndcg = vmap(v_ndcg, in_dims=(0))
def test_batch_plackett_luce():
"""
    The idea of this test is to use normalized discounted cumulative gain to evaluate how
    well the underlying Plackett-Luce distribution approximates some ideal relevance.
    After optimization, the parameterized dummy_logits should assign the highest value to
    the most relevant item in the query.
"""
relevances1 = torch.arange(3,dtype=torch.float)
relevances2 = torch.arange(2,-1,-1, dtype=torch.float)
relevances = torch.stack([relevances1, relevances2])
montecarlo_samples = 100
dummy_logits = Parameter(torch.randn(2,3))
plackett_luce = PlackettLuce(dummy_logits)
optimizer = torch.optim.Adam([dummy_logits],lr=0.01)
for _ in range(1000):
optimizer.zero_grad()
sampled_indices = plackett_luce.sample((montecarlo_samples,))
log_probs = plackett_luce.log_prob(sampled_indices)
pred_relevances = vect_gather(relevances,1,sampled_indices)
utility = -batched_v_ndcg(pred_relevances)*log_probs
utility.mean().backward()
optimizer.step()
#the dummy logits should be increasing for the increasing relevances and decreasing for the others
dummy_increasing, dummy_decreasing = dummy_logits[0], dummy_logits[1]
assert all([(dummy_increasing[i] <= dummy_increasing[i+1]).item() for i in range(2)])
assert all([(dummy_decreasing[i] >= dummy_decreasing[i+1]).item() for i in range(2)])
| 1,752 | 34.77551 | 100 |
py
|
inFairness
|
inFairness-main/tests/utils/test_initializer.py
|
import pytest
from inFairness.utils.misc import initializer
def test_initializer():
class MyClass:
@initializer
def __init__(self, a, b=1):
pass
x = MyClass(a=1, b=2)
assert x.a == 1 and x.b == 2
x = MyClass(a=1)
assert x.a == 1 and x.b == 1
if __name__ == "__main__":
test_initializer()
| 345 | 17.210526 | 45 |
py
|
inFairness
|
inFairness-main/tests/auditor/test_senstir_auditor.py
|
import pytest
import torch
from mock import patch
from inFairness.auditor import SenSTIRAuditor
from inFairness.distances import (
SensitiveSubspaceDistance,
SquaredEuclideanDistance,
)
def mock_torch_rand_like(*size):
return torch.ones_like(*size)
@patch("torch.rand_like", mock_torch_rand_like)
def test_sestirauditor_generate_worst_case_examples():
batch_size = 2
query_size = 10
feature_size = 2
num_steps = 1000
lr = 0.005
max_noise = 0.5
min_noise = -0.5
lambda_param = torch.tensor(3000.0)
# let's create a Sensitive subspace distance in the input space
distance_x = SensitiveSubspaceDistance()
# we use the second dimension in the basis vector because the projection complement will give us the first
basis_vectors_ = torch.tensor([[0], [1.]])
distance_x.fit(basis_vectors_)
# distance between sets of items
distance_y = SquaredEuclideanDistance()
distance_y.fit(num_dims=query_size)
auditor = SenSTIRAuditor(
distance_x, distance_y, num_steps, lr, max_noise, min_noise
)
# let's create a dummy network equally sensitive in both dimensions
network = torch.nn.Linear(feature_size, 1, bias=None)
network.weight.data = torch.ones((1, feature_size))
# now some dummy batch of queries
Q = torch.randn(batch_size, query_size, feature_size)
Q_worst = auditor.generate_worst_case_examples(
network, Q, lambda_param, torch.optim.Adam
)
# since the first dimension is sensitive, the examples should differ quite a bit in the second dimension while being similar in the first
first_dim_Q = Q[:, :, 0]
second_dim_Q = Q[:, :, 1]
first_dim_Q_worst = Q_worst[:, :, 0]
second_dim_Q_worst = Q_worst[:, :, 1]
# if two sets differ, their values should add to a high value
assert (torch.abs(second_dim_Q.sum(1) - second_dim_Q_worst.sum(1)) > 10.0).all()
# if two sets are close, their sum should add to a similar value
assert (torch.abs(first_dim_Q.sum(1) - first_dim_Q_worst.sum(1)) < 1.0).all()
| 2,070 | 30.378788 | 141 |
py
|
inFairness
|
inFairness-main/tests/auditor/test_sensei_auditor.py
|
import pytest
import numpy as np
from mock import patch
import torch
from torch.nn import functional as F
from inFairness.auditor import SenSeIAuditor
def mock_adam_optim(
params, lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False
):
return torch.optim.SGD(params, lr=lr)
def my_dist(s, t):
return torch.norm(s - t, dim=0).pow(2)
class MockPerceptron(torch.nn.Module):
def __init__(self, xdim, ydim):
super().__init__()
self.fc = torch.nn.Linear(xdim, ydim, dtype=float, bias=False)
def forward(self, x):
output = self.fc(x)
return output
def mock_torch_rand(*size):
return torch.ones(*size)
def test_sensei_init():
xdim = 3
ydim = 1
n_fair_steps = 1
fair_lr = 1.0
network = MockPerceptron(xdim, ydim)
lambda_param = torch.tensor(1.0)
distance_x = my_dist
distance_y = my_dist
n_fair_steps = 100
fair_lr = 100
sensei = SenSeIAuditor(
distance_x=distance_x, distance_y=distance_y, num_steps=n_fair_steps, lr=fair_lr
)
assert sensei.num_steps == n_fair_steps
assert sensei.lr == fair_lr
@patch("torch.optim.Adam", mock_adam_optim)
@patch("torch.rand", mock_torch_rand)
def test_senseiauditor_generate_worst_case_examples():
minibatch_size = 2
xdim = 3
ydim = 1
n_fair_steps = 1
fair_lr = 1.0
max_noise = 0.2
min_noise = 0.0
x = torch.from_numpy(np.ones([minibatch_size, xdim]))
y = torch.from_numpy(np.zeros([minibatch_size, ydim]))
network = MockPerceptron(xdim, ydim)
lamb = torch.tensor(1.0)
distance_x = my_dist
distance_y = my_dist
for param in network.parameters():
param.data.fill_(float(1.0))
se_auditor = SenSeIAuditor(
distance_x=distance_x,
distance_y=distance_y,
num_steps=n_fair_steps,
lr=fair_lr,
max_noise=max_noise,
min_noise=min_noise,
)
output = se_auditor.generate_worst_case_examples(
network=network, x=x, lambda_param=lamb
)
assert np.array_equal(list(output.size()), list(x.size()))
@pytest.mark.parametrize(
"audit_threshold,lambda_param,confidence,optimizer",
[
(None, None, 0.95, None),
(None, None, 0.95, torch.optim.Adam),
(1.25, None, 0.95, None),
(1.25, 0.25, 0.85, torch.optim.Adam),
],
)
def test_sensei_auditing(audit_threshold, lambda_param, confidence, optimizer):
xdim = 50
ydim = 1
B = 100
network = MockPerceptron(xdim, ydim)
loss_fn = F.mse_loss
distance_x = my_dist
distance_y = my_dist
n_fair_steps = 10
fair_lr = 0.01
auditor = SenSeIAuditor(
distance_x=distance_x, distance_y=distance_y, num_steps=n_fair_steps, lr=fair_lr
)
X_audit = torch.rand(size=(B, xdim), dtype=torch.float64)
Y_audit = torch.rand(size=(B, ydim), dtype=torch.float64)
response = auditor.audit(
network,
X_audit,
Y_audit,
loss_fn,
audit_threshold,
lambda_param,
confidence,
optimizer,
)
assert response.lossratio_mean is not None and isinstance(
response.lossratio_mean, float
)
assert response.lossratio_std is not None and isinstance(
response.lossratio_std, float
)
assert response.lower_bound is not None and isinstance(response.lower_bound, float)
if audit_threshold is None:
assert response.threshold is None
assert response.pval is None
assert response.confidence is None
assert response.is_model_fair is None
else:
assert response.threshold is not None and isinstance(response.threshold, float)
assert response.pval is not None and isinstance(response.pval, float)
assert response.confidence == confidence
assert response.is_model_fair is not None and isinstance(
response.is_model_fair, bool
)
| 3,926 | 24.5 | 88 |
py
|
inFairness
|
inFairness-main/tests/auditor/test_auditor.py
|
import pytest
import numpy as np
from inFairness.auditor import Auditor
from mock import patch
import torch
from torch.nn import functional as F
def mock_adam_optim(
params, lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False
):
return torch.optim.SGD(params, lr=lr)
def my_dist(s, t):
return torch.norm(s - t, dim=0).pow(2)
class MockPerceptron(torch.nn.Module):
def __init__(self, xdim, ydim):
super().__init__()
self.fc = torch.nn.Linear(xdim, ydim, dtype=float, bias=False)
def forward(self, x):
output = self.fc(x)
return output
def mock_torch_rand(*size):
return torch.ones(*size)
def test_auditor_loss_ratio():
xdim = 50
ydim = 1
B = 100
network = MockPerceptron(xdim, ydim)
loss_fn = F.l1_loss
auditor = Auditor()
X_audit = torch.rand(size=(B, xdim), dtype=torch.float64)
X_worst = torch.rand(size=(B, xdim), dtype=torch.float64)
Y_audit = torch.rand(size=(B, ydim), dtype=torch.float64)
loss_ratio = auditor.compute_loss_ratio(X_audit, X_worst, Y_audit, network, loss_fn)
assert np.array_equal(loss_ratio.shape, [B, 1])
| 1,177 | 21.226415 | 88 |
py
|
inFairness
|
inFairness-main/tests/auditor/test_sensr_auditor.py
|
import pytest
import numpy as np
from inFairness.auditor import SenSRAuditor
from mock import patch
import torch
from torch.nn import functional as F
def mock_adam_optim(
params, lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False
):
return torch.optim.SGD(params, lr=lr)
def my_dist(s, t):
return torch.norm(s - t, dim=0).pow(2)
class MockPerceptron(torch.nn.Module):
def __init__(self, xdim, ydim):
super().__init__()
self.fc = torch.nn.Linear(xdim, ydim, dtype=float, bias=False)
def forward(self, x):
output = self.fc(x)
return output
def mock_torch_rand(*size):
return torch.ones(*size)
def test_sensrauditor_init():
xdim = 3
ydim = 1
n_fair_steps = 100
fair_lr = 100
network = MockPerceptron(xdim, ydim)
loss_fn = F.mse_loss
lamb = torch.tensor(1.0)
distance_x = my_dist
sensr = SenSRAuditor(
loss_fn=loss_fn, distance_x=distance_x, num_steps=n_fair_steps, lr=fair_lr
)
assert sensr.num_steps == n_fair_steps
assert sensr.lr == fair_lr
@patch("torch.optim.Adam", mock_adam_optim)
@patch("torch.rand", mock_torch_rand)
def test_sensrauditor_generate_worst_case_examples():
minibatch_size = 2
xdim = 3
ydim = 1
n_fair_steps = 1
fair_lr = 1.0
max_noise = 0.2
min_noise = 0.0
x = torch.from_numpy(np.ones([minibatch_size, xdim]))
y = torch.from_numpy(np.zeros([minibatch_size, ydim]))
network = MockPerceptron(xdim, ydim)
loss_fn = F.mse_loss
lamb = torch.tensor(1.0)
distance_x = my_dist
for param in network.parameters():
param.data.fill_(float(1.0))
sr_auditor = SenSRAuditor(
loss_fn=loss_fn,
distance_x=distance_x,
num_steps=n_fair_steps,
lr=fair_lr,
max_noise=max_noise,
min_noise=min_noise,
)
output = sr_auditor.generate_worst_case_examples(
network=network, x=x, y=y, lambda_param=lamb
)
assert np.array_equal(list(output.size()), list(x.size()))
@pytest.mark.parametrize(
"audit_threshold,lambda_param,confidence,optimizer",
[
(None, None, 0.95, None),
(None, None, 0.95, torch.optim.Adam),
(1.25, None, 0.95, None),
(1.25, 0.25, 0.85, torch.optim.Adam),
],
)
def test_sensr_auditing(audit_threshold, lambda_param, confidence, optimizer):
xdim = 50
ydim = 1
B = 100
network = MockPerceptron(xdim, ydim)
loss_fn = F.mse_loss
distance_x = my_dist
n_fair_steps = 10
fair_lr = 0.01
auditor = SenSRAuditor(
loss_fn=loss_fn, distance_x=distance_x, num_steps=n_fair_steps, lr=fair_lr
)
X_audit = torch.rand(size=(B, xdim), dtype=torch.float64)
Y_audit = torch.rand(size=(B, ydim), dtype=torch.float64)
response = auditor.audit(
network, X_audit, Y_audit, audit_threshold, lambda_param, confidence, optimizer
)
assert response.lossratio_mean is not None and isinstance(
response.lossratio_mean, float
)
assert response.lossratio_std is not None and isinstance(
response.lossratio_std, float
)
assert response.lower_bound is not None and isinstance(response.lower_bound, float)
if audit_threshold is None:
assert response.threshold is None
assert response.pval is None
assert response.confidence is None
assert response.is_model_fair is None
else:
assert response.threshold is not None and isinstance(response.threshold, float)
assert response.pval is not None and isinstance(response.pval, float)
assert response.confidence == confidence
assert response.is_model_fair is not None and isinstance(
response.is_model_fair, bool
)
| 3,772 | 25.384615 | 87 |
py
|
inFairness
|
inFairness-main/tests/fairalgo/test_sensei.py
|
import pytest
import numpy as np
from inFairness.auditor import SenSeIAuditor
from inFairness.fairalgo import SenSeI
from mock import patch
import torch
from torch.nn import functional as F
def mock_generate_worst_case_examples(cls, network, x, lambda_param):
return torch.ones_like(x) * -1.0
def mock_dist(s, t):
return torch.norm(s - t, dim=0).pow(2)
class MockPerceptron(torch.nn.Module):
def __init__(self, xdim, ydim):
super().__init__()
self.fc = torch.nn.Linear(xdim, ydim, dtype=float, bias=False)
def forward(self, x):
output = self.fc(x)
return output
@patch(
"inFairness.auditor.SenSeIAuditor.generate_worst_case_examples",
mock_generate_worst_case_examples,
)
def test_sensei_forward_train():
minibatch_size = 2
xdim = 3
ydim = 1
n_fair_steps = 1
lr = 1.0
max_noise = 0.2
min_noise = 0.0
x = torch.from_numpy(np.ones([minibatch_size, xdim]))
y = torch.from_numpy(np.zeros([minibatch_size, ydim]))
network = MockPerceptron(xdim, ydim)
loss_fn = F.mse_loss
rho = 1.0
eps = 1.0
distance_x = mock_dist
distance_y = mock_dist
for param in network.parameters():
param.data.fill_(float(1.0))
sensei = SenSeI(
network, distance_x, distance_y, loss_fn, rho, eps, n_fair_steps, lr
)
response = sensei.forward(x, y)
assert torch.abs(torch.mean(response.loss) - torch.tensor(81.0)) < 0.000001
assert torch.abs(torch.mean(response.y_pred) - torch.tensor(3.0)) < 0.000001
assert isinstance(sensei.auditor, SenSeIAuditor)
| 1,593 | 24.709677 | 80 |
py
|
inFairness
|
inFairness-main/tests/fairalgo/test_senstir.py
|
import torch
from inFairness.distances import (
SensitiveSubspaceDistance,
SquaredEuclideanDistance,
)
from inFairness.fairalgo import SenSTIR
def generate_test_data(num_batches, queries_per_batch, items_per_query):
num_features = 2
item_data = torch.rand(
num_batches, queries_per_batch, items_per_query, num_features
)
relevances = torch.sum(item_data, dim=3)
# mask the second dimension for some items
mask = torch.ones(num_batches, queries_per_batch, items_per_query, 1)
mask = torch.cat([mask, mask.clone().bernoulli_(0.8)], dim=3)
item_data *= mask
return item_data, relevances
def test_senstir():
num_steps = 200
queries_per_batch = 10
items_per_query = 5
feature_size = 2
# dummy synthetic data
item_data, relevances = generate_test_data(
num_steps, queries_per_batch, items_per_query
)
# let's create a Sensitive subspace distance in the input space
distance_x = SensitiveSubspaceDistance()
# we use the second dimension in the basis vector because the projection complement will give us the first
basis_vectors_ = torch.tensor([[0], [1.]])
distance_x.fit(basis_vectors_)
distance_y = SquaredEuclideanDistance()
distance_y.fit(num_dims=items_per_query)
# dummy network equally sensitive in both dimensions
network = torch.nn.Linear(feature_size, 1, bias=None)
network.weight.data = (
torch.ones((1, feature_size)) + torch.rand((1, feature_size)) * 0.01
)
fair_algo = SenSTIR(
network,
distance_x,
distance_y,
rho=0.1,
eps=0.001,
auditor_nsteps=10,
auditor_lr=0.05,
monte_carlo_samples_ndcg=60,
)
fair_algo.train()
optimizer = torch.optim.Adam(fair_algo.parameters(), lr=0.01)
for i in range(num_steps):
optimizer.zero_grad()
loss = fair_algo(item_data[i], relevances[i]).loss
loss.backward()
optimizer.step()
weights = network.weight.data.squeeze()
# the ratio of the first component of this vector should be greater than 3
# so that the response of the network should be majorly on the first dimension
assert weights[0] / weights[1] > 3.0
| 2,236 | 28.826667 | 110 |
py
|
inFairness
|
inFairness-main/tests/fairalgo/test_sensr.py
|
import pytest
import numpy as np
from inFairness.auditor import SenSRAuditor
from inFairness.fairalgo import SenSR
from mock import patch
import torch
from torch.nn import functional as F
def mock_generate_worst_case_examples(cls, network, x, y, lambda_param):
return torch.ones_like(x) * -1.0
def mock_dist(s, t):
return torch.norm(s - t, dim=0).pow(2)
class MockPerceptron(torch.nn.Module):
def __init__(self, xdim, ydim):
super().__init__()
self.fc = torch.nn.Linear(xdim, ydim, dtype=float, bias=False)
def forward(self, x):
output = self.fc(x)
return output
@patch(
"inFairness.auditor.SenSRAuditor.generate_worst_case_examples",
mock_generate_worst_case_examples,
)
def test_sensr_forward_train():
minibatch_size = 2
xdim = 3
ydim = 1
n_fair_steps = 1
lr_lamb = 1.0
lr_param = 1.0
max_noise = 0.2
min_noise = 0.0
x = torch.from_numpy(np.ones([minibatch_size, xdim]))
y = torch.from_numpy(np.zeros([minibatch_size, ydim]))
network = MockPerceptron(xdim, ydim)
loss_fn = F.mse_loss
eps = 1.0
distance_x = mock_dist
for param in network.parameters():
param.data.fill_(float(1.0))
sensr = SenSR(
network, distance_x, loss_fn, eps, lr_lamb, lr_param, n_fair_steps, lr_lamb
)
response = sensr.forward(x, y)
assert torch.abs(torch.mean(response.loss) - torch.tensor(9.0)) < 0.000001
assert torch.abs(torch.mean(response.y_pred) - torch.tensor(3.0)) < 0.000001
assert isinstance(sensr.auditor, SenSRAuditor)
| 1,576 | 24.852459 | 83 |
py
|
inFairness
|
inFairness-main/docs/source/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
package_path = os.path.abspath('../..')
infairness_path = os.path.join(package_path, 'inFairness')
sys.path.insert(0, package_path)
# -- Project information -----------------------------------------------------
project = 'inFairness'
copyright = '2022, IBM Research'
author = 'IBM Research'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.githubpages',
'sphinx.ext.mathjax',
'sphinxcontrib.apidoc',
'sphinx_design',
'myst_parser',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinxcontrib.gtagjs'
]
# API doc configuration
apidoc_module_dir = infairness_path
# apidoc_output_dir = ''
apidoc_excluded_paths = []
apidoc_separate_modules = True
# Google analytics tracking
gtagjs_ids = [
'G-3QDFV4L7YB',
]
# Napolean docstring configuration
napoleon_numpy_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# Markdown and ReST parser extension configuration
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'furo'
html_theme_options = {
"sidebar_hide_name": True,
"announcement": ('<p><small>Check out our demonstration exploring '
'individual and group fairness of three BERT-based '
'toxic text classification models '
'<a class="sd-sphinx-override sd-badge sd-text-wrap '
'sd-bg-dark sd-bg-text-dark reference external" target="_blank"'
'href="https://fairbert.vizhub.ai">'
f'<span>Demonstration</span></a></small></p>'),
"dark_css_variables": {
"color-announcement-background": "#935610",
"color-announcement-text": "#FFFFFF",
},
"light_css_variables": {
"color-announcement-background": "#935610",
"color-announcement-text": "#FFFFFF",
},
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_logo = "_static/imgs/infairness-logo.png"
html_title = project
intersphinx_mapping = {
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'torch': ('https://pytorch.org/docs/1.9.0/', None)
}
myst_enable_extensions = [
"amsmath",
"colon_fence",
"deflist",
"dollarmath",
"fieldlist",
"html_admonition",
"html_image",
"replacements",
"smartquotes",
"strikethrough",
"substitution",
"tasklist",
]
| 3,873 | 27.910448 | 79 |
py
|
inFairness
|
inFairness-main/inFairness/__init__.py
| 0 | 0 | 0 |
py
|
|
inFairness
|
inFairness-main/inFairness/postprocessing/datainterfaces.py
|
from typing import Dict
import torch
from dataclasses import dataclass
@dataclass
class PostProcessingObjectiveResponse:
"""Class to store the result from a post-processing algorithm"""
y_solution: torch.Tensor = None
objective: Dict = None
| 256 | 20.416667 | 68 |
py
|
inFairness
|
inFairness-main/inFairness/postprocessing/data_ds.py
|
import torch
from inFairness.postprocessing.distance_ds import DistanceStructure
class PostProcessingDataStore(object):
"""Data strucuture to hold the data used for post-processing
Parameters
-------------
distance_x: inFairness.distances.Distance
Distance metric in the input space
"""
def __init__(self, distance_x):
self.data_X = None
self.data_Y = None
self.n_samples = 0
self.distance_ds = DistanceStructure(distance_x)
@property
def distance_matrix(self):
"""Distances between N data points. Shape: (N, N)"""
return self.distance_ds.distance_matrix
def add_datapoints_X(self, X: torch.Tensor):
"""Add datapoints to the input datapoints X
Parameters
------------
X: torch.Tensor
New data points to add to the input data
`X` should have the same dimensions as previous data
along all dimensions except the first (batch) dimension
"""
if self.data_X is None:
self.data_X = X
else:
self.data_X = torch.cat([self.data_X, X], dim=0)
def add_datapoints_Y(self, Y: torch.Tensor):
"""Add datapoints to the output datapoints Y
Parameters
------------
Y: torch.Tensor
New data points to add to the output data
`Y` should have the same dimensions as previous data
along all dimensions except the first (batch) dimension
"""
if self.data_Y is None:
self.data_Y = Y
else:
self.data_Y = torch.cat([self.data_Y, Y], dim=0)
def add_datapoints(self, X: torch.Tensor, Y: torch.Tensor):
"""Add new datapoints to the existing datapoints
Parameters
------------
X: torch.Tensor
New data points to add to the input data
`X` should have the same dimensions as previous data
along all dimensions except the first (batch) dimension
Y: torch.Tensor
New data points to add to the output data
`Y` should have the same dimensions as previous data
along all dimensions except the first (batch) dimension
"""
self.add_datapoints_X(X)
self.add_datapoints_Y(Y)
self.n_samples = self.n_samples + X.shape[0]
self.distance_ds.build_distance_matrix(self.data_X)
def reset(self):
"""Reset the data structure holding the data points for post-processing.
Invoking this operation removes all datapoints and resets the state back
to the initial state.
"""
self.data_X = None
self.data_Y = None
self.n_samples = 0
self.distance_ds.reset()
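# A small usage sketch (mirrors tests/postprocessing/test_data_ds.py; the shapes
# are arbitrary):
#
#     from inFairness.distances import EuclideanDistance
#     store = PostProcessingDataStore(EuclideanDistance())
#     store.add_datapoints(torch.rand(10, 50), torch.rand(10))
#     store.distance_matrix.shape   # (10, 10) pairwise input distances
#     store.reset()                 # drops all stored data and distances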
| 2,843 | 30.955056 | 80 |
py
|
inFairness
|
inFairness-main/inFairness/postprocessing/__init__.py
|
from inFairness.postprocessing.glif import GraphLaplacianIF
from inFairness.postprocessing.base_postprocessing import BasePostProcessing
__all__ = [symb for symb in globals() if not symb.startswith("_")]
| 206 | 33.5 | 76 |
py
|
inFairness
|
inFairness-main/inFairness/postprocessing/glif.py
|
import torch
import numpy as np
from inFairness.utils.postprocessing import (
build_graph_from_dists,
get_laplacian,
laplacian_solve,
)
from inFairness.postprocessing.base_postprocessing import BasePostProcessing
from inFairness.postprocessing.datainterfaces import PostProcessingObjectiveResponse
class GraphLaplacianIF(BasePostProcessing):
"""Implements the Graph Laplacian Individual Fairness Post-Processing method.
Proposed in `Post-processing for Individual Fairness <https://arxiv.org/abs/2110.13796>`_
Parameters
------------
distance_x: inFairness.distances.Distance
Distance metric in the input space
is_output_probas: bool
True if the `data_Y` (model output) are probabilities implying that
this is a classification setting, and False if the `data_Y` are
in euclidean space implying that this is a regression setting.
"""
def __init__(self, distance_x, is_output_probas):
super().__init__(distance_x, is_output_probas=is_output_probas)
self._METHOD_COORDINATE_KEY = "coordinate-descent"
self._METHOD_EXACT_KEY = "exact"
def __exact_pp__(self, lambda_param, scale, threshold, normalize):
"""Implements Exact version of post processing"""
y_hat = self.__get_yhat__()
W, idxs = build_graph_from_dists(
self.distance_matrix, scale, threshold, normalize
)
data_y_idxs = y_hat[idxs]
L = get_laplacian(W, normalize)
if normalize:
L = (L + L.T) / 2
y = laplacian_solve(L, data_y_idxs, lambda_param)
data_y_new = torch.clone(y_hat)
data_y_new[idxs] = y
objective = self.get_objective(
data_y_new, lambda_param, scale, threshold, normalize, W, idxs, L
)
return data_y_new, objective
def __coordinate_update__(
self,
yhat_batch,
W_batch,
y,
batchidx,
lambda_param,
D_inv_batch=None,
diag_W_batch=None,
D_batch=None,
):
        # Shapes:
        #   W_batch: (bsz, nsamples)
        #   y: (nsamples, ncls - 1)
        #   W_xy: (bsz, nsamples, ncls-1)
        #   W_xy_corr: (bsz, ncls-1)
        #   numerator: (bsz, ncls-1)
        #   denominator: (bsz, 1)
        W_xy = W_batch.unsqueeze(-1) * y.unsqueeze(0)
if D_inv_batch is None:
W_xy_corr = torch.diagonal(W_xy[:, batchidx], offset=0, dim1=0, dim2=1).T
numerator = yhat_batch + lambda_param * (W_xy.sum(dim=1) - W_xy_corr)
denominator = 1 + lambda_param * (
W_batch.sum(dim=1, keepdim=True) - diag_W_batch.view(-1, 1)
)
y_new = numerator / denominator
else:
W_xy = W_xy * D_inv_batch.unsqueeze(-1)
W_xy_corr = torch.diagonal(W_xy[:, batchidx], offset=0, dim1=0, dim2=1).T
numerator = yhat_batch + (lambda_param * (W_xy.sum(dim=1) - W_xy_corr)) / 2
denominator = (
1
+ lambda_param
- lambda_param * diag_W_batch.view(-1, 1) / D_batch.view(-1, 1)
)
y_new = numerator / denominator
return y_new
def __coordinate_pp__(
self, lambda_param, scale, threshold, normalize, batchsize, epochs
):
"""Implements coordinate descent for large-scale data"""
y_hat = self.__get_yhat__()
y_copy = y_hat.clone()
n_samples = self.datastore.n_samples
W, idxs = build_graph_from_dists(
self.distance_matrix, scale, threshold, normalize
)
data_y_idxs = y_hat[idxs]
W_diag = torch.diag(W)
if normalize:
D = W.sum(dim=1)
D_inv = 1 / D.reshape(1, -1) + 1 / D.reshape(-1, 1)
for epoch_idx in range(epochs):
epoch_idxs_random = np.random.permutation(n_samples)
curridx = 0
while curridx < n_samples:
batchidxs = epoch_idxs_random[curridx : curridx + batchsize]
if normalize:
y_copy[batchidxs] = self.__coordinate_update__(
data_y_idxs[batchidxs],
W[batchidxs],
y_copy,
batchidxs,
lambda_param=lambda_param,
D_inv_batch=D_inv[batchidxs],
diag_W_batch=W_diag[batchidxs],
D_batch=D[batchidxs],
)
else:
y_copy[batchidxs] = self.__coordinate_update__(
data_y_idxs[batchidxs],
W[batchidxs],
y_copy,
batchidxs,
lambda_param=lambda_param,
diag_W_batch=W_diag[batchidxs],
)
curridx += batchsize
pp_sol = y_hat.clone()
pp_sol[idxs] = y_copy
objective = self.get_objective(
pp_sol, lambda_param, scale, threshold, normalize, W, idxs
)
return pp_sol, objective
def get_objective(
self,
y_solution,
lambda_param: float,
scale: float,
threshold: float,
normalize: bool = False,
W_graph=None,
idxs=None,
L=None,
):
"""Compute the objective values for the individual fairness as follows:
.. math:: \\widehat{\\mathbf{f}} = \\arg \\min_{\\mathbf{f}} \\ \\|\\mathbf{f} - \\hat{\\mathbf{y}}\\|_2^2 + \\lambda \\ \\mathbf{f}^{\\top}\\mathbb{L_n} \\mathbf{f}
Refer equation 3.1 in the paper
Parameters
------------
y_solution: torch.Tensor
Post-processed solution values of shape (N, C)
lambda_param: float
Weight for the Laplacian Regularizer
scale: float
Parameter used to scale the computed distances.
Refer equation 2.2 in the proposing paper.
threshold: float
Parameter used to construct the Graph from distances
Distances below provided threshold are considered to be
connected edges, while beyond the threshold are considered to
be disconnected. Refer equation 2.2 in the proposing paper.
normalize: bool
Whether to normalize the computed Laplacian or not
W_graph: torch.Tensor
Adjacency matrix of shape (N, N)
idxs: torch.Tensor
Indices of data points which are included in the adjacency matrix
L: torch.Tensor
Laplacian of the adjacency matrix
Returns
---------
objective: PostProcessingObjectiveResponse
post-processed solution containing two parts:
(a) Post-processed output probabilities of shape (N, C)
where N is the number of data samples, and C is the
number of output classes
(b) Objective values. Refer equation 3.1 in the paper
for an explanation of the various parts
"""
if W_graph is None or idxs is None:
W_graph, idxs = build_graph_from_dists(
self.distance_matrix, scale, threshold, normalize
)
if L is None:
L = get_laplacian(W_graph, normalize)
y_hat = self.__get_yhat__()
y_dist = ((y_hat - y_solution) ** 2).sum()
L_obj = lambda_param * (y_solution[idxs] * (L @ y_solution[idxs])).sum()
overall_objective = y_dist + L_obj
result = {
"y_dist": y_dist.item(),
"L_objective": L_obj.item(),
"overall_objective": overall_objective.item(),
}
return result
def postprocess(
self,
method: str,
lambda_param: float,
scale: float, # 0.001
threshold: float, # median of all distances if None
normalize: bool = False,
batchsize: int = None,
epochs: int = None,
):
"""Implements the Graph Laplacian Individual Fairness Post-processing algorithm
Parameters
-------------
method: str
GLIF method type. Possible values are:
(a) `coordinate-descent` method which is more suitable for
large-scale data and post-processes by batching data into minibatches
(see section 3.2.2 of the paper), or
(b) `exact` method which gets the exact solution but is not appropriate
for large-scale data (refer equation 3.3 in the paper).
lambda_param: float
Weight for the Laplacian Regularizer
scale: float
Parameter used to scale the computed distances.
Refer equation 2.2 in the proposing paper.
threshold: float
Parameter used to construct the Graph from distances
Distances below provided threshold are considered to be
connected edges, while beyond the threshold are considered to
be disconnected. Refer equation 2.2 in the proposing paper.
normalize: bool
Whether to normalize the computed Laplacian or not
batchsize: int
Batch size. *Required when method=`coordinate-descent`*
epochs: int
Number of coordinate descent epochs.
*Required when method=`coordinate-descent`*
Returns
-----------
solution: PostProcessingObjectiveResponse
post-processed solution containing two parts:
(a) Post-processed output probabilities of shape (N, C)
where N is the number of data samples, and C is the
number of output classes
(b) Objective values. Refer equation 3.1 in the paper
for an explanation of the various parts
"""
assert method in [
self._METHOD_COORDINATE_KEY,
self._METHOD_EXACT_KEY,
], f"`method` should be either `coordinate-descent` or `exact`. Value provided: {method}"
if method == self._METHOD_COORDINATE_KEY:
assert (
batchsize is not None and epochs is not None
), f"batchsize and epochs parameter is required but None provided"
if method == self._METHOD_EXACT_KEY:
data_y_new, objective = self.__exact_pp__(
lambda_param, scale, threshold, normalize
)
elif method == self._METHOD_COORDINATE_KEY:
data_y_new, objective = self.__coordinate_pp__(
lambda_param, scale, threshold, normalize, batchsize, epochs
)
if self.is_output_probas:
pp_sol = torch.exp(data_y_new) / (
1 + torch.exp(data_y_new).sum(axis=1, keepdim=True)
)
y_solution = torch.hstack((pp_sol, 1 - pp_sol.sum(axis=1, keepdim=True)))
else:
y_solution = data_y_new
result = PostProcessingObjectiveResponse(
y_solution=y_solution, objective=objective
)
return result
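# --- Hedged usage sketch (added; not part of the original module) ---
# A minimal end-to-end example of the GLIF post-processor, assuming a
# SquaredEuclideanDistance input metric and softmax model outputs. The
# scale, threshold and lambda values below are illustrative only.
if __name__ == "__main__":
    from inFairness.distances import SquaredEuclideanDistance

    metric = SquaredEuclideanDistance()
    metric.fit(num_dims=4)

    glif = GraphLaplacianIF(distance_x=metric, is_output_probas=True)

    X = torch.randn(50, 4)
    probas = torch.softmax(torch.randn(50, 3), dim=-1)  # model output probabilities
    glif.add_datapoints(X, probas)

    solution = glif.postprocess(
        method="exact", lambda_param=1.0, scale=0.01, threshold=2.5
    )
    print(solution.y_solution.shape)  # (50, 3) post-processed probabilities
    print(solution.objective)         # dict with the individual objective terms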
| 11,440 | 34.977987 | 174 |
py
|
inFairness
|
inFairness-main/inFairness/postprocessing/base_postprocessing.py
|
import torch
from typing import Tuple
from inFairness.postprocessing.data_ds import PostProcessingDataStore
class BasePostProcessing(object):
"""Base class for Post-Processing methods
Parameters
-------------
distance_x: inFairness.distances.Distance
Distance matrix in the input space
is_output_probas: bool
True if the `data_Y` (model output) are probabilities implying that
this is a classification setting, and False if the `data_Y` are
in euclidean space implying that this is a regression setting.
"""
def __init__(self, distance_x, is_output_probas):
self.distance_x = distance_x
self.is_output_probas = is_output_probas
self.datastore = PostProcessingDataStore(distance_x)
@property
def data(self):
"""Input and Output data used for post-processing
Returns
--------
data: Tuple(torch.Tensor, torch.Tensor)
A tuple of (X, Y) data points
"""
return (self.datastore.data_X, self.datastore.data_Y)
@property
def distance_matrix(self):
"""Distance matrix
Returns
--------
distance_matrix: torch.Tensor
Matrix of distances of shape (N, N) where
N is the number of data samples
"""
return self.datastore.distance_matrix
def add_datapoints(self, X: torch.Tensor, y: torch.Tensor):
"""Add datapoints to the post-processing method
Parameters
-----------
X: torch.Tensor
New input datapoints
y: torch.Tensor
New output datapoints
"""
self.datastore.add_datapoints(X, y)
def reset_datapoints(self):
"""Reset datapoints store back to its initial state"""
self.datastore.reset()
def postprocess(self, *args, **kwargs):
raise NotImplementedError("postprocess method not implemented by class")
def __get_yhat__(self):
_, data_y = self.data
if self.is_output_probas:
y_hat = torch.log(data_y[:, :-1]) - torch.log(data_y[:, -1]).view(-1, 1)
return y_hat
else:
return data_y
| 2,253 | 28.272727 | 84 |
py
|
inFairness
|
inFairness-main/inFairness/postprocessing/distance_ds.py
|
import torch
class DistanceStructure(object):
"""Data structure to store and track the distance matrix between data points
Parameters
-------------
distance_x: inFairness.distances.Distance
Distance metric in the input space
"""
def __init__(self, distance_x):
self.distance_x = distance_x
self.distance_matrix = None
def reset(self):
"""Reset the state of the data structure back to its initial state"""
self.distance_matrix = None
def build_distance_matrix(self, data_X):
"""Build the distance matrix between input data samples `data_X`
Parameters
-------------
data_X: torch.Tensor
Data points between which the distance matrix is to be computed
"""
nsamples_old = (
0 if self.distance_matrix is None else self.distance_matrix.shape[0]
)
nsamples_total = data_X.shape[0]
device = data_X.device
distance_matrix_new = torch.zeros(
size=(nsamples_total, nsamples_total), device=device
)
if self.distance_matrix is not None:
distance_matrix_new[:nsamples_old, :nsamples_old] = self.distance_matrix
dist = (
self.distance_x(
data_X[nsamples_old:nsamples_total], data_X, itemwise_dist=False
)
.detach()
.squeeze()
)
distance_matrix_new[nsamples_old:, :] = dist
distance_matrix_new[:, nsamples_old:] = dist.T
self.distance_matrix = distance_matrix_new.clone()
| 1,604 | 28.722222 | 84 |
py
|
inFairness
|
inFairness-main/inFairness/distances/wasserstein_distance.py
|
import torch
from ot import emd2
from inFairness.distances import MahalanobisDistances
class WassersteinDistance(MahalanobisDistances):
"""computes a batched Wasserstein Distance for pairs of sets of items on each batch in the tensors
with dimensions B, N, D and B, M, D where B and D are the batch and feature sizes and N and M are the number of items on each batch.
Currently only supporting distances inheriting from :class: `MahalanobisDistances`.
transforms an Mahalanobis Distance object so that the forward method becomes a differentiable batched
Wasserstein distance between sets of items. This Wasserstein distance will use the underlying Mahalanobis
distance as pairwise cost function to solve the optimal transport problem.
for more information see equation 2.5 of the reference bellow
References
----------
`Amanda Bower, Hamid Eftekhari, Mikhail Yurochkin, Yuekai Sun:
Individually Fair Rankings. ICLR 2021`
"""
def __init__(self):
super().__init__()
def forward(self, X1: torch.Tensor, X2: torch.Tensor):
"""computes a batch wasserstein distance implied by the cost function represented by an
underlying mahalanobis distance.
Parameters
--------------
X1: torch.Tensor
Data sample of shape (B, N, D)
X2: torch.Tensor
Data sample of shape (B, M, D)
Returns
--------
dist: torch.Tensor
Wasserstein distance of shape (B) between batch samples in X1 and X2
"""
costs = super().forward(X1, X2, itemwise_dist=False)
uniform_x1 = torch.ones(X1.shape[1]) / X1.shape[1]
uniform_x2 = torch.ones(X2.shape[1]) / X2.shape[1]
num_batches = X1.shape[0]
dist = torch.stack(
[emd2(uniform_x1, uniform_x2, costs[j]) for j in range(num_batches)]
)
return dist
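# --- Hedged usage sketch (added; not part of the original module) ---
# Illustrates the batched Wasserstein distance between two sets of items per
# batch, using an identity covariance so the ground cost is the squared
# Euclidean distance. Shapes and values are illustrative only.
if __name__ == "__main__":
    dist = WassersteinDistance()
    dist.fit(sigma=torch.eye(5))  # Mahalanobis cost with identity covariance

    X1 = torch.randn(2, 10, 5)  # batch of 2 sets with 10 items each
    X2 = torch.randn(2, 8, 5)   # batch of 2 sets with 8 items each
    print(dist(X1, X2))  # tensor of shape (2,): one distance per batch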
| 1,924 | 34.648148 | 136 |
py
|
inFairness
|
inFairness-main/inFairness/distances/explore_distance.py
|
import numpy as np
import torch
from scipy.stats import logistic
from inFairness.utils import datautils
from inFairness.distances.mahalanobis_distance import MahalanobisDistances
class EXPLOREDistance(MahalanobisDistances):
"""Implements the Embedded Xenial Pairs Logistic Regression metric
(EXPLORE) defined in Section 2.2 of Two Simple Ways to Learn Individual
Fairness Metrics from Data.
EXPLORE defines the distance in the input space to be of the form:
.. math:: d_x(x_1, x_2) := \langle \phi(x_1) - \phi(x_2), \Sigma (\phi(x_1) - \phi(x_2)) \\rangle
where :math:`\phi(x)` is an embedding map and :math:`\Sigma` is a semi-positive
definite matrix.
The metric expects the data to be in the form of triplets
:math:`\{(x_{i_1}, x_{i_2}, y_i)\}_{i=1}^{n}` where :math:`y_i \in \{0, 1\}`
indicates whether the human considers :math:`x_{i_1}` and :math:`x_{i_2}`
comparable (:math:`y_i = 1` indicates comparable) or not.
References
-----------
`Mukherjee, Debarghya, Mikhail Yurochkin, Moulinath Banerjee, and Yuekai Sun.
"Two simple ways to learn individual fairness metrics from data." In
International Conference on Machine Learning, pp. 7097-7107. PMLR, 2020.`
"""
def __init__(self):
super().__init__()
def fit(self, X1, X2, Y, iters, batchsize, autoinfer_device=True):
"""Fit EXPLORE distance metric
Parameters
-----------
X1: torch.Tensor
first set of input samples
X2: torch.Tensor
second set of input samples
Y: torch.Tensor
:math:`y_i` vector containing 1 if corresponding elements from
X1 and X2 are comparable, and 0 if not
iters: int
number of iterations of SGD to compute the :math:`\Sigma` matrix
batchsize: int
batch size of each iteration
autoinfer_device: bool
Should the distance metric be automatically moved to an appropriate
device (CPU / GPU) or not? If set to True, it moves the metric
to the same device `X1` is on. If set to False, keeps the metric
on CPU.
"""
assert (
X1.shape[0] == X2.shape[0] == Y.shape[0]
), "Number of elements in X1, X2, and Y do not match"
X = datautils.convert_tensor_to_numpy(X1 - X2)
Y = datautils.convert_tensor_to_numpy(Y)
sigma = self.compute_sigma(X, Y, iters, batchsize)
super().fit(sigma)
if autoinfer_device:
device = datautils.get_device(X1)
super().to(device)
def __grad_likelihood__(self, X, Y, sigma):
"""Computes the gradient of the likelihood function using sigmoidal link"""
diag = np.einsum("ij,ij->i", np.matmul(X, sigma), X)
diag = np.maximum(diag, 1e-10)
prVec = logistic.cdf(diag)
sclVec = 2.0 / (np.exp(diag) - 1)
vec = (Y * prVec) - ((1 - Y) * prVec * sclVec)
grad = np.matmul(X.T * vec, X) / X.shape[0]
return grad
def __projPSD__(self, sigma):
"""Computes the projection onto the PSD cone"""
try:
L = np.linalg.cholesky(sigma)
sigma_hat = np.dot(L, L.T)
except np.linalg.LinAlgError:
d, V = np.linalg.eigh(sigma)
sigma_hat = np.dot(
V[:, d >= 1e-8], d[d >= 1e-8].reshape(-1, 1) * V[:, d >= 1e-8].T
)
return sigma_hat
def compute_sigma(self, X, Y, iters, batchsize):
N = X.shape[0]
P = X.shape[1]
sigma_t = np.random.normal(0, 1, P**2).reshape(P, P)
sigma_t = np.matmul(sigma_t, sigma_t.T)
sigma_t = sigma_t / np.linalg.norm(sigma_t)
curriter = 0
while curriter < iters:
batch_idxs = np.random.choice(N, size=batchsize, replace=False)
X_batch = X[batch_idxs]
Y_batch = Y[batch_idxs]
grad_t = self.__grad_likelihood__(X_batch, Y_batch, sigma_t)
t = 1.0 / (1 + curriter // 100)
sigma_t = self.__projPSD__(sigma_t - t * grad_t)
curriter += 1
sigma = torch.FloatTensor(sigma_t).detach()
return sigma
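# --- Hedged usage sketch (added; not part of the original module) ---
# Fits EXPLORE on a toy set of (in)comparable pairs. The triplets here are
# random and only illustrate the expected shapes; in practice the labels Y
# come from human judgments of comparability.
if __name__ == "__main__":
    n, d = 200, 10
    X1, X2 = torch.randn(n, d), torch.randn(n, d)
    Y = torch.randint(0, 2, (n,))  # 1 = comparable, 0 = incomparable

    metric = EXPLOREDistance()
    metric.fit(X1, X2, Y, iters=100, batchsize=32)

    # itemwise distances between corresponding rows of two batches
    print(metric(X1[:5], X2[:5]).shape)  # (5, 1)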
| 4,287 | 35.338983 | 101 |
py
|
inFairness
|
inFairness-main/inFairness/distances/euclidean_dists.py
|
import torch
from inFairness.distances.distance import Distance
class EuclideanDistance(Distance):
def __init__(self):
super().__init__()
def forward(self, x, y, itemwise_dist=True):
if itemwise_dist:
return torch.cdist(x.unsqueeze(1), y.unsqueeze(1)).reshape(-1, 1)
else:
return torch.cdist(x, y)
class ProtectedEuclideanDistance(Distance):
def __init__(self):
super().__init__()
self._protected_attributes = None
self._num_attributes = None
self.register_buffer("protected_vector", torch.Tensor())
def to(self, device):
"""Moves distance metric to a particular device
Parameters
------------
device: torch.device
"""
assert (
self.protected_vector is not None and len(self.protected_vector.size()) != 0
), "Please fit the metric before moving parameters to device"
self.device = device
self.protected_vector = self.protected_vector.to(self.device)
def fit(self, protected_attributes, num_attributes):
"""Fit Protected Euclidean Distance metric
Parameters
------------
protected_attributes: Iterable[int]
List of attribute indices considered to be protected.
The metric would ignore these protected attributes while
computing distance between data points.
num_attributes: int
Total number of attributes in the data points.
"""
self._protected_attributes = protected_attributes
self._num_attributes = num_attributes
self.protected_vector = torch.ones(num_attributes)
self.protected_vector[protected_attributes] = 0.0
def forward(self, x, y, itemwise_dist=True):
"""
        :param x, y: B x D matrices
        :return: B x 1 matrix with the protected distance computed between x and y
"""
protected_x = (x * self.protected_vector).unsqueeze(1)
protected_y = (y * self.protected_vector).unsqueeze(1)
if itemwise_dist:
return torch.cdist(protected_x, protected_y).reshape(-1, 1)
else:
return torch.cdist(protected_x, protected_y)
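# --- Hedged usage sketch (added; not part of the original module) ---
# Shows how ProtectedEuclideanDistance ignores designated protected columns:
# two points that differ only in a protected attribute are at distance zero.
# The column index below is illustrative.
if __name__ == "__main__":
    metric = ProtectedEuclideanDistance()
    metric.fit(protected_attributes=[2], num_attributes=4)

    x = torch.tensor([[1.0, 2.0, 0.0, 3.0]])
    y = torch.tensor([[1.0, 2.0, 1.0, 3.0]])  # differs only in column 2
    print(metric(x, y))  # tensor([[0.]])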
| 2,264 | 30.458333 | 88 |
py
|
inFairness
|
inFairness-main/inFairness/distances/logistic_sensitive_subspace.py
|
from typing import Iterable
import numpy as np
import torch
from sklearn.linear_model import LogisticRegression
from inFairness.distances import SensitiveSubspaceDistance
from inFairness.utils import datautils, validationutils
class LogisticRegSensitiveSubspace(SensitiveSubspaceDistance):
"""Implements the Softmax Regression model based fair metric as defined in Appendix B.1
of "Training individually fair ML models with sensitive subspace robustness" paper.
This metric assumes that the sensitive attributes are discrete and observed for a small subset
of training data. Assuming data of the form :math:`(X_i, K_i, Y_i)` where :math:`K_i` is the
sensitive attribute of the i-th subject, the model fits a softmax regression model to the data as:
.. math:: \mathbb{P}(K_i = l\\mid X_i) = \\frac{\exp(a_l^TX_i+b_l)}{\\sum_{l=1}^k \\exp(a_l^TX_i+b_l)},\\ l=1,\\ldots,k
Using the span of the matrix :math:`A=[a_1, \cdots, a_k]`, the fair metric is trained as:
.. math:: d_x(x_1,x_2)^2 = (x_1 - x_2)^T(I - P_{\\text{ran}(A)})(x_1 - x_2)
References
-------------
`Yurochkin, Mikhail, Amanda Bower, and Yuekai Sun. "Training individually fair
ML models with sensitive subspace robustness." arXiv preprint arXiv:1907.00020 (2019).`
"""
def __init__(self):
super().__init__()
self.basis_vectors_ = None
self._logreg_models = None
@property
def logistic_regression_models(self):
"""Logistic Regression models trained by the metric to predict each sensitive attribute
given inputs. The property is a list of logistic regression models each corresponding to
:math:`\mathbb{P}(K_i = l\\mid X_i)`. This property can be used to measure the performance
of the logistic regression models.
"""
return self._logreg_models
def fit(
self,
data_X: torch.Tensor,
data_SensitiveAttrs: torch.Tensor = None,
protected_idxs: Iterable[int] = None,
keep_protected_idxs: bool = True,
autoinfer_device: bool = True,
):
"""Fit Logistic Regression Sensitive Subspace distance metric
Parameters
--------------
data_X: torch.Tensor
Input data corresponding to either :math:`X_i` or :math:`(X_i, K_i)` in the equation above.
            If the variable corresponds to :math:`X_i`, then the `data_SensitiveAttrs` parameter should be specified.
            If the variable corresponds to :math:`(X_i, K_i)`, then the `protected_idxs` parameter
            should be specified to indicate the sensitive attributes.
        data_SensitiveAttrs: torch.Tensor
            Represents the sensitive attributes ( :math:`K_i` ) and is used when the `data_X` parameter
            represents :math:`X_i` from the equation above. **Note**: This parameter is mutually exclusive
            with the `protected_idxs` parameter. Specifying both the `data_SensitiveAttrs` and `protected_idxs`
            parameters will raise an error
        protected_idxs: Iterable[int]
            If the `data_X` parameter above represents :math:`(X_i, K_i)`, then this parameter is used
            to provide the indices of the sensitive attributes in `data_X`. **Note**: This parameter is mutually exclusive
            with the `data_SensitiveAttrs` parameter. Specifying both the `data_SensitiveAttrs` and `protected_idxs`
            parameters will raise an error
        keep_protected_idxs: bool
            Set to True (default) if the protected attributes should remain part of the data used to train the model.
            Set to False if the protected attributes should be excluded from the training data.
            Default = True
autoinfer_device: bool
Should the distance metric be automatically moved to an appropriate
device (CPU / GPU) or not? If set to True, it moves the metric
to the same device `X_train` is on. If set to False, keeps the metric
on CPU.
"""
if data_SensitiveAttrs is not None and protected_idxs is None:
basis_vectors_ = self.compute_basis_vectors_data(
X_train=data_X, y_train=data_SensitiveAttrs
)
elif data_SensitiveAttrs is None and protected_idxs is not None:
basis_vectors_ = self.compute_basis_vectors_protected_idxs(
data_X,
protected_idxs=protected_idxs,
keep_protected_idxs=keep_protected_idxs,
)
else:
raise AssertionError(
"Parameters `y_train` and `protected_idxs` are exclusive. Either of these two parameters should be None, and cannot be set to non-None values simultaneously."
)
super().fit(basis_vectors_)
self.basis_vectors_ = basis_vectors_
if autoinfer_device:
device = datautils.get_device(data_X)
super().to(device)
def compute_basis_vectors_protected_idxs(
self, data, protected_idxs, keep_protected_idxs=True
):
dtype = data.dtype
data = datautils.convert_tensor_to_numpy(data)
basis_vectors_ = []
num_attr = data.shape[1]
# Get input data excluding the protected attributes
protected_idxs = sorted(protected_idxs)
free_idxs = [idx for idx in range(num_attr) if idx not in protected_idxs]
X_train = data[:, free_idxs]
Y_train = data[:, protected_idxs]
self.__assert_sensitiveattrs_binary__(Y_train)
self._logreg_models = [
LogisticRegression(solver="liblinear", penalty="l1")
.fit(X_train, Y_train[:, idx])
for idx in range(len(protected_idxs))
]
coefs = np.array(
[
self._logreg_models[idx].coef_.squeeze()
for idx in range(len(protected_idxs))
]
) # ( |protected_idxs|, |free_idxs| )
if keep_protected_idxs:
# To keep protected indices, we add two basis vectors
# First, with logistic regression coefficients with 0 in
# protected indices. Second, with one-hot vectors with 1 in
# protected indices.
basis_vectors_ = np.empty(shape=(2 * len(protected_idxs), num_attr))
for i, protected_idx in enumerate(protected_idxs):
protected_basis_vector = np.zeros(shape=(num_attr))
protected_basis_vector[protected_idx] = 1.0
unprotected_basis_vector = np.zeros(shape=(num_attr))
np.put_along_axis(
unprotected_basis_vector, np.array(free_idxs), coefs[i], axis=0
)
basis_vectors_[2 * i] = unprotected_basis_vector
basis_vectors_[2 * i + 1] = protected_basis_vector
else:
# Protected indices are to be discarded. Therefore, we can
# simply return back the logistic regression coefficients
basis_vectors_ = coefs
basis_vectors_ = torch.tensor(basis_vectors_, dtype=dtype).T
basis_vectors_ = basis_vectors_.detach()
return basis_vectors_
def compute_basis_vectors_data(self, X_train, y_train):
dtype = X_train.dtype
X_train = datautils.convert_tensor_to_numpy(X_train)
y_train = datautils.convert_tensor_to_numpy(y_train)
self.__assert_sensitiveattrs_binary__(y_train)
basis_vectors_ = []
outdim = y_train.shape[-1]
self._logreg_models = [
LogisticRegression(solver="liblinear", penalty="l1")
.fit(X_train, y_train[:, idx])
for idx in range(outdim)
]
basis_vectors_ = np.array(
[
self._logreg_models[idx].coef_.squeeze()
for idx in range(outdim)
]
)
basis_vectors_ = torch.tensor(basis_vectors_, dtype=dtype).T
basis_vectors_ = basis_vectors_.detach()
return basis_vectors_
def __assert_sensitiveattrs_binary__(self, sensitive_attrs):
assert validationutils.is_tensor_binary(
sensitive_attrs
), "Sensitive attributes are required to be binary to learn the metric. Please binarize these attributes before fitting the metric."
| 8,395 | 39.757282 | 174 |
py
|
inFairness
|
inFairness-main/inFairness/distances/__init__.py
|
from inFairness.distances.distance import Distance
from inFairness.distances.euclidean_dists import (
EuclideanDistance,
ProtectedEuclideanDistance,
)
from inFairness.distances.sensitive_subspace_dist import (
SVDSensitiveSubspaceDistance,
SensitiveSubspaceDistance,
)
from inFairness.distances.explore_distance import EXPLOREDistance
from inFairness.distances.logistic_sensitive_subspace import (
LogisticRegSensitiveSubspace,
)
from inFairness.distances.mahalanobis_distance import (
MahalanobisDistances,
SquaredEuclideanDistance,
)
from inFairness.distances.wasserstein_distance import (
WassersteinDistance,
)
__all__ = [symb for symb in globals() if not symb.startswith("_")]
| 717 | 27.72 | 66 |
py
|
inFairness
|
inFairness-main/inFairness/distances/mahalanobis_distance.py
|
import torch
import numpy as np
from functorch import vmap
from inFairness.distances.distance import Distance
class MahalanobisDistances(Distance):
"""Base class implementing the Generalized Mahalanobis Distances
Mahalanobis distance between two points X1 and X2 is computed as:
.. math:: \\text{dist}(X_1, X_2) = (X_1 - X_2) \\Sigma (X_1 - X_2)^{T}
"""
def __init__(self):
super().__init__()
self.device = torch.device("cpu")
self._vectorized_dist = None
self.register_buffer("sigma", torch.Tensor())
def to(self, device):
"""Moves distance metric to a particular device
Parameters
------------
device: torch.device
"""
assert (
self.sigma is not None and len(self.sigma.size()) != 0
), "Please fit the metric before moving parameters to device"
self.device = device
self.sigma = self.sigma.to(self.device)
def fit(self, sigma):
"""Fit Mahalanobis Distance metric
Parameters
------------
sigma: torch.Tensor
Covariance matrix
"""
self.sigma = sigma
@staticmethod
def __compute_dist__(X1, X2, sigma):
"""Computes the distance between two data samples x1 and x2
Parameters
-----------
X1: torch.Tensor
Data sample of shape (n_features) or (N, n_features)
X2: torch.Tensor
Data sample of shape (n_features) or (N, n_features)
Returns:
dist: torch.Tensor
Distance between points x1 and x2. Shape: (N)
"""
# unsqueeze batch dimension if a vector is passed
if len(X1.shape) == 1:
X1 = X1.unsqueeze(0)
if len(X2.shape) == 1:
X2 = X2.unsqueeze(0)
X_diff = X1 - X2
dist = torch.sum((X_diff @ sigma) * X_diff, dim=-1, keepdim=True)
return dist
def __init_vectorized_dist__(self):
"""Initializes a vectorized version of the distance computation"""
if self._vectorized_dist is None:
self._vectorized_dist = vmap(
vmap(
vmap(self.__compute_dist__, in_dims=(None, 0, None)),
in_dims=(0, None, None),
),
in_dims=(0, 0, None),
)
def forward(self, X1, X2, itemwise_dist=True):
"""Computes the distance between data samples X1 and X2
Parameters
-----------
X1: torch.Tensor
Data samples from batch 1 of shape (n_samples_1, n_features)
X2: torch.Tensor
Data samples from batch 2 of shape (n_samples_2, n_features)
itemwise_dist: bool, default: True
Compute the distance in an itemwise manner or pairwise manner.
            In the itemwise fashion (`itemwise_dist=True`), distance is
            computed between the ith data sample in X1 and the ith data sample
            in X2. Thus, the two data samples X1 and X2 should be of the same shape.
In the pairwise fashion (`itemwise_dist=False`), distance is
computed between all the samples in X1 and all the samples in X2.
In this case, the two data samples X1 and X2 can be of different shapes.
Returns
----------
dist: torch.Tensor
Distance between samples of batch 1 and batch 2.
            If `itemwise_dist=True`, item-wise distance is returned of
            shape (n_samples, 1).
            If `itemwise_dist=False`, pair-wise distance is returned of
            shape (B, n_samples_1, n_samples_2), where B is the batch size
            (1 when 2-D inputs are provided).
"""
if itemwise_dist:
np.testing.assert_array_equal(
X1.shape,
X2.shape,
err_msg="X1 and X2 should be of the same shape for itemwise distance computation",
)
dist = self.__compute_dist__(X1, X2, self.sigma)
else:
self.__init_vectorized_dist__()
X1 = X1.unsqueeze(0) if len(X1.shape) == 2 else X1 # (B, N, D)
X2 = X2.unsqueeze(0) if len(X2.shape) == 2 else X2 # (B, M, D)
nsamples_x1 = X1.shape[1]
nsamples_x2 = X2.shape[1]
dist_shape = (-1, nsamples_x1, nsamples_x2)
dist = self._vectorized_dist(X1, X2, self.sigma).view(dist_shape)
return dist
class SquaredEuclideanDistance(MahalanobisDistances):
"""
    Computes the squared Euclidean distance as a special case of the Mahalanobis distance where:
    .. math:: \\Sigma = I_{\\text{num\\_dims}}
"""
def __init__(self):
super().__init__()
self.num_dims_ = None
def fit(self, num_dims: int):
"""Fit Square Euclidean Distance metric
Parameters
-----------------
num_dims: int
the number of dimensions of the space in which the Squared Euclidean distance will be used.
"""
self.num_dims_ = num_dims
sigma = torch.eye(self.num_dims_).detach()
super().fit(sigma)
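# --- Hedged usage sketch (added; not part of the original module) ---
# Contrasts itemwise and pairwise distance computation, using the squared
# Euclidean special case (identity covariance). Shapes are illustrative.
if __name__ == "__main__":
    metric = SquaredEuclideanDistance()
    metric.fit(num_dims=3)

    X1, X2 = torch.randn(4, 3), torch.randn(6, 3)
    # itemwise: i-th row of X1 vs i-th row of X2 (shapes must match)
    print(metric(X1, X2[:4]).shape)  # (4, 1)
    # pairwise: every row of X1 vs every row of X2 (batch dim of 1 is added)
    print(metric(X1, X2, itemwise_dist=False).shape)  # (1, 4, 6)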
| 5,180 | 30.785276 | 107 |
py
|
inFairness
|
inFairness-main/inFairness/distances/sensitive_subspace_dist.py
|
import numpy as np
import torch
from sklearn.decomposition import TruncatedSVD
from typing import List
from inFairness.distances.mahalanobis_distance import MahalanobisDistances
from inFairness.utils import datautils
class SensitiveSubspaceDistance(MahalanobisDistances):
"""Implements Sensitive Subspace metric base class that accepts the
basis vectors of a sensitive subspace, and computes a projection
that ignores the sensitive subspace.
The projection from the sensitive subspace basis vectors (A) is computed as:
    .. math:: P^{'} = I - (A (A^{T} A)^{-1} A^{T})
"""
def __init__(self):
super().__init__()
def fit(self, basis_vectors):
"""Fit Sensitive Subspace Distance metric
Parameters
--------------
basis_vectors: torch.Tensor
Basis vectors of the sensitive subspace
"""
sigma = self.compute_projection_complement(basis_vectors)
super().fit(sigma)
def compute_projection_complement(self, basis_vectors):
"""Compute the projection complement of the space
defined by the basis_vectors:
projection complement given basis vectors (A) is computed as:
        .. math:: P^{'} = I - (A (A^{T} A)^{-1} A^{T})
Parameters
-------------
basis_vectors: torch.Tensor
Basis vectors of the sensitive subspace
Dimension (d, k) where d is the data features dimension
and k is the number of protected dimensions
Returns
----------
projection complement: torch.Tensor
Projection complement computed as described above.
Shape (d, d) where d is the data feature dimension
"""
# Computing the orthogonal projection
        # A (A^T A)^{-1} A^T
projection = torch.linalg.inv(torch.matmul(basis_vectors.T, basis_vectors))
projection = torch.matmul(basis_vectors, projection)
# Shape: (n_features, n_features)
projection = torch.matmul(projection, basis_vectors.T)
# Complement the projection as: (I - Proj)
projection_complement_ = torch.eye(projection.shape[0]) - projection
projection_complement_ = projection_complement_.detach()
return projection_complement_
class SVDSensitiveSubspaceDistance(SensitiveSubspaceDistance):
"""Sensitive Subspace metric that uses SVD to find the basis vectors of
the sensitive subspace. The metric learns a subspace from a set of
user-curated comparable data samples.
Proposed in Section B.2 of Training individually fair ML models
with sensitive subspace robustness
References
-------------
`Yurochkin, Mikhail, Amanda Bower, and Yuekai Sun. "Training individually fair
ML models with sensitive subspace robustness." arXiv preprint arXiv:1907.00020 (2019).`
"""
def __init__(self):
super().__init__()
self.n_components_ = None
def fit(self, X_train, n_components, autoinfer_device=True):
"""Fit SVD Sensitive Subspace distance metric parameters
Parameters
-------------
X_train: torch.Tensor | List[torch.Tensor]
Training data containing comparable data samples.
If only one set of comparable data samples is provided, the input
should be a torch.Tensor of shape :math:`(N, D)`. For multiple sets
of comparable data samples a list of shape
:math:`[ (N_1, D), \\cdots, (N_x, D)]` can be provided.
n_components: int
Desired number of latent variable dimensions
autoinfer_device: bool
Should the distance metric be automatically moved to an appropriate
device (CPU / GPU) or not? If set to True, it moves the metric
to the same device `X_train` is on. If set to False, keeps the metric
on CPU.
"""
self.n_components_ = n_components
basis_vectors = self.compute_basis_vectors(X_train, n_components)
super().fit(basis_vectors)
if autoinfer_device:
device = datautils.get_device(X_train)
super().to(device)
def __process_input_data__(self, X_train):
"""Process metric training data to convert from tensor to numpy and
remove the mean and concatenate if multiple sets of training data
is provided
"""
if isinstance(X_train, torch.Tensor) or isinstance(X_train, np.ndarray):
X_train = datautils.convert_tensor_to_numpy(X_train)
return X_train
if isinstance(X_train, list):
X_train = [datautils.convert_tensor_to_numpy(X) for X in X_train]
# Subtract mean and concatenate all sets of features
X_norm = np.vstack([X - np.mean(X, axis=0) for X in X_train])
return X_norm
raise TypeError(
"Provided data `X_train` should either be Tensor, np.ndarray or a list of these."
)
def compute_basis_vectors(self, X_train, n_components):
"""Compute basis vectors using SVD"""
X_train = self.__process_input_data__(X_train)
tSVD = TruncatedSVD(n_components=n_components)
tSVD.fit(X_train)
basis_vectors_ = tSVD.components_.T # Shape: (n_features, n_components)
basis_vectors_ = torch.Tensor(basis_vectors_)
return basis_vectors_
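# --- Hedged usage sketch (added; not part of the original module) ---
# Learns a sensitive subspace via truncated SVD from a toy set of comparable
# samples; the number of components below is illustrative.
if __name__ == "__main__":
    comparable_samples = torch.randn(200, 20)

    metric = SVDSensitiveSubspaceDistance()
    metric.fit(comparable_samples, n_components=5)

    x1, x2 = torch.randn(8, 20), torch.randn(8, 20)
    print(metric(x1, x2).shape)  # (8, 1) itemwise distances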
| 5,490 | 35.852349 | 95 |
py
|
inFairness
|
inFairness-main/inFairness/distances/distance.py
|
from abc import ABCMeta, abstractmethod
from torch import nn
class Distance(nn.Module, metaclass=ABCMeta):
"""
Abstract base class for model distances
"""
def __init__(self):
super().__init__()
def fit(self, **kwargs):
"""
Fits the metric parameters for learnable metrics
Default functionality is to do nothing. Subclass
should overwrite this method to implement custom fit
logic
"""
pass
def load_state_dict(self, state_dict, strict=True):
buffer_keys = [bufferitem[0] for bufferitem in self.named_buffers()]
for key, val in state_dict.items():
if key not in buffer_keys and strict:
raise AssertionError(
f"{key} not found in metric state and strict parameter is set to True. Either set strict parameter to False or remove extra entries from the state dictionary."
)
setattr(self, key, val)
@abstractmethod
def forward(self, x, y):
"""
Subclasses must override this method to compute particular distances
Returns:
Tensor: distance between two inputs
"""
| 1,196 | 28.925 | 179 |
py
|
inFairness
|
inFairness-main/inFairness/utils/datautils.py
|
from typing import Iterable
import torch
import numpy as np
from itertools import product
def generate_data_pairs(n_pairs, datasamples_1, datasamples_2=None, comparator=None):
"""Utility function to generate (in)comparable data pairs given data samples. Use case includes
creating a dataset of comparable and incomparable data for the EXPLORE distance metric which
learns from such data samples.
Parameters
------------
n_pairs: int
Number of pairs to construct
datasamples_1: numpy.ndarray
Array of data samples of shape (N_1, *)
datasamples_2: numpy.ndarray
(Optional) array of data samples of shape (N_2, *).
If datasamples_2 is provided, then data pairs are constructed between
datasamples_1 and datasamples_2.
If datasamples_2 is not provided, then data pairs are constructed within
datasamples_1
comparator: function
A lambda function that given two samples returns True if they should
be paired, and False if not.
If `comparator` is not defined, then random data samples are paired together.
Example: `comparator = lambda x, y: (x == y)`
Returns
----------
idxs: numpy.ndarray
A (n_pairs, 2) shaped array with indices of data sample pairs
"""
if datasamples_2 is None:
datasamples_2 = datasamples_1
nsamples_1 = datasamples_1.shape[0]
nsamples_2 = datasamples_2.shape[0]
if comparator is None:
ntotal = nsamples_1 * nsamples_2
assert (
n_pairs <= ntotal
), f"Number of desired data pairs {n_pairs} is greater than possible combinations {ntotal}"
idxs = np.random.choice(ntotal, n_pairs, replace=False)
idxs1, idxs2 = np.unravel_index(idxs, shape=(nsamples_1, nsamples_2))
idxs = np.stack((idxs1, idxs2), axis=-1)
else:
all_idxs = [
(idx1, idx2)
for idx1, idx2 in product(range(nsamples_1), range(nsamples_2))
if comparator(datasamples_1[idx1], datasamples_2[idx2])
]
assert n_pairs <= len(all_idxs), (
f"Number of desired data pairs {n_pairs} is greater than possible "
+ "combinations {len(all_idxs)}"
)
idx_positions = np.random.choice(len(all_idxs), n_pairs, replace=False)
idxs = np.array([all_idxs[x] for x in idx_positions])
return idxs
def convert_tensor_to_numpy(tensor):
"""Converts a PyTorch tensor to numpy array
If the provided `tensor` is not a PyTorch tensor, it returns the same object back
with no modifications
Parameters
-----------
tensor: torch.Tensor
Tensor to be converted to numpy array
Returns
----------
array_np: numpy.ndarray
Numpy array of the provided tensor
"""
if torch.is_tensor(tensor):
array_np = tensor.detach().cpu().numpy()
return array_np
return tensor
def include_exclude_terms(
data_terms: Iterable[str], include: Iterable[str] = (), exclude: Iterable[str] = ()
):
"""
given a set of data terms, return a resulting set depending on specified included and excluded terms.
Parameters
-----------
data_terms: string iterable
set of terms to be filtered
include: string iterable
set of terms to be included, if not specified all data_terms are included
exclude: string iterable
set of terms to be excluded from data_terms
Returns
----------
terms: list of strings
resulting terms in alphabetical order.
"""
terms = set(include) if len(include) > 0 else set(data_terms)
if len(exclude) > 0:
terms = terms.difference(set(exclude))
terms = sorted(list(terms))
return terms
def get_device(obj):
"""Returns a device (cpu/cuda) based on the type of the reference object
Parameters
-------------
obj: torch.Tensor
"""
device = torch.device("cpu")
# If reference object is a tensor, use its device
if torch.is_tensor(obj):
device = obj.device
# If reference object is a list, check if first element is a tensor
    # and if it is a tensor, use its device
if isinstance(obj, list) and torch.is_tensor(obj[0]):
device = obj[0].device
return device
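# --- Hedged usage sketch (added; not part of the original module) ---
# Draws comparable pairs from toy labelled data: two samples are treated as
# comparable when they share a label. Array sizes are illustrative only.
if __name__ == "__main__":
    features = np.random.randn(30, 5)
    labels = np.random.randint(0, 3, size=30)

    idxs = generate_data_pairs(
        n_pairs=50,
        datasamples_1=labels,
        comparator=lambda a, b: a == b,
    )
    pairs_x1, pairs_x2 = features[idxs[:, 0]], features[idxs[:, 1]]
    print(pairs_x1.shape, pairs_x2.shape)  # (50, 5) (50, 5)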
| 4,492 | 31.092857 | 105 |
py
|
inFairness
|
inFairness-main/inFairness/utils/misc.py
|
from functools import wraps
import inspect
def initializer(func):
"""
Automatically assigns the parameters.
>>> class process:
... @initializer
... def __init__(self, cmd, reachable=False, user='root'):
... pass
>>> p = process('halt', True)
>>> p.cmd, p.reachable, p.user
('halt', True, 'root')
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
(
names,
varargs,
varkw,
defaults,
kwonlyargs,
kwonlydefaults,
annotations,
) = inspect.getfullargspec(func)
for name, arg in list(zip(names[1:], args)) + list(kwargs.items()):
setattr(self, name, arg)
for name, default in zip(reversed(names), reversed(defaults)):
if not hasattr(self, name):
setattr(self, name, default)
func(self, *args, **kwargs)
return wrapper
| 953 | 23.461538 | 75 |
py
|
inFairness
|
inFairness-main/inFairness/utils/ndcg.py
|
import torch
from functorch import vmap
def discounted_cumulative_gain(relevances):
numerator = torch.pow(torch.tensor([2.0]), relevances)
denominator = torch.log2(torch.arange(len(relevances), dtype=torch.float) + 2)
return (numerator / denominator).sum()
def normalized_discounted_cumulative_gain(relevances):
"""Takes a vector of relevances and computes the normalized discounted cumulative gain
    Refer to `Discounted Cumulative Gain (Wikipedia) <https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_
for more information.
Parameters
---------
relevances: torch.Tensor
vector of dimension N where each element is the relevance of some objects in a particular order
Returns
-------
normalized_discounted_cumulative_gain: torch.Tensor
scalar value corresponding to the normalized discounted cumulative gain
"""
dcg = discounted_cumulative_gain(relevances)
sorted_rels, _ = torch.sort(relevances, descending=True)
idcg = discounted_cumulative_gain(sorted_rels)
return dcg / idcg
"""
Vectorizes :func:`normalized_discounted_cumulative_gain` to work on a batch of vectors of relevances
given in a tensor of dimensions (B, N). The NDCG is computed along the last dimension, so the batched
version returns B values.
"""
vect_normalized_discounted_cumulative_gain = vmap(
normalized_discounted_cumulative_gain, in_dims=(0)
)
"""
Adds a further outer dimension to the vectorized normalized discounted cumulative gain so it works
on Monte Carlo samples of rankings (e.g. samples of a Plackett-Luce distribution).
This function takes a tensor of size (S, B, N) and returns a tensor of size (S, B) with the
NDCG of each vector.
"""
monte_carlo_vect_ndcg = vmap(vect_normalized_discounted_cumulative_gain, in_dims=(0,))
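# --- Hedged usage sketch (added; not part of the original module) ---
# Computes NDCG for a single relevance vector and for a batch of vectors.
# Relevance values are illustrative only.
if __name__ == "__main__":
    rels = torch.tensor([3.0, 2.0, 3.0, 0.0, 1.0, 2.0])
    print(normalized_discounted_cumulative_gain(rels))  # scalar in (0, 1]

    batch = torch.rand(4, 10)  # 4 rankings of 10 items each
    print(vect_normalized_discounted_cumulative_gain(batch).shape)  # (4,)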
| 1,822 | 36.204082 | 108 |
py
|
inFairness
|
inFairness-main/inFairness/utils/validationutils.py
|
import numpy as np
def is_tensor_binary(data: np.ndarray):
"""Checks if the data is binary (0/1) or not. Return True if it is binary data
Parameters
--------------
data: np.ndarray
        Data to validate as binary or not
Returns
----------
is_binary: bool
True if data is binary. False if not
"""
nonbindata = (data != 0) & (data != 1)
    has_nonbin_data = nonbindata.any()
return not has_nonbin_data
| 475 | 21.666667 | 82 |
py
|
inFairness
|
inFairness-main/inFairness/utils/plackett_luce.py
|
"""
This file implements Plackett-Luce distribution and is taken from the
following source:
Source: Github PyTorch PR#50362 - Add Plackett-Luce Distribution
URL: https://github.com/pytorch/pytorch/pull/50362/
Author: Jeremy Salwen (https://github.com/jeremysalwen)
"""
from typing import Optional
import torch
from torch.distributions import Distribution, constraints
class PlackettLuce(Distribution):
"""
Creates a Plackett-Luce distribution over permutations, parameterized by :attr: `logits`.
The Plackett-Luce distribution defines a probability distribution over permutations by assigning a score `a_i` to
each element, and repeatedly choosing the next element by sampling from the remaining elements with a probability
proportional to their score.
If :attr:`logits` is 1-dimensional with length-`K`, each element is the log-score of the object at that index.
If :attr:`logits` is N-dimensional, the first N-1 dimensions are treated as a batch of log-score vectors.
This distribution supports batched operations with permutations of different sizes, by using the :attr:
`permutation_sizes` attribute to specify the permutation size of each score vector in the batch. If the
permutation_size is `N` for a given index of the batch, the first `N` entries of the resulting sample will be a
    permutation of the numbers `0` through `N-1`, while the remainder have unspecified values.
Example::
>>> m = PlackettLuce(torch.tensor([[0, 1, -1], [0, 1, 2]]), torch.tensor([3, 2], dtype=torch.int64))
>>> m.sample()
tensor([[ 1, 0, 2],
[ 0, 1, 2]])
Args:
logits (Tensor): The log of the Plackett-Luce distribution scores `a_i`.
permutation_sizes (Tensor): Optional sizes of the permutations sampled by the distribution. Should match the
shape of the logits, with the last dimension stripped.
"""
arg_constraints = {"logits": constraints.real}
support = constraints.integer_interval(-1, torch.iinfo(torch.int64).max)
def __init__(
self,
logits: torch.Tensor,
permutation_sizes: Optional[torch.Tensor] = None,
validate_args=None,
):
batch_shape = logits.shape[:-1]
max_size = logits.shape[-1]
if permutation_sizes is None:
permutation_sizes = torch.full(
batch_shape, max_size, dtype=torch.int64, device=logits.device
)
else:
permutation_sizes = permutation_sizes.expand(batch_shape)
if validate_args:
if (logits < -1e30).any():
raise ValueError(
"Plackett-Luce implementation cannot handle logits less than -1e30"
)
self.logits = logits
self.permutation_sizes = permutation_sizes
# Mask is true for invalid indices
self.mask: torch.Tensor = (
torch.zeros(*batch_shape, max_size + 1, device=logits.device)
.scatter(-1, permutation_sizes.unsqueeze(-1), 1)[..., :-1]
.cumsum(dim=-1)
.bool()
)
event_shape = torch.Size((max_size,))
super(PlackettLuce, self).__init__(
batch_shape, event_shape, validate_args=validate_args
)
def sample(self, sample_shape=torch.Size()):
with torch.no_grad():
expanded = self.logits.expand(*sample_shape, *[-1] * len(self.logits.shape))
gumbel_noise = -torch.log(-torch.log(torch.rand_like(expanded)))
scores = torch.where(self.mask, -1e35, expanded + gumbel_noise)
sorted_scores, indices = torch.sort(scores, dim=-1, descending=True)
return indices.masked_fill(self.mask, -1).detach()
def log_prob(self, value: torch.Tensor):
if self._validate_args:
self._validate_sample(value)
return _plackett_luce_log_prob(
self.logits, self.permutation_sizes, self.mask, value
)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(PlackettLuce, _instance)
batch_shape = torch.Size(batch_shape)
logits_shape = batch_shape + (self.logits.shape[-1],)
new.logits = self.logits.expand(logits_shape)
new.mask = self.mask.expand(logits_shape)
new.permutation_sizes = self.permutation_sizes.expand(batch_shape)
super(PlackettLuce, new).__init__(
batch_shape, self.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
def _validate_sample(self, value: torch.Tensor):
super()._validate_sample(value)
max_int64 = torch.iinfo(torch.int64).max
if (
value.masked_fill(self.mask, max_int64).sort(-1).values
!= torch.arange(0, value.shape[-1], dtype=torch.int64).masked_fill(
self.mask, max_int64
)
).any():
raise ValueError("Not a valid permutation or batch of permutations.")
@torch.jit.script_if_tracing
def _plackett_luce_log_prob(logits, permutation_sizes, mask, value):
value = value.masked_fill(mask, 0)
logits = logits.masked_fill(mask, -1e35).expand(value.shape)
log_probs = torch.zeros(value.shape[:-1], device=value.device)
for i in range(int(permutation_sizes.max())):
log_probs += torch.where(
mask[..., i],
0.0,
logits.log_softmax(dim=-1).gather(-1, value[..., i : i + 1]).squeeze(-1),
)
logits = logits.scatter(-1, value[..., i : i + 1], -1e35)
return log_probs
| 5,646 | 39.335714 | 117 |
py
|
inFairness
|
inFairness-main/inFairness/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
inFairness
|
inFairness-main/inFairness/utils/params.py
|
import torch.nn
def freeze_network(network: torch.nn.Module):
"""Freeze network parameters.
:param network: torch.nn.Module
:type network: torch.nn.Module
"""
for p in network.parameters():
p.requires_grad = False
def unfreeze_network(network: torch.nn.Module):
"""Unfreeze network parameters.
:param network: torch.nn.Module
:type network: torch.nn.Module
"""
for p in network.parameters():
p.requires_grad = True
| 475 | 22.8 | 47 |
py
|
inFairness
|
inFairness-main/inFairness/utils/postprocessing.py
|
import torch
def build_graph_from_dists(
dists: torch.Tensor,
scale: float = None,
threshold: float = None,
normalize: bool = False,
):
"""Build the adjacency matrix `W` given distances
Parameters
-------------
dists: torch.Tensor
Distance matrix between data points. Shape: (N, N)
scale: float
Parameter used to scale the computed distances
threshold: float
Parameter used to determine if two data points are connected or not.
Distances below threshold value are connected, and beyond
threshold value are disconnected.
normalize: bool
Whether to normalize the adjacency matrix or not
Returns
----------
W: torch.Tensor
        Adjacency matrix. It contains data points which are connected
        to at least one other datapoint. Isolated datapoints, i.e. ones which
        are not connected to any other datapoint, are not included in the
        adjacency matrix.
idxs_in: torch.Tensor
Indices of data points which are included in the adjacency matrix
"""
scale = 1.0 if scale is None else scale
threshold = 1e10 if threshold is None else threshold
W = torch.exp(-(dists * scale)) * (torch.sqrt(dists) < threshold)
idxs_in = torch.where(W.sum(axis=1) > 0.0)[0]
W = W[idxs_in]
W = W[:, idxs_in]
if normalize:
D_inv_sqrt = 1.0 / torch.sqrt(W.sum(axis=1))
W = W * D_inv_sqrt * D_inv_sqrt.view(-1, 1)
return W, idxs_in
def get_laplacian(W: torch.Tensor, normalize: bool = False):
"""Get the Laplacian of the matrix `W`
Parameters
-------------
W: torch.Tensor
Adjacency matrix of shape (N, N)
normalize: bool
Whether to normalize the computed laplacian or not
Returns
-------------
Laplacian: torch.Tensor
Laplacian of the adjacency matrix
"""
D = W.sum(axis=1)
L = torch.diag(D) - W
if normalize:
L = L / D.view(-1, 1)
return L
def laplacian_solve(L: torch.Tensor, y_hat: torch.Tensor, lambda_param: float = None):
"""Solves a system of linear equations to get the post-processed output.
    The system of linear equations it solves is:
    :math:`\hat{f} = (I + \lambda L)^{-1} \hat{y}`
Parameters
------------
L: torch.Tensor
Laplacian matrix
y_hat: torch.Tensor
Model predicted output class probabilities
lambda_param: float
Weight for the laplacian regularizer
Returns
----------
y: torch.Tensor
Post-processed solution according to the equation above
"""
lambda_param = 1.0 if lambda_param is None else lambda_param
n = L.shape[0]
y = torch.linalg.solve(lambda_param * L + torch.eye(n), y_hat)
return y
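# --- Hedged usage sketch (added; not part of the original module) ---
# Chains the three helpers on toy data: build a graph from pairwise squared
# distances, take its Laplacian, and smooth noisy predictions through
# `laplacian_solve`. The scale, threshold and lambda values are illustrative.
if __name__ == "__main__":
    X = torch.randn(20, 3)
    dists = torch.cdist(X, X) ** 2

    W, idxs_in = build_graph_from_dists(dists, scale=0.1, threshold=2.0)
    L = get_laplacian(W)
    y_hat = torch.rand(len(idxs_in), 2)  # noisy class scores for kept points

    y_smoothed = laplacian_solve(L, y_hat, lambda_param=1.0)
    print(y_smoothed.shape)  # same shape as y_hat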
| 2,900 | 27.441176 | 86 |
py
|
inFairness
|
inFairness-main/inFairness/auditor/sensr_auditor.py
|
import torch
from torch.nn import Parameter
from inFairness.auditor import Auditor
from inFairness.utils.params import freeze_network, unfreeze_network
from inFairness.utils.datautils import get_device
class SenSRAuditor(Auditor):
"""SenSR Auditor implements the functionality to generate worst-case examples
by solving the following optimization equation:
.. math:: x_{t_b}^* \gets arg\max_{x \in X} l((x,y_{t_b}),h) - \lambda d_x^2(x_{t_b},x)
Proposed in `Training individually fair ML models with sensitive subspace robustness <https://arxiv.org/abs/1907.00020>`_
Parameters
--------------
loss_fn: torch.nn.Module
Loss function
distance_x: inFairness.distances.Distance
Distance metric in the input space
num_steps: int
Number of update steps should the auditor perform to find worst-case examples
lr: float
Learning rate
"""
def __init__(
self, loss_fn, distance_x, num_steps, lr, max_noise=0.1, min_noise=-0.1
):
self.loss_fn = loss_fn
self.distance_x = distance_x
self.num_steps = num_steps
self.lr = lr
self.max_noise = max_noise
self.min_noise = min_noise
super().__init__()
def generate_worst_case_examples(self, network, x, y, lambda_param, optimizer=None):
"""Generate worst case example given the input data sample batch `x`
Parameters
------------
network: torch.nn.Module
PyTorch network model
x: torch.Tensor
Batch of input datapoints
y: torch.Tensor
Batch of output datapoints
lambda_param: float
Lambda weighting parameter as defined in the equation above
optimizer: torch.optim.Optimizer, optional
PyTorch Optimizer object
Returns
---------
X_worst: torch.Tensor
Worst case examples for the provided input datapoints
"""
assert optimizer is None or issubclass(optimizer, torch.optim.Optimizer), (
"`optimizer` object should either be None or be a PyTorch optimizer "
+ "and an instance of the `torch.optim.Optimizer` class"
)
freeze_network(network)
lambda_param = lambda_param.detach()
delta = Parameter(
torch.rand_like(x) * (self.max_noise - self.min_noise) + self.min_noise
)
if optimizer is None:
optimizer = torch.optim.Adam([delta], lr=self.lr)
else:
optimizer = optimizer([delta], lr=self.lr)
for _ in range(self.num_steps):
optimizer.zero_grad()
x_worst = x + delta
input_dist = self.distance_x(x, x_worst)
out_x_worst = network(x_worst)
out_dist = self.loss_fn(out_x_worst, y)
audit_loss = -(out_dist - lambda_param * input_dist)
audit_loss.mean().backward()
optimizer.step()
unfreeze_network(network)
return (x + delta).detach()
def audit(
self,
network,
X_audit,
Y_audit,
audit_threshold=None,
lambda_param=None,
confidence=0.95,
optimizer=None,
):
"""Audit a model for individual fairness
Parameters
------------
network: torch.nn.Module
PyTorch network model
X_audit: torch.Tensor
Auditing data samples. Shape: (B, *)
Y_audit: torch.Tensor
Auditing data samples. Shape: (B)
loss_fn: torch.nn.Module
Loss function
audit_threshold: float, optional
Auditing threshold to consider a model individually fair or not
If `audit_threshold` is specified, the `audit` procedure determines
if the model is individually fair or not.
If `audit_threshold` is not specified, the `audit` procedure simply
returns the mean and lower bound of loss ratio, leaving the determination
of models' fairness to the user.
Default=None
lambda_param: float
Lambda weighting parameter as defined in the equation above
confidence: float, optional
Confidence value. Default = 0.95
optimizer: torch.optim.Optimizer, optional
PyTorch Optimizer object. Default: torch.optim.SGD
Returns
------------
audit_response: inFairness.auditor.datainterface.AuditorResponse
Audit response containing test statistics
"""
assert optimizer is None or issubclass(optimizer, torch.optim.Optimizer), (
"`optimizer` object should either be None or be a PyTorch optimizer "
+ "and an instance of the `torch.optim.Optimizer` class"
)
device = get_device(X_audit)
if lambda_param is None:
lambda_param = torch.tensor(1.0, device=device)
if isinstance(lambda_param, float):
lambda_param = torch.tensor(lambda_param, device=device)
if optimizer is None:
optimizer = torch.optim.SGD
X_worst = self.generate_worst_case_examples(
network=network,
x=X_audit,
y=Y_audit,
lambda_param=lambda_param,
optimizer=optimizer,
)
loss_ratio = self.compute_loss_ratio(
X_audit=X_audit,
X_worst=X_worst,
Y_audit=Y_audit,
network=network,
loss_fn=self.loss_fn,
)
audit_response = self.compute_audit_result(
loss_ratio, audit_threshold, confidence
)
return audit_response
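# --- Hedged usage sketch (added; not part of the original module) ---
# Generates SenSR worst-case counterparts for a toy classifier. The network,
# metric and hyper-parameters are placeholders meant only to illustrate the
# call signature of `generate_worst_case_examples`.
if __name__ == "__main__":
    from inFairness.distances import SquaredEuclideanDistance

    metric = SquaredEuclideanDistance()
    metric.fit(num_dims=10)

    network = torch.nn.Sequential(
        torch.nn.Linear(10, 16), torch.nn.ReLU(), torch.nn.Linear(16, 2)
    )
    loss_fn = torch.nn.CrossEntropyLoss()
    auditor = SenSRAuditor(loss_fn=loss_fn, distance_x=metric, num_steps=50, lr=0.01)

    X = torch.randn(64, 10)
    Y = torch.randint(0, 2, (64,))
    X_worst = auditor.generate_worst_case_examples(
        network, X, Y, lambda_param=torch.tensor(1.0)
    )
    print(X_worst.shape)  # same shape as X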
| 5,871 | 32.175141 | 125 |
py
|
inFairness
|
inFairness-main/inFairness/auditor/sensei_auditor.py
|
import torch
from torch.nn import Parameter
from inFairness.auditor.auditor import Auditor
from inFairness.utils.params import freeze_network, unfreeze_network
from inFairness.utils.datautils import get_device
class SenSeIAuditor(Auditor):
"""SenSeI Auditor implements the functionality to generate worst-case examples
by solving the following optimization equation:
.. math:: x_{t_b}' \gets arg\max_{x' \in X}\{d_{Y}(h_{\\theta_t}(X_{t_b}),h_{\\theta_t}(x')) - \lambda_t d_{X}(X_{t_b},x')\}
Proposed in `SenSeI: Sensitive Set Invariance for Enforcing Individual Fairness <https://arxiv.org/abs/2006.14168>`_
Parameters
--------------
distance_x: inFairness.distances.Distance
Distance metric in the input space
distance_y: inFairness.distances.Distance
Distance metric in the output space
num_steps: int
Number of update steps should the auditor perform to find worst-case examples
lr: float
Learning rate
"""
def __init__(
self, distance_x, distance_y, num_steps, lr, max_noise=0.1, min_noise=-0.1
):
self.distance_x = distance_x
self.distance_y = distance_y
self.num_steps = num_steps
self.lr = lr
self.max_noise = max_noise
self.min_noise = min_noise
super().__init__()
def generate_worst_case_examples(self, network, x, lambda_param, optimizer=None):
"""Generate worst case example given the input data sample batch `x`
Parameters
------------
network: torch.nn.Module
PyTorch network model
x: torch.Tensor
Batch of input datapoints
lambda_param: float
Lambda weighting parameter as defined in the equation above
optimizer: torch.optim.Optimizer, optional
PyTorch Optimizer object. Default: torch.optim.Adam
Returns
---------
X_worst: torch.Tensor
Worst case examples for the provided input datapoints
"""
assert optimizer is None or issubclass(optimizer, torch.optim.Optimizer), (
"`optimizer` object should either be None or be a PyTorch optimizer "
+ "and an instance of the `torch.optim.Optimizer` class"
)
freeze_network(network)
lambda_param = lambda_param.detach()
delta = Parameter(
torch.rand_like(x) * (self.max_noise - self.min_noise) + self.min_noise
)
if optimizer is None:
optimizer = torch.optim.Adam([delta], lr=self.lr)
else:
optimizer = optimizer([delta], lr=self.lr)
for _ in range(self.num_steps):
optimizer.zero_grad()
x_worst = x + delta
input_dist = self.distance_x(x, x_worst)
out_x = network(x)
out_x_worst = network(x_worst)
out_dist = self.distance_y(out_x, out_x_worst)
audit_loss = -(out_dist - lambda_param * input_dist)
audit_loss.sum().backward()
optimizer.step()
unfreeze_network(network)
return (x + delta).detach()
def audit(
self,
network,
X_audit,
Y_audit,
loss_fn,
audit_threshold=None,
lambda_param=None,
confidence=0.95,
optimizer=None,
):
"""Audit a model for individual fairness
Parameters
------------
network: torch.nn.Module
PyTorch network model
X_audit: torch.Tensor
Auditing data samples. Shape: (B, *)
Y_audit: torch.Tensor
            Labels of the auditing data samples. Shape: (B)
loss_fn: torch.nn.Module
Loss function
audit_threshold: float, optional
Auditing threshold to consider a model individually fair or not
If `audit_threshold` is specified, the `audit` procedure determines
if the model is individually fair or not.
If `audit_threshold` is not specified, the `audit` procedure simply
returns the mean and lower bound of loss ratio, leaving the determination
of models' fairness to the user.
Default=None
lambda_param: float
Lambda weighting parameter as defined in the equation above
confidence: float, optional
Confidence value. Default = 0.95
optimizer: torch.optim.Optimizer, optional
PyTorch Optimizer object. Default: torch.optim.SGD
Returns
------------
audit_response: inFairness.auditor.datainterface.AuditorResponse
Audit response containing test statistics
"""
assert optimizer is None or issubclass(optimizer, torch.optim.Optimizer), (
"`optimizer` object should either be None or be a PyTorch optimizer "
+ "and an instance of the `torch.optim.Optimizer` class"
)
device = get_device(X_audit)
if lambda_param is None:
lambda_param = torch.tensor(1.0, device=device)
elif isinstance(lambda_param, float):
lambda_param = torch.tensor(lambda_param, device=device)
if optimizer is None:
optimizer = torch.optim.SGD
X_worst = self.generate_worst_case_examples(
network=network, x=X_audit, lambda_param=lambda_param, optimizer=optimizer
)
loss_ratio = self.compute_loss_ratio(
X_audit=X_audit,
X_worst=X_worst,
Y_audit=Y_audit,
network=network,
loss_fn=loss_fn,
)
audit_response = self.compute_audit_result(
loss_ratio, audit_threshold, confidence
)
return audit_response
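if __name__ == "__main__":
    # Brief usage sketch (illustrative only): generate worst-case perturbations
    # with SenSeIAuditor. `_SquaredEuclidean` is a hypothetical stand-in for the
    # inFairness input/output distance metrics; the network and data are random.
    import torch.nn as nn

    class _SquaredEuclidean(nn.Module):
        def forward(self, x1, x2):
            return ((x1 - x2) ** 2).sum(dim=-1)

    net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
    auditor = SenSeIAuditor(
        distance_x=_SquaredEuclidean(),
        distance_y=_SquaredEuclidean(),
        num_steps=20,
        lr=0.05,
    )
    x = torch.randn(16, 4)
    x_worst = auditor.generate_worst_case_examples(
        net, x, lambda_param=torch.tensor(1.0)
    )
    print((x - x_worst).norm(dim=1).mean())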
| 5,904 | 33.331395 | 128 |
py
|
inFairness
|
inFairness-main/inFairness/auditor/datainterface.py
|
import torch
from dataclasses import dataclass
@dataclass
class AuditorResponse:
"""Class to store a result from the auditor"""
lossratio_mean: float = None
lossratio_std: float = None
lower_bound: float = None
threshold: float = None
pval: float = None
confidence: float = None
is_model_fair: bool = None
| 342 | 19.176471 | 50 |
py
|
inFairness
|
inFairness-main/inFairness/auditor/senstir_auditor.py
|
import torch
from torch.nn.parameter import Parameter
from inFairness.distances import (
WassersteinDistance,
MahalanobisDistances,
)
from inFairness.auditor import Auditor
from inFairness.utils.params import freeze_network, unfreeze_network
class SenSTIRAuditor(Auditor):
"""SenSTIR Auditor generates worst-case examples by solving the
following optimization problem:
    .. math:: q^{'} \gets arg\max_{q^{'}}\{||h_{\\theta_t}(q) - h_{\\theta_t}(q^{'})||_{2}^{2} - \lambda_t d_{Q}(q,q^{'})\}
    At a high level, it finds :math:`q^{'}` that maximizes the score difference while keeping
    the fair distance `distance_q` to the original query `q` small.
Proposed in `Individually Fair Rankings <https://arxiv.org/abs/2103.11023>`_
Parameters
-----------
distance_x: inFairness.distances.Distance
Distance metric in the input space. Should be an instance of
:class:`~inFairness.distances.MahalanobisDistance`
distance_y: inFairness.distances.Distance
Distance metric in the output space. Should be an instance of
:class:`~inFairness.distances.MahalanobisDistance`
num_steps: int
number of optimization steps taken to produce the worst examples.
lr: float
learning rate of the optimization
    max_noise: float
        upper bound of the uniform distribution used to initialize the noise added to q to form q'
    min_noise: float
        lower bound of the uniform distribution used to initialize the noise added to q to form q'
"""
def __init__(
self,
distance_x: MahalanobisDistances,
distance_y: MahalanobisDistances,
num_steps: int,
lr: float,
max_noise: float = 0.1,
min_noise: float = -0.1,
):
self.distance_x = distance_x
self.distance_y = distance_y
self.num_steps = num_steps
self.lr = lr
self.max_noise = max_noise
self.min_noise = min_noise
self.distance_q = self.__init_query_distance__()
def __init_query_distance__(self):
"""Initialize Wasserstein distance metric from provided input distance metric"""
sigma_ = self.distance_x.sigma
distance_q = WassersteinDistance()
distance_q.fit(sigma=sigma_)
return distance_q
def generate_worst_case_examples(self, network, Q, lambda_param, optimizer=None):
"""Generate worst case examples given the input sample batch of queries Q (dimensions batch_size,num_items,num_features)
Parameters
-----------
network: torch.nn.Module
PyTorch network model that outputs scores per item
Q: torch.Tensor
tensor with dimensions batch_size, num_items, num_features containing the batch of queries for ranking
lambda_param: torch.float
Lambda weighting parameter as defined above
optimizer: torch.optim.Optimizer, optional
Pytorch Optimizer object
Returns
---------
q_worst: torch.Tensor
worst case queries for the provided input queries `Q`
"""
assert optimizer is None or issubclass(optimizer, torch.optim.Optimizer)
batch_size, num_items, _ = Q.shape
freeze_network(network)
lambda_param = lambda_param.detach()
delta = Parameter(
torch.rand_like(Q) * (self.max_noise - self.min_noise) + self.min_noise
)
if optimizer is None:
optimizer = torch.optim.Adam([delta], lr=self.lr)
else:
optimizer = optimizer([delta], lr=self.lr)
for _ in range(self.num_steps):
optimizer.zero_grad()
Q_worst = Q + delta
input_dist = self.distance_q(Q, Q_worst) # this is of size B
out_Q = network(Q).reshape(
batch_size, num_items
) # shape B,N,1 scores --> B,N
out_Q_worst = network(Q_worst).reshape(batch_size, num_items)
out_dist = self.distance_y(out_Q, out_Q_worst)
out_dist = out_dist.reshape(
-1
) # distance_y outputs B,1 whereas input_dist is B.
loss = (-(out_dist - lambda_param * input_dist)).sum()
loss.backward()
optimizer.step()
unfreeze_network(network)
return (Q + delta).detach()
| 4,386 | 34.096 | 128 |
py
|
inFairness
|
inFairness-main/inFairness/auditor/auditor.py
|
import torch
import numpy as np
from abc import ABCMeta
from scipy.stats import norm
from inFairness.utils.datautils import convert_tensor_to_numpy
from inFairness.auditor.datainterface import AuditorResponse
class Auditor(metaclass=ABCMeta):
"""
    Abstract class for model auditors, e.g. SenSeI or SenSR
"""
def __init__(self):
super(Auditor, self).__init__()
def generate_worst_case_examples(self, *args, **kwargs):
"""Generates worst-case example for the input data sample batch"""
raise NotImplementedError(
"Method `generate_worst_case_examples` not implemented."
)
def compute_loss_ratio(self, X_audit, X_worst, Y_audit, network, loss_fn):
"""Compute the loss ratio of samples computed by solving gradient flow attack
to original audit samples
Parameters
-------------
X_audit: torch.Tensor
Auditing samples. Shape (n_samples, n_features)
Y_audit: torch.Tensor
Labels of auditing samples. Shape: (n_samples)
        X_worst: torch.Tensor
            Worst-case counterparts of the auditing samples. Shape: (n_samples, n_features)
        network: torch.nn.Module
            PyTorch network model
        loss_fn: torch.nn.Module
            Loss function
Returns
---------
loss_ratios: numpy.ndarray
Ratio of loss for samples computed using gradient
flow attack to original audit samples
"""
with torch.no_grad():
Y_pred_worst = network(X_worst)
Y_pred_original = network(X_audit)
loss_vals_adversarial = loss_fn(Y_pred_worst, Y_audit, reduction="none")
loss_vals_original = loss_fn(Y_pred_original, Y_audit, reduction="none")
loss_vals_adversarial = convert_tensor_to_numpy(loss_vals_adversarial)
loss_vals_original = convert_tensor_to_numpy(loss_vals_original)
loss_ratio = np.divide(loss_vals_adversarial, loss_vals_original)
return loss_ratio
def compute_audit_result(self, loss_ratios, threshold=None, confidence=0.95):
"""Computes auditing statistics given loss ratios and user-specified
acceptance threshold
Parameters
-------------
loss_ratios: numpy.ndarray
List of loss ratios between worst-case and normal data samples
        threshold: float, optional
User-specified acceptance threshold value
If a value is not specified, the procedure simply returns the mean
            and lower bound of the loss ratio, leaving the determination of models'
fairness to the user.
If a value is specified, the procedure also determines if the model
is individually fair or not.
confidence: float, optional
Confidence value. Default = 0.95
Returns
----------
audit_result: AuditorResponse
Data interface with auditing results and statistics
"""
loss_ratios = loss_ratios[np.isfinite(loss_ratios)]
lossratio_mean = np.mean(loss_ratios)
lossratio_std = np.std(loss_ratios)
N = loss_ratios.shape[0]
z = norm.ppf(confidence)
lower_bound = lossratio_mean - z * lossratio_std / np.sqrt(N)
if threshold is None:
response = AuditorResponse(
lossratio_mean=lossratio_mean,
lossratio_std=lossratio_std,
lower_bound=lower_bound,
)
else:
tval = (lossratio_mean - threshold) / lossratio_std
tval *= np.sqrt(N)
pval = 1 - norm.cdf(tval)
is_model_fair = False if pval < (1 - confidence) else True
response = AuditorResponse(
lossratio_mean=lossratio_mean,
lossratio_std=lossratio_std,
lower_bound=lower_bound,
threshold=threshold,
pval=pval,
confidence=confidence,
is_model_fair=is_model_fair,
)
return response
def audit(self, *args, **kwargs):
"""Audit model for individual fairness"""
raise NotImplementedError("Method not implemented")
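if __name__ == "__main__":
    # Tiny numerical illustration of `compute_audit_result` on synthetic loss
    # ratios (made-up numbers, illustrative only): ratios centred near 1.1 are
    # tested against an acceptance threshold of 1.25 at 95% confidence.
    rng = np.random.default_rng(0)
    fake_loss_ratios = rng.normal(loc=1.1, scale=0.2, size=500)
    result = Auditor().compute_audit_result(fake_loss_ratios, threshold=1.25)
    print(result.lossratio_mean, result.lower_bound, result.pval, result.is_model_fair)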
| 4,199 | 34.294118 | 85 |
py
|
inFairness
|
inFairness-main/inFairness/auditor/__init__.py
|
from inFairness.auditor.auditor import Auditor
from inFairness.auditor.sensei_auditor import SenSeIAuditor
from inFairness.auditor.sensr_auditor import SenSRAuditor
from inFairness.auditor.senstir_auditor import SenSTIRAuditor
__all__ = [symb for symb in globals() if not symb.startswith("_")]
| 295 | 41.285714 | 66 |
py
|
inFairness
|
inFairness-main/inFairness/fairalgo/datainterfaces.py
|
import torch
from dataclasses import dataclass
@dataclass
class FairModelResponse:
"""Class to store a result from the fairmodel algorithm"""
loss: torch.Tensor = None
y_pred: torch.Tensor = None
| 212 | 16.75 | 62 |
py
|
inFairness
|
inFairness-main/inFairness/fairalgo/sensei.py
|
import torch
from torch import nn
from inFairness.auditor import SenSeIAuditor
from inFairness.fairalgo.datainterfaces import FairModelResponse
from inFairness.utils import datautils
class SenSeI(nn.Module):
"""Implementes the Sensitive Set Invariane (SenSeI) algorithm.
Proposed in `SenSeI: Sensitive Set Invariance for Enforcing Individual Fairness <https://arxiv.org/abs/2006.14168>`_
Parameters
------------
network: torch.nn.Module
Network architecture
distance_x: inFairness.distances.Distance
Distance metric in the input space
distance_y: inFairness.distances.Distance
Distance metric in the output space
loss_fn: torch.nn.Module
Loss function
rho: float
        :math:`\\rho` parameter in the SenSeI algorithm
eps: float
        :math:`\epsilon` parameter in the SenSeI algorithm
auditor_nsteps: int
Number of update steps for the auditor to find worst-case examples
auditor_lr: float
Learning rate for the auditor
"""
def __init__(
self,
network,
distance_x,
distance_y,
loss_fn,
rho,
eps,
auditor_nsteps,
auditor_lr,
):
super().__init__()
self.distance_x = distance_x
self.distance_y = distance_y
self.network = network
self.loss_fn = loss_fn
self.lamb = None
self.rho = rho
self.eps = eps
self.auditor_nsteps = auditor_nsteps
self.auditor_lr = auditor_lr
self.auditor = self.__init_auditor__()
def __init_auditor__(self):
auditor = SenSeIAuditor(
distance_x=self.distance_x,
distance_y=self.distance_y,
num_steps=self.auditor_nsteps,
lr=self.auditor_lr,
)
return auditor
def forward_train(self, X, Y):
"""Forward method during the training phase"""
device = datautils.get_device(X)
minlambda = torch.tensor(1e-5, device=device)
if self.lamb is None:
self.lamb = torch.tensor(1.0, device=device)
if type(self.eps) is float:
self.eps = torch.tensor(self.eps, device=device)
Y_pred = self.network(X)
X_worst = self.auditor.generate_worst_case_examples(
self.network, X, lambda_param=self.lamb
)
dist_x = self.distance_x(X, X_worst)
mean_dist_x = dist_x.mean()
lr_factor = torch.maximum(mean_dist_x, self.eps) / torch.minimum(mean_dist_x, self.eps)
self.lamb = torch.max(
torch.stack(
[minlambda, self.lamb + lr_factor * (mean_dist_x - self.eps)]
)
)
Y_pred_worst = self.network(X_worst)
fair_loss = torch.mean(
self.loss_fn(Y_pred, Y) + self.rho * self.distance_y(Y_pred, Y_pred_worst)
)
response = FairModelResponse(loss=fair_loss, y_pred=Y_pred)
return response
def forward_test(self, X):
"""Forward method during the test phase"""
Y_pred = self.network(X)
response = FairModelResponse(y_pred=Y_pred)
return response
def forward(self, X, Y=None, *args, **kwargs):
"""Defines the computation performed at every call.
Parameters
------------
X: torch.Tensor
Input data
Y: torch.Tensor
Expected output data
Returns
----------
output: torch.Tensor
Model output
"""
if self.training:
return self.forward_train(X, Y)
else:
return self.forward_test(X)
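if __name__ == "__main__":
    # Minimal training sketch (illustrative only): wrap a small classifier in
    # SenSeI and run a few update steps on random data. `_SquaredEuclidean` is
    # a hypothetical stand-in for the inFairness distance metrics.
    import torch.nn.functional as F

    class _SquaredEuclidean(nn.Module):
        def forward(self, x1, x2):
            return ((x1 - x2) ** 2).sum(dim=-1)

    net = nn.Sequential(nn.Linear(4, 16), nn.ReLU(), nn.Linear(16, 2))
    fairalgo = SenSeI(
        network=net,
        distance_x=_SquaredEuclidean(),
        distance_y=_SquaredEuclidean(),
        loss_fn=F.cross_entropy,
        rho=1.0,
        eps=0.1,
        auditor_nsteps=10,
        auditor_lr=0.05,
    )
    optimizer = torch.optim.Adam(fairalgo.parameters(), lr=1e-3)

    fairalgo.train()
    for _ in range(5):
        X = torch.randn(32, 4)
        Y = torch.randint(0, 2, (32,))
        response = fairalgo(X, Y)
        optimizer.zero_grad()
        response.loss.backward()
        optimizer.step()
    print(response.loss.item())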
| 3,743 | 27.580153 | 120 |
py
|
inFairness
|
inFairness-main/inFairness/fairalgo/__init__.py
|
from inFairness.fairalgo.sensei import SenSeI
from inFairness.fairalgo.sensr import SenSR
from inFairness.fairalgo.senstir import SenSTIR
from inFairness.fairalgo.datainterfaces import FairModelResponse
__all__ = [symb for symb in globals() if not symb.startswith("_")]
| 271 | 37.857143 | 66 |
py
|
inFairness
|
inFairness-main/inFairness/fairalgo/senstir.py
|
import torch
from torch import nn
from functorch import vmap
from inFairness.auditor import SenSTIRAuditor
from inFairness.distances.mahalanobis_distance import MahalanobisDistances
from inFairness.fairalgo.datainterfaces import FairModelResponse
from inFairness.utils import datautils
from inFairness.utils.plackett_luce import PlackettLuce
from inFairness.utils.ndcg import monte_carlo_vect_ndcg
class SenSTIR(nn.Module):
"""Implementes the Sensitive Subspace Robustness (SenSR) algorithm.
Proposed in `Individually Fair Ranking <https://arxiv.org/abs/2103.11023>`_
Parameters
------------
network: torch.nn.Module
Network architecture
distance_x: inFairness.distances.Distance
Distance metric in the input space
distance_y: inFairness.distances.Distance
Distance metric in the output space
rho: float
:math:`\\rho` parameter in the SenSTIR algorithm (see Algorithm 1)
eps: float
:math:`\\epsilon` parameter in the SenSTIR algorithm (see Algorithm 1)
auditor_nsteps: int
Number of update steps for the auditor to find worst-case examples
auditor_lr: float
Learning rate for the auditor
monte_carlo_samples_ndcg: int
        Number of Monte Carlo samples used to estimate the gradient of the
        empirical expectation defined in the SenSTIR equation of the reference
"""
def __init__(
self,
network: torch.nn.Module,
distance_x: MahalanobisDistances,
distance_y: MahalanobisDistances,
rho: float,
eps: float,
auditor_nsteps: int,
auditor_lr: float,
monte_carlo_samples_ndcg: int,
):
super().__init__()
self.network = network
self.distance_x = distance_x
self.distance_y = distance_y
self.rho = rho
self.eps = eps
self.auditor_nsteps = auditor_nsteps
self.auditor_lr = auditor_lr
self.monte_carlo_samples_ndcg = monte_carlo_samples_ndcg
self.lamb = None
self.auditor, self.distance_q = self.__init_auditor__()
self._vect_gather = vmap(torch.gather, (None, None, 0))
def __init_auditor__(self):
auditor = SenSTIRAuditor(
self.distance_x,
self.distance_y,
self.auditor_nsteps,
self.auditor_lr,
)
distance_q = auditor.distance_q
return auditor, distance_q
def forward_train(self, Q, relevances):
batch_size, num_items, num_features = Q.shape
device = datautils.get_device(Q)
min_lambda = torch.tensor(1e-5, device=device)
if self.lamb is None:
self.lamb = torch.tensor(1.0, device=device)
if type(self.eps) is float:
self.eps = torch.tensor(self.eps, device=device)
if self.rho > 0.0:
Q_worst = self.auditor.generate_worst_case_examples(
self.network, Q, self.lamb
)
mean_dist_q = self.distance_q(Q, Q_worst).mean()
# lr_factor = torch.maximum(mean_dist_q, self.eps) / torch.minimum(
# mean_dist_q, self.eps
# )
lr_factor = 0.5 * self.rho
self.lamb = torch.maximum(
min_lambda, self.lamb + lr_factor * (mean_dist_q - self.eps)
)
scores = self.network(Q).reshape(batch_size, num_items) # (B,N,1) --> B,N
scores_worst = self.network(Q_worst).reshape(batch_size, num_items)
else:
scores = self.network(Q).reshape(batch_size, num_items) # (B,N,1) --> B,N
scores_worst = torch.ones_like(scores)
fair_loss = torch.mean(
-self.__expected_ndcg__(self.monte_carlo_samples_ndcg, scores, relevances)
+ self.rho * self.distance_y(scores, scores_worst)
)
response = FairModelResponse(loss=fair_loss, y_pred=scores)
return response
def forward_test(self, Q):
"""Forward method during the test phase"""
scores = self.network(Q).reshape(Q.shape[:2]) # B,N,1 -> B,N
response = FairModelResponse(y_pred=scores)
return response
def forward(self, Q, relevances, **kwargs):
"""Defines the computation performed at every call.
Parameters
------------
        Q: torch.Tensor
            Batch of queries. Shape: (B, N, num_features)
        relevances: torch.Tensor
            True relevances of the items in each query. Shape: (B, N)
Returns
----------
output: torch.Tensor
Model output
"""
if self.training:
return self.forward_train(Q, relevances)
else:
return self.forward_test(Q)
def __expected_ndcg__(self, montecarlo_samples, scores, relevances):
"""
        uses Monte Carlo samples to estimate the expected normalized discounted cumulative gain
        (NDCG) via REINFORCE. See Section 2 of the referenced paper.
Parameters
-------------
scores: torch.Tensor of dimension B,N
predicted scores for the objects in a batch of queries
relevances: torch.Tensor of dimension B,N
corresponding true relevances of such objects
Returns
------------
expected_ndcg: torch.Tensor of dimension B
monte carlo approximation of the expected ndcg by sampling from a Plackett-Luce
distribution parameterized by :param:`scores`
"""
prob_dist = PlackettLuce(scores)
mc_rankings = prob_dist.sample((montecarlo_samples,))
mc_log_prob = prob_dist.log_prob(mc_rankings)
mc_relevances = self._vect_gather(relevances, 1, mc_rankings)
mc_ndcg = monte_carlo_vect_ndcg(mc_relevances)
expected_utility = (mc_ndcg * mc_log_prob).mean(dim=0)
return expected_utility
| 5,938 | 33.132184 | 97 |
py
|
inFairness
|
inFairness-main/inFairness/fairalgo/sensr.py
|
import torch
from torch import nn
from inFairness.auditor import SenSRAuditor
from inFairness.fairalgo.datainterfaces import FairModelResponse
from inFairness.utils import datautils
class SenSR(nn.Module):
"""Implementes the Sensitive Subspace Robustness (SenSR) algorithm.
Proposed in `Training individually fair ML models with sensitive subspace robustness <https://arxiv.org/abs/1907.00020>`_
Parameters
------------
network: torch.nn.Module
Network architecture
distance_x: inFairness.distances.Distance
Distance metric in the input space
loss_fn: torch.nn.Module
Loss function
eps: float
:math:`\epsilon` parameter in the SenSR algorithm
lr_lamb: float
:math:`\lambda` parameter in the SenSR algorithm
lr_param: float
:math:`\\alpha` parameter in the SenSR algorithm
auditor_nsteps: int
Number of update steps for the auditor to find worst-case examples
auditor_lr: float
Learning rate for the auditor
"""
def __init__(
self,
network,
distance_x,
loss_fn,
eps,
lr_lamb,
lr_param,
auditor_nsteps,
auditor_lr,
):
super().__init__()
self.distance_x = distance_x
self.network = network
self.loss_fn = loss_fn
self.lambda_param = None
self.eps = eps
self.lr_lamb = lr_lamb
self.lr_param = lr_param
self.auditor_nsteps = auditor_nsteps
self.auditor_lr = auditor_lr
self.auditor = self.__init_auditor__()
def __init_auditor__(self):
auditor = SenSRAuditor(
loss_fn=self.loss_fn,
distance_x=self.distance_x,
num_steps=self.auditor_nsteps,
lr=self.auditor_lr,
)
return auditor
def forward_train(self, X, Y):
"""Forward method during the training phase"""
device = datautils.get_device(X)
if self.lambda_param is None:
self.lambda_param = torch.tensor(1.0, device=device)
Y_pred = self.network(X)
X_worst = self.auditor.generate_worst_case_examples(
self.network, X, Y, lambda_param=self.lambda_param
)
self.lambda_param = torch.max(
torch.stack(
[
torch.tensor(0.0, device=device),
self.lambda_param
- self.lr_lamb * (self.eps - self.distance_x(X, X_worst).mean()),
]
)
)
Y_pred_worst = self.network(X_worst)
fair_loss = torch.mean(self.lr_param * self.loss_fn(Y_pred_worst, Y))
response = FairModelResponse(loss=fair_loss, y_pred=Y_pred)
return response
def forward_test(self, X):
"""Forward method during the test phase"""
Y_pred = self.network(X)
response = FairModelResponse(y_pred=Y_pred)
return response
def forward(self, X, Y=None, *args, **kwargs):
"""Defines the computation performed at every call.
Parameters
------------
X: torch.Tensor
Input data
Y: torch.Tensor
Expected output data
Returns
----------
output: torch.Tensor
Model output
"""
if self.training:
return self.forward_train(X, Y)
else:
return self.forward_test(X)
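if __name__ == "__main__":
    # Minimal training sketch (illustrative only), mirroring the SenSeI example:
    # wrap a small classifier in SenSR and run a few update steps on random data.
    # `_SquaredEuclidean` is a hypothetical stand-in for an inFairness
    # input-space distance metric.
    import torch.nn.functional as F

    class _SquaredEuclidean(nn.Module):
        def forward(self, x1, x2):
            return ((x1 - x2) ** 2).sum(dim=-1)

    net = nn.Sequential(nn.Linear(4, 16), nn.ReLU(), nn.Linear(16, 2))
    fairalgo = SenSR(
        network=net,
        distance_x=_SquaredEuclidean(),
        loss_fn=F.cross_entropy,
        eps=0.1,
        lr_lamb=0.05,
        lr_param=1.0,
        auditor_nsteps=10,
        auditor_lr=0.05,
    )
    optimizer = torch.optim.Adam(fairalgo.parameters(), lr=1e-3)

    fairalgo.train()
    for _ in range(5):
        X = torch.randn(32, 4)
        Y = torch.randint(0, 2, (32,))
        response = fairalgo(X, Y)
        optimizer.zero_grad()
        response.loss.backward()
        optimizer.step()
    print(response.loss.item())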
| 3,534 | 27.055556 | 125 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/KPN.py
|
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torchsummary import summary
import torchvision.models as models
# basic building block of the KPN network
class Basic(nn.Module):
def __init__(self, in_ch, out_ch, g=16, channel_att=False, spatial_att=False):
super(Basic, self).__init__()
self.channel_att = channel_att
self.spatial_att = spatial_att
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, stride=1, padding=1),
# nn.BatchNorm2d(out_ch),
nn.ReLU(),
nn.Conv2d(in_channels=out_ch, out_channels=out_ch, kernel_size=3, stride=1, padding=1),
# nn.BatchNorm2d(out_ch),
nn.ReLU(),
nn.Conv2d(in_channels=out_ch, out_channels=out_ch, kernel_size=3, stride=1, padding=1),
# nn.BatchNorm2d(out_ch),
nn.ReLU()
)
if channel_att:
self.att_c = nn.Sequential(
nn.Conv2d(2*out_ch, out_ch//g, 1, 1, 0),
nn.ReLU(),
nn.Conv2d(out_ch//g, out_ch, 1, 1, 0),
nn.Sigmoid()
)
if spatial_att:
self.att_s = nn.Sequential(
nn.Conv2d(in_channels=2, out_channels=1, kernel_size=7, stride=1, padding=3),
nn.Sigmoid()
)
def forward(self, data):
"""
Forward function.
:param data:
:return: tensor
"""
fm = self.conv1(data)
if self.channel_att:
# fm_pool = F.adaptive_avg_pool2d(fm, (1, 1)) + F.adaptive_max_pool2d(fm, (1, 1))
fm_pool = torch.cat([F.adaptive_avg_pool2d(fm, (1, 1)), F.adaptive_max_pool2d(fm, (1, 1))], dim=1)
att = self.att_c(fm_pool)
fm = fm * att
if self.spatial_att:
fm_pool = torch.cat([torch.mean(fm, dim=1, keepdim=True), torch.max(fm, dim=1, keepdim=True)[0]], dim=1)
att = self.att_s(fm_pool)
fm = fm * att
return fm
class KPN(nn.Module):
def __init__(self, color=True, burst_length=8, blind_est=False, kernel_size=[5], sep_conv=False,
channel_att=False, spatial_att=False, upMode='bilinear', core_bias=False):
super(KPN, self).__init__()
self.upMode = upMode
self.burst_length = burst_length
self.core_bias = core_bias
self.color_channel = 3 if color else 1
in_channel = (3 if color else 1) * (burst_length if blind_est else burst_length+1)
out_channel = (3 if color else 1) * (2 * sum(kernel_size) if sep_conv else np.sum(np.array(kernel_size) ** 2)) * burst_length
if core_bias:
out_channel += (3 if color else 1) * burst_length
        # definition of the convolutional layers
        # layers 2-5: average pooling followed by a block of three convolutions
self.conv1 = Basic(in_channel, 64, channel_att=False, spatial_att=False)
self.conv2 = Basic(64, 128, channel_att=False, spatial_att=False)
self.conv3 = Basic(128, 256, channel_att=False, spatial_att=False)
self.conv4 = Basic(256, 512, channel_att=False, spatial_att=False)
self.conv5 = Basic(512, 512, channel_att=False, spatial_att=False)
        # layers 6-8: upsample first, then convolve
self.conv6 = Basic(512+512, 512, channel_att=channel_att, spatial_att=spatial_att)
self.conv7 = Basic(256+512, 256, channel_att=channel_att, spatial_att=spatial_att)
self.conv8 = Basic(256+128, out_channel, channel_att=channel_att, spatial_att=spatial_att)
self.outc = nn.Conv2d(out_channel, out_channel, 1, 1, 0)
self.kernel_pred = KernelConv(kernel_size, sep_conv, self.core_bias)
self.apply(self._init_weights)
@staticmethod
def _init_weights(m):
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight.data)
nn.init.constant_(m.bias.data, 0.0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight.data)
nn.init.constant_(m.bias.data, 0.0)
    # forward pass
def forward(self, data_with_est, data, white_level=1.0):
"""
forward and obtain pred image directly
        :param data_with_est: burst frames with the estimated noise map concatenated when blind_est is False; identical to data otherwise
:param data:
:return: pred_img_i and img_pred
"""
conv1 = self.conv1(data_with_est)
conv2 = self.conv2(F.avg_pool2d(conv1, kernel_size=2, stride=2))
conv3 = self.conv3(F.avg_pool2d(conv2, kernel_size=2, stride=2))
conv4 = self.conv4(F.avg_pool2d(conv3, kernel_size=2, stride=2))
conv5 = self.conv5(F.avg_pool2d(conv4, kernel_size=2, stride=2))
        # start upsampling, with skip connections
conv6 = self.conv6(torch.cat([conv4, F.interpolate(conv5, scale_factor=2, mode=self.upMode)], dim=1))
conv7 = self.conv7(torch.cat([conv3, F.interpolate(conv6, scale_factor=2, mode=self.upMode)], dim=1))
conv8 = self.conv8(torch.cat([conv2, F.interpolate(conv7, scale_factor=2, mode=self.upMode)], dim=1))
# return channel K*K*N
core = self.outc(F.interpolate(conv8, scale_factor=2, mode=self.upMode))
return self.kernel_pred(data, core, white_level)
class KernelConv(nn.Module):
"""
the class of computing prediction
"""
def __init__(self, kernel_size=[5], sep_conv=False, core_bias=False):
super(KernelConv, self).__init__()
self.kernel_size = sorted(kernel_size)
self.sep_conv = sep_conv
self.core_bias = core_bias
def _sep_conv_core(self, core, batch_size, N, color, height, width):
"""
convert the sep_conv core to conv2d core
2p --> p^2
:param core: shape: batch*(N*2*K)*height*width
:return:
"""
kernel_total = sum(self.kernel_size)
core = core.view(batch_size, N, -1, color, height, width)
if not self.core_bias:
core_1, core_2 = torch.split(core, kernel_total, dim=2)
else:
core_1, core_2, core_3 = torch.split(core, kernel_total, dim=2)
# output core
core_out = {}
cur = 0
for K in self.kernel_size:
t1 = core_1[:, :, cur:cur + K, ...].view(batch_size, N, K, 1, 3, height, width)
t2 = core_2[:, :, cur:cur + K, ...].view(batch_size, N, 1, K, 3, height, width)
core_out[K] = torch.einsum('ijklno,ijlmno->ijkmno', [t1, t2]).view(batch_size, N, K * K, color, height, width)
cur += K
# it is a dict
return core_out, None if not self.core_bias else core_3.squeeze()
def _convert_dict(self, core, batch_size, N, color, height, width):
"""
make sure the core to be a dict, generally, only one kind of kernel size is suitable for the func.
:param core: shape: batch_size*(N*K*K)*height*width
:return: core_out, a dict
"""
core_out = {}
core = core.view(batch_size, N, -1, color, height, width)
core_out[self.kernel_size[0]] = core[:, :, 0:self.kernel_size[0]**2, ...]
bias = None if not self.core_bias else core[:, :, -1, ...]
return core_out, bias
def forward(self, frames, core, white_level=1.0):
"""
compute the pred image according to core and frames
:param frames: [batch_size, N, 3, height, width]
:param core: [batch_size, N, dict(kernel), 3, height, width]
:return:
"""
if len(frames.size()) == 5:
batch_size, N, color, height, width = frames.size()
else:
batch_size, N, height, width = frames.size()
color = 1
frames = frames.view(batch_size, N, color, height, width)
if self.sep_conv:
core, bias = self._sep_conv_core(core, batch_size, N, color, height, width)
else:
core, bias = self._convert_dict(core, batch_size, N, color, height, width)
img_stack = []
pred_img = []
kernel = self.kernel_size[::-1]
for index, K in enumerate(kernel):
if not img_stack:
frame_pad = F.pad(frames, [K // 2, K // 2, K // 2, K // 2])
for i in range(K):
for j in range(K):
img_stack.append(frame_pad[..., i:i + height, j:j + width])
img_stack = torch.stack(img_stack, dim=2)
else:
k_diff = (kernel[index - 1] - kernel[index]) // 2
img_stack = img_stack[:, :, k_diff:-k_diff, ...]
# print('img_stack:', img_stack.size())
pred_img.append(torch.sum(
core[K].mul(img_stack), dim=2, keepdim=False
))
pred_img = torch.stack(pred_img, dim=0)
# print('pred_stack:', pred_img.size())
pred_img_i = torch.mean(pred_img, dim=0, keepdim=False).squeeze()
# if bias is permitted
if self.core_bias:
if bias is None:
raise ValueError('The bias should not be None.')
pred_img_i += bias
# print('white_level', white_level.size())
pred_img_i = pred_img_i / white_level
pred_img = torch.mean(pred_img_i, dim=1, keepdim=False)
# print('pred_img:', pred_img.size())
# print('pred_img_i:', pred_img_i.size())
return pred_img_i, pred_img
class LossFunc(nn.Module):
"""
loss function of KPN
"""
def __init__(self, coeff_basic=1.0, coeff_anneal=1.0, gradient_L1=True, alpha=0.9998, beta=100):
super(LossFunc, self).__init__()
self.coeff_basic = coeff_basic
self.coeff_anneal = coeff_anneal
self.loss_basic = LossBasic(gradient_L1)
self.loss_anneal = LossAnneal(alpha, beta)
def forward(self, pred_img_i, pred_img, ground_truth, global_step):
"""
forward function of loss_func
        :param pred_img_i: per-frame predictions, shape: [batch, N, 3, height, width]
        :param pred_img: fused prediction, shape: [batch, 3, height, width]
        :param ground_truth: shape [batch, 3, height, width]
        :param global_step: int
        :return: (loss_basic, loss_anneal)
"""
return self.coeff_basic * self.loss_basic(pred_img, ground_truth), self.coeff_anneal * self.loss_anneal(global_step, pred_img_i, ground_truth)
class LossBasic(nn.Module):
"""
Basic loss function.
"""
def __init__(self, gradient_L1=True):
super(LossBasic, self).__init__()
self.l1_loss = nn.L1Loss()
self.l2_loss = nn.MSELoss()
self.gradient = TensorGradient(gradient_L1)
def forward(self, pred, ground_truth):
return self.l2_loss(pred, ground_truth) + \
self.l1_loss(self.gradient(pred), self.gradient(ground_truth))
class LossAnneal(nn.Module):
"""
anneal loss function
"""
def __init__(self, alpha=0.9998, beta=100):
super(LossAnneal, self).__init__()
self.global_step = 0
self.loss_func = LossBasic(gradient_L1=True)
self.alpha = alpha
self.beta = beta
def forward(self, global_step, pred_i, ground_truth):
"""
:param global_step: int
:param pred_i: [batch_size, N, 3, height, width]
:param ground_truth: [batch_size, 3, height, width]
:return:
"""
loss = 0
for i in range(pred_i.size(1)):
loss += self.loss_func(pred_i[:, i, ...], ground_truth)
loss /= pred_i.size(1)
return self.beta * self.alpha ** global_step * loss
class TensorGradient(nn.Module):
"""
the gradient of tensor
"""
def __init__(self, L1=True):
super(TensorGradient, self).__init__()
self.L1 = L1
def forward(self, img):
w, h = img.size(-2), img.size(-1)
l = F.pad(img, [1, 0, 0, 0])
r = F.pad(img, [0, 1, 0, 0])
u = F.pad(img, [0, 0, 1, 0])
d = F.pad(img, [0, 0, 0, 1])
if self.L1:
return torch.abs((l - r)[..., 0:w, 0:h]) + torch.abs((u - d)[..., 0:w, 0:h])
else:
return torch.sqrt(
torch.pow((l - r)[..., 0:w, 0:h], 2) + torch.pow((u - d)[..., 0:w, 0:h], 2)
)
if __name__ == '__main__':
    # smoke test: gray-scale burst of 8 frames, blind estimation, 5x5 kernels
    kpn = KPN(color=False, burst_length=8, blind_est=True, kernel_size=[5]).cuda()
    burst = torch.rand(2, 8, 64, 64).cuda()
    pred_i, pred = kpn(burst, burst)
    print(pred_i.size(), pred.size())
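    # Illustrative only: run the predictions through LossFunc against a random
    # "ground truth" to show the expected tensor shapes (made-up data).
    gt = torch.rand(2, 64, 64).cuda()
    loss_func = LossFunc(coeff_basic=1.0, coeff_anneal=1.0, gradient_L1=True, alpha=0.9998, beta=100)
    loss_basic, loss_anneal = loss_func(pred_i, pred, gt, global_step=0)
    print(loss_basic.item(), loss_anneal.item())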
| 12,210 | 39.30033 | 150 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/train_eval_syn.py
|
import torch
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
import argparse
import os, sys, time, shutil
from data_provider import OnTheFlyDataset, _configspec_path
from kpn_data_provider import TrainDataSet, UndosRGBGamma, sRGBGamma
from KPN import KPN, LossFunc
from utils.training_util import MovingAverage, save_checkpoint, load_checkpoint, read_config
from utils.training_util import calculate_psnr, calculate_ssim
from tensorboardX import SummaryWriter
from PIL import Image
from torchvision.transforms import transforms
def train(config, num_workers, num_threads, cuda, restart_train, mGPU):
# torch.set_num_threads(num_threads)
train_config = config['training']
arch_config = config['architecture']
batch_size = train_config['batch_size']
lr = train_config['learning_rate']
weight_decay = train_config['weight_decay']
decay_step = train_config['decay_steps']
lr_decay = train_config['lr_decay']
n_epoch = train_config['num_epochs']
use_cache = train_config['use_cache']
print('Configs:', config)
# checkpoint path
checkpoint_dir = train_config['checkpoint_dir']
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# logs path
logs_dir = train_config['logs_dir']
if not os.path.exists(logs_dir):
os.makedirs(logs_dir)
shutil.rmtree(logs_dir)
log_writer = SummaryWriter(logs_dir)
# dataset and dataloader
data_set = TrainDataSet(
train_config['dataset_configs'],
img_format='.bmp',
degamma=True,
color=False,
blind=arch_config['blind_est']
)
data_loader = DataLoader(
data_set,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers
)
dataset_config = read_config(train_config['dataset_configs'], _configspec_path())['dataset_configs']
# model here
model = KPN(
color=False,
burst_length=dataset_config['burst_length'],
blind_est=arch_config['blind_est'],
kernel_size=list(map(int, arch_config['kernel_size'].split())),
sep_conv=arch_config['sep_conv'],
channel_att=arch_config['channel_att'],
spatial_att=arch_config['spatial_att'],
upMode=arch_config['upMode'],
core_bias=arch_config['core_bias']
)
if cuda:
model = model.cuda()
if mGPU:
model = nn.DataParallel(model)
model.train()
# loss function here
loss_func = LossFunc(
coeff_basic=1.0,
coeff_anneal=1.0,
gradient_L1=True,
alpha=arch_config['alpha'],
beta=arch_config['beta']
)
# Optimizer here
if train_config['optimizer'] == 'adam':
optimizer = optim.Adam(
model.parameters(),
lr=lr
)
elif train_config['optimizer'] == 'sgd':
optimizer = optim.SGD(
model.parameters(),
lr=lr,
momentum=0.9,
weight_decay=weight_decay
)
else:
raise ValueError("Optimizer must be 'sgd' or 'adam', but received {}.".format(train_config['optimizer']))
optimizer.zero_grad()
# learning rate scheduler here
scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=lr_decay)
average_loss = MovingAverage(train_config['save_freq'])
if not restart_train:
try:
checkpoint = load_checkpoint(checkpoint_dir, 'best')
start_epoch = checkpoint['epoch']
global_step = checkpoint['global_iter']
best_loss = checkpoint['best_loss']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['lr_scheduler'])
print('=> loaded checkpoint (epoch {}, global_step {})'.format(start_epoch, global_step))
except:
start_epoch = 0
global_step = 0
best_loss = np.inf
print('=> no checkpoint file to be loaded.')
else:
start_epoch = 0
global_step = 0
best_loss = np.inf
if os.path.exists(checkpoint_dir):
pass
# files = os.listdir(checkpoint_dir)
# for f in files:
# os.remove(os.path.join(checkpoint_dir, f))
else:
os.mkdir(checkpoint_dir)
print('=> training')
burst_length = dataset_config['burst_length']
data_length = burst_length if arch_config['blind_est'] else burst_length+1
patch_size = dataset_config['patch_size']
for epoch in range(start_epoch, n_epoch):
epoch_start_time = time.time()
# decay the learning rate
lr_cur = [param['lr'] for param in optimizer.param_groups]
if lr_cur[0] > 5e-6:
scheduler.step()
else:
for param in optimizer.param_groups:
param['lr'] = 5e-6
print('='*20, 'lr={}'.format([param['lr'] for param in optimizer.param_groups]), '='*20)
t1 = time.time()
for step, (burst_noise, gt, white_level) in enumerate(data_loader):
if cuda:
burst_noise = burst_noise.cuda()
gt = gt.cuda()
# print('white_level', white_level, white_level.size())
#
pred_i, pred = model(burst_noise, burst_noise[:, 0:burst_length, ...], white_level)
#
loss_basic, loss_anneal = loss_func(sRGBGamma(pred_i), sRGBGamma(pred), sRGBGamma(gt), global_step)
loss = loss_basic + loss_anneal
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
# update the average loss
average_loss.update(loss)
# calculate PSNR
psnr = calculate_psnr(pred.unsqueeze(1), gt.unsqueeze(1))
ssim = calculate_ssim(pred.unsqueeze(1), gt.unsqueeze(1))
# add scalars to tensorboardX
log_writer.add_scalar('loss_basic', loss_basic, global_step)
log_writer.add_scalar('loss_anneal', loss_anneal, global_step)
log_writer.add_scalar('loss_total', loss, global_step)
log_writer.add_scalar('psnr', psnr, global_step)
log_writer.add_scalar('ssim', ssim, global_step)
# print
print('{:-4d}\t| epoch {:2d}\t| step {:4d}\t| loss_basic: {:.4f}\t| loss_anneal: {:.4f}\t|'
' loss: {:.4f}\t| PSNR: {:.2f}dB\t| SSIM: {:.4f}\t| time:{:.2f} seconds.'
.format(global_step, epoch, step, loss_basic, loss_anneal, loss, psnr, ssim, time.time()-t1))
t1 = time.time()
# global_step
global_step += 1
if global_step % train_config['save_freq'] == 0:
if average_loss.get_value() < best_loss:
is_best = True
best_loss = average_loss.get_value()
else:
is_best = False
save_dict = {
'epoch': epoch,
'global_iter': global_step,
'state_dict': model.state_dict(),
'best_loss': best_loss,
'optimizer': optimizer.state_dict(),
'lr_scheduler': scheduler.state_dict()
}
save_checkpoint(
save_dict, is_best, checkpoint_dir, global_step, max_keep=train_config['ckpt_to_keep']
)
print('Epoch {} is finished, time elapsed {:.2f} seconds.'.format(epoch, time.time()-epoch_start_time))
def eval(config, args):
train_config = config['training']
arch_config = config['architecture']
use_cache = train_config['use_cache']
print('Eval Process......')
checkpoint_dir = train_config['checkpoint_dir']
if not os.path.exists(checkpoint_dir) or len(os.listdir(checkpoint_dir)) == 0:
        print('No checkpoint file found in path: {}'.format(checkpoint_dir))
# the path for saving eval images
eval_dir = train_config['eval_dir']
if not os.path.exists(eval_dir):
os.mkdir(eval_dir)
files = os.listdir(eval_dir)
for f in files:
os.remove(os.path.join(eval_dir, f))
# dataset and dataloader
data_set = TrainDataSet(
train_config['dataset_configs'],
img_format='.bmp',
degamma=True,
color=False,
blind=arch_config['blind_est'],
train=False
)
data_loader = DataLoader(
data_set,
batch_size=1,
shuffle=False,
num_workers=args.num_workers
)
dataset_config = read_config(train_config['dataset_configs'], _configspec_path())['dataset_configs']
# model here
model = KPN(
color=False,
burst_length=dataset_config['burst_length'],
blind_est=arch_config['blind_est'],
kernel_size=list(map(int, arch_config['kernel_size'].split())),
sep_conv=arch_config['sep_conv'],
channel_att=arch_config['channel_att'],
spatial_att=arch_config['spatial_att'],
upMode=arch_config['upMode'],
core_bias=arch_config['core_bias']
)
if args.cuda:
model = model.cuda()
if args.mGPU:
model = nn.DataParallel(model)
# load trained model
ckpt = load_checkpoint(checkpoint_dir, args.checkpoint)
model.load_state_dict(ckpt['state_dict'])
print('The model has been loaded from epoch {}, n_iter {}.'.format(ckpt['epoch'], ckpt['global_iter']))
# switch the eval mode
model.eval()
# data_loader = iter(data_loader)
burst_length = dataset_config['burst_length']
data_length = burst_length if arch_config['blind_est'] else burst_length + 1
patch_size = dataset_config['patch_size']
trans = transforms.ToPILImage()
with torch.no_grad():
psnr = 0.0
ssim = 0.0
for i, (burst_noise, gt, white_level) in enumerate(data_loader):
if i < 100:
# data = next(data_loader)
if args.cuda:
burst_noise = burst_noise.cuda()
gt = gt.cuda()
white_level = white_level.cuda()
pred_i, pred = model(burst_noise, burst_noise[:, 0:burst_length, ...], white_level)
pred_i = sRGBGamma(pred_i)
pred = sRGBGamma(pred)
gt = sRGBGamma(gt)
burst_noise = sRGBGamma(burst_noise / white_level)
psnr_t = calculate_psnr(pred.unsqueeze(1), gt.unsqueeze(1))
ssim_t = calculate_ssim(pred.unsqueeze(1), gt.unsqueeze(1))
psnr_noisy = calculate_psnr(burst_noise[:, 0, ...].unsqueeze(1), gt.unsqueeze(1))
psnr += psnr_t
ssim += ssim_t
pred = torch.clamp(pred, 0.0, 1.0)
if args.cuda:
pred = pred.cpu()
gt = gt.cpu()
burst_noise = burst_noise.cpu()
trans(burst_noise[0, 0, ...].squeeze()).save(os.path.join(eval_dir, '{}_noisy_{:.2f}dB.png'.format(i, psnr_noisy)), quality=100)
trans(pred.squeeze()).save(os.path.join(eval_dir, '{}_pred_{:.2f}dB.png'.format(i, psnr_t)), quality=100)
trans(gt.squeeze()).save(os.path.join(eval_dir, '{}_gt.png'.format(i)), quality=100)
print('{}-th image is OK, with PSNR: {:.2f}dB, SSIM: {:.4f}'.format(i, psnr_t, ssim_t))
else:
break
print('All images are OK, average PSNR: {:.2f}dB, SSIM: {:.4f}'.format(psnr/100, ssim/100))
if __name__ == '__main__':
# argparse
parser = argparse.ArgumentParser(description='parameters for training')
parser.add_argument('--config_file', dest='config_file', default='kpn_specs/kpn_config.conf', help='path to config file')
parser.add_argument('--config_spec', dest='config_spec', default='kpn_specs/configspec.conf', help='path to config spec file')
parser.add_argument('--restart', action='store_true', help='Whether to remove all old files and restart the training process')
parser.add_argument('--num_workers', '-nw', default=4, type=int, help='number of workers in data loader')
parser.add_argument('--num_threads', '-nt', default=8, type=int, help='number of threads in data loader')
parser.add_argument('--cuda', '-c', action='store_true', help='whether to train on the GPU')
parser.add_argument('--mGPU', '-m', action='store_true', help='whether to train on multiple GPUs')
parser.add_argument('--eval', action='store_true', help='whether to work on the evaluation mode')
parser.add_argument('--checkpoint', '-ckpt', dest='checkpoint', type=str, default='best',
help='the checkpoint to eval')
args = parser.parse_args()
#
config = read_config(args.config_file, args.config_spec)
if args.eval:
eval(config, args)
else:
train(config, args.num_workers, args.num_threads, args.cuda, args.restart, args.mGPU)
| 13,114 | 37.014493 | 144 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/kpn_data_provider.py
|
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import os
from PIL import Image
import numpy as np
from skimage.color import rgb2xyz
import inspect
from utils.training_util import read_config
from data_generation.data_utils import *
import torch.nn.functional as F
def sRGBGamma(tensor):
threshold = 0.0031308
a = 0.055
mult = 12.92
gamma = 2.4
res = torch.zeros_like(tensor)
mask = tensor > threshold
# image_lo = tensor * mult
# 0.001 is to avoid funny thing at 0.
# image_hi = (1 + a) * torch.pow(tensor + 0.001, 1.0 / gamma) - a
res[mask] = (1 + a) * torch.pow(tensor[mask] + 0.001, 1.0 / gamma) - a
    res[~mask] = tensor[~mask] * mult
# return mask * image_hi + (1 - mask) * image_lo
return res
def UndosRGBGamma(tensor):
threshold = 0.0031308
a = 0.055
mult = 12.92
gamma = 2.4
res = torch.zeros_like(tensor)
mask = tensor > threshold
# image_lo = tensor / mult
# image_hi = torch.pow(tensor + a, gamma) / (1 + a)
    res[~mask] = tensor[~mask] / mult
res[mask] = torch.pow(tensor[mask] + a, gamma) / (1 + a)
# return mask * image_hi + (1 - mask) * image_lo
return res
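def _gamma_roundtrip_demo():
    # Illustrative helper (not used by the dataset code): sRGBGamma and
    # UndosRGBGamma are intended to be approximate inverses on [0, 1]; the
    # 0.001 offset inside sRGBGamma makes the round trip slightly inexact,
    # so this only prints the observed error rather than asserting on it.
    x = torch.linspace(0.0, 1.0, steps=101)
    roundtrip = UndosRGBGamma(sRGBGamma(x))
    print('max gamma round-trip error:', (x - roundtrip).abs().max().item())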
class Random_Horizontal_Flip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, tensor):
if np.random.rand() < self.p:
return torch.flip(tensor, dims=[-1])
return tensor
class Random_Vertical_Flip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, tensor):
if np.random.rand() < self.p:
return torch.flip(tensor, dims=[-2])
return tensor
class TrainDataSet(torch.utils.data.Dataset):
def __init__(self, config_file,
config_spec=None, img_format='.bmp', degamma=True, color=True, blind=False, train=True):
super(TrainDataSet, self).__init__()
if config_spec is None:
config_spec = self._configspec_path()
config = read_config(config_file, config_spec)
self.dataset_config = config['dataset_configs']
self.dataset_dir = self.dataset_config['dataset_dir']
self.images = list(filter(lambda x: True if img_format in x else False, os.listdir(self.dataset_dir)))
self.burst_size = self.dataset_config['burst_length']
self.patch_size = self.dataset_config['patch_size']
self.upscale = self.dataset_config['down_sample']
self.big_jitter = self.dataset_config['big_jitter']
self.small_jitter = self.dataset_config['small_jitter']
        # maximum jitter offset on the image before down-sampling
self.jitter_upscale = self.big_jitter * self.upscale
        # patch size on the image before down-sampling
self.size_upscale = self.patch_size * self.upscale + 2 * self.jitter_upscale
        # delta between the big and small jitter, at the pre-down-sampling scale
self.delta_upscale = (self.big_jitter - self.small_jitter) * self.upscale
        # patch size mapped back to the original image
self.patch_size_upscale = self.patch_size * self.upscale
        # whether to undo the gamma correction
self.degamma = degamma
        # whether to process color images
self.color = color
        # blind estimation: the estimated noise variance is not fed to the network as input
self.blind = blind
self.train = train
self.vertical_flip = Random_Vertical_Flip(p=0.5)
self.horizontal_flip = Random_Horizontal_Flip(p=0.5)
@staticmethod
def _configspec_path():
current_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe()))
)
return os.path.join(current_dir,
'dataset_specs/data_configspec.conf')
@staticmethod
def crop_random(tensor, patch_size):
return random_crop(tensor, 1, patch_size)[0]
    # fetch one item by index
def __getitem__(self, index):
# print(index)
image = Image.open(os.path.join(self.dataset_dir, self.images[index])).convert('RGB')
        # convert to a tensor first so the gamma can be undone
image = transforms.ToTensor()(image)
# if self.degamma:
# image = UndosRGBGamma(tensor=image)
image_crop = self.crop_random(image, self.size_upscale)
        # 3*H*W crop corresponding to the small-jitter region
image_crop_small = image_crop[:, self.delta_upscale:-self.delta_upscale,
self.delta_upscale:-self.delta_upscale]
        # transforms needed for the subsequent random crops
        # the first frame of the burst is not shifted; it later serves as the target
# output shape: N*3*H*W
img_burst = []
for i in range(self.burst_size):
if i == 0:
img_burst.append(
image_crop[:, self.jitter_upscale:-self.jitter_upscale, self.jitter_upscale:-self.jitter_upscale]
)
else:
if np.random.binomial(1, min(1.0, np.random.poisson(lam=1.5) / self.burst_size)) == 0:
img_burst.append(
self.crop_random(
image_crop_small, self.patch_size_upscale
)
)
else: #big
img_burst.append(
self.crop_random(image_crop, self.patch_size_upscale)
)
image_burst = torch.stack(img_burst, dim=0)
image_burst = F.adaptive_avg_pool2d(image_burst, (self.patch_size, self.patch_size))
        # the label is the first frame of the burst patch
if not self.color:
image_burst = 0.2989*image_burst[:, 0, ...] + 0.5870 * image_burst[:, 1, ...] + 0.1140*image_burst[:, 2, ...]
image_burst = torch.clamp(image_burst, 0.0, 1.0)
if self.degamma:
            image_burst = UndosRGBGamma(image_burst)
if self.train:
# data augment
image_burst = self.horizontal_flip(image_burst)
image_burst = self.vertical_flip(image_burst)
gt = image_burst[0, ...]
        # the patch obtained above has size burst*(3)*size*size
"""
        Noise injection and related processing, all performed on the RGB image.
"""
        # draw a uniform random number in [log10(0.1), log10(1.0)], i.e. a negated uniform sample from [0, 1]
        # after prediction, divide by white_level to restore the original brightness
        # one white_level is generated per burst in the batch
white_level = torch.from_numpy(np.power(10, -np.random.rand(1, 1, 1))).type_as(image_burst)
        # as in the paper, linearly scale the image brightness into [0.1, 1]
image_burst = white_level * image_burst
# gray image
if not self.color:
            # sample random read and shot noise variances
sigma_read = torch.from_numpy(
np.power(10, np.random.uniform(-3.0, -1.5, (1, 1, 1)))).type_as(image_burst)
sigma_shot = torch.from_numpy(
np.power(10, np.random.uniform(-4.0, -2.0, (1, 1, 1)))).type_as(image_burst)
# sigma_read = torch.from_numpy(2*np.power(10, np.array([[[-2.0]]]))).type_as(image_burst)
# sigma_shot = torch.from_numpy(6.4 * np.power(10, np.array([[[-3.0]]]))).type_as(image_burst)
            # generate noise following the formula in the paper
sigma_read_com = sigma_read.expand_as(image_burst)
sigma_shot_com = sigma_shot.expand_as(image_burst)
# generate noise
burst_noise = torch.normal(image_burst, torch.sqrt(sigma_read_com**2 + image_burst * sigma_shot_com)).type_as(image_burst)
            # clamp burst_noise back to [0, 1]
burst_noise = torch.clamp(burst_noise, 0.0, 1.0)
            # non-blind estimation: the noise variance must also be estimated
if not self.blind:
                # based on the two sigmas, estimate the noise standard deviation from the first burst frame and feed it as an additional input
# estimation shape: H*W
sigma_read_est = sigma_read.view(1, 1).expand_as(gt)
sigma_shot_est = sigma_shot.view(1, 1).expand_as(gt)
sigma_estimate = torch.sqrt(sigma_read_est ** 2 + sigma_shot_est.mul(
torch.max(torch.stack([burst_noise[0, ...], torch.zeros_like(burst_noise[0, ...])], dim=0), dim=0)[0]))
                # concatenate the noise estimate with the burst frames
burst_noise = torch.cat([burst_noise, sigma_estimate.unsqueeze(0)], dim=0)
            # training with the ref image as target (as in the paper) gave outputs very close to the ref, with little denoising benefit
# return patches_with_noise, patches_with_noise[:, 0, ...], white_level
            # use the noise-free ref as the target instead
return burst_noise, gt, white_level
# color image
else:
            # sample random read and shot noise variances
sigma_read = torch.from_numpy(
np.power(10, np.random.uniform(-3.0, -1.5, (1, 1, 1, 1)))).type_as(image_burst)
sigma_shot = torch.from_numpy(
np.power(10, np.random.uniform(-4.0, -2.0, (1, 1, 1, 1)))).type_as(image_burst)
            # generate noise following the formula in the paper
sigma_read_com = sigma_read.expand_as(image_burst)
sigma_shot_com = sigma_shot.expand_as(image_burst)
# generate noise
burst_noise = torch.normal(image_burst,
torch.sqrt(sigma_read_com ** 2 + image_burst * sigma_shot_com)).type_as(image_burst)
            # clamp burst_noise back to [0, 1]
burst_noise = torch.clamp(burst_noise, 0.0, 1.0)
            # non-blind estimation: the noise variance must also be estimated
if not self.blind:
                # based on the two sigmas, estimate the noise standard deviation from the first burst frame and feed it as an additional input
# estimation shape: H*W
sigma_read_est = sigma_read.view(1, 1, 1).expand_as(gt)
sigma_shot_est = sigma_shot.view(1, 1, 1).expand_as(gt)
sigma_estimate = torch.sqrt(sigma_read_est ** 2 + sigma_shot_est.mul(
torch.max(torch.stack([burst_noise[0, ...], torch.zeros_like(burst_noise[0, ...])], dim=0), dim=0)[0]))
                # concatenate the noise estimate with the burst frames
burst_noise = torch.cat([burst_noise, sigma_estimate.unsqueeze(0)], dim=0)
white_level = white_level.unsqueeze(0)
return burst_noise, gt, white_level
def __len__(self):
return len(self.images)
if __name__ == '__main__':
# path = 'F:/BinZhang/Codes/deep-burst-denoising/data/train'
# dataset = TrainDataSet(path, '.jpg', 8, 128, 4, 16, 2, color=False)
# dataloader = DataLoader(dataset,
# batch_size=4,
# shuffle=True,
# num_workers=4)
# dataloader = iter(dataloader)
# a, b, c = next(dataloader)
# print(a.size(), b.size(), c.size())
hf = Random_Horizontal_Flip(0.5)
a = torch.randint(0, 10, (2, 2))
print(a, hf(a))
| 10,342 | 37.737828 | 134 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/data_provider.py
|
import glob
import inspect
import os
import zlib
from time import time
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data as data
from PIL import Image
from torch import FloatTensor
from data_generation.pipeline import ImageDegradationPipeline
from utils.image_utils import bayer_crop_tensor
from utils.training_util import read_config
DEBUG_TIME = False
def _configspec_path():
current_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe()))
)
return os.path.join(current_dir,
'dataset_specs/data_configspec.conf')
class OnTheFlyDataset(data.Dataset):
def __init__(self,
config_file,
config_spec=None,
blind=False,
cropping="random",
cache_dir=None,
use_cache=False,
dataset_name="synthetic"):
""" Dataset for generating degraded images on the fly.
Args:
            config_file: path to the data config file.
            config_spec: path to the data config spec file.
            blind: whether to use blind estimation (no noise estimate returned).
            cropping: cropping mode ["random", "center"].
            cache_dir: directory used to cache generated tensors.
            use_cache: whether to load/save cached tensors.
            dataset_name: name tag used for cached dataset files.
"""
super().__init__()
if config_spec is None:
config_spec = _configspec_path()
config = read_config(config_file, config_spec)
# self.config_file = config_file
# dictionary of dataset configs
self.dataset_configs = config['dataset_configs']
# directory to load linear patches
patch_dir = self.dataset_configs['dataset_dir']
# dictionary of boolean flags controlling how pipelines are created
# (see data_configspec for detail).
self.pipeline_configs = config['pipeline_configs']
# dictionary of ranges of params (see data_configspec for detail).
self.pipeline_param_ranges = config['pipeline_param_ranges']
file_list = glob.glob(os.path.join(patch_dir,
'*.pth'))
file_list = [os.path.basename(f) for f in file_list]
file_list = [os.path.splitext(f)[0] for f in file_list]
self.file_list = sorted(file_list, key=lambda x: zlib.adler32(x.encode('utf-8')))
# print(self.file_list)
# self.pipeline_param_ranges = pipeline_param_ranges
# self.pipeline_configs = pipeline_configs
# print('Data Pipeline Configs: ', self.pipeline_configs)
# print('Data Pipeline Param Ranges: ', self.pipeline_param_ranges)
# some variables about the setting of dataset
self.data_root = patch_dir
self.im_size = self.dataset_configs['patch_size'] # the size after down-sample
extra_for_bayer = 2 # extra size used for the random choice for bayer pattern
self.big_jitter = self.dataset_configs['big_jitter']
self.small_jitter = self.dataset_configs['small_jitter']
self.down_sample = self.dataset_configs['down_sample']
# image size corresponding to original image (include big jitter)
self.im_size_upscale = (self.im_size + 2 * self.big_jitter + extra_for_bayer) * self.down_sample
# from big jitter image to real image with extra pixels to random choose the bayer pattern
self.big_restore_upscale = self.big_jitter * self.down_sample
# the shift pixels of small jitter within upscale
self.small_restore_upscale = self.small_jitter * self.down_sample
# from big jitter images to small jitter images
self.big2small_upscale = (self.big_jitter - self.small_jitter) * self.down_sample
#
self.im_size_extra = (self.im_size + extra_for_bayer) * self.down_sample
# blind estimate?
self.blind = blind
# others
self.cropping = cropping
self.use_cache = use_cache
self.cache_dir = cache_dir
sz = "{}x{}".format(self.im_size, self.im_size) \
if self.im_size is not None else "None"
self.dataset_name = "_".join([dataset_name, sz])
# add the codes by Bin Zhang
self.burst_length = self.dataset_configs['burst_length']
def _get_filename(self, idx):
# folder = os.path.join(self.cache_dir, self.dataset_name)
folder = self.cache_dir
if not os.path.exists(folder):
os.makedirs(folder)
# filename = os.path.join(folder, self.dataset_name + "_{:06d}.pth".format(idx))
filename = os.path.join(folder, "{:06d}.pth".format(idx))
return filename
def _save_tensor(self, tensor_dicts, idx):
filename = self._get_filename(idx)
try:
torch.save(tensor_dicts, filename)
except OSError as e:
print("Warning write failed.")
print(e)
def _load_tensor(self, idx):
filename = self._get_filename(idx)
return torch.load(filename)
def _random_log_uniform(self, a, b):
if self.legacy_uniform:
return np.random.uniform(a, b)
val = np.random.uniform(np.log(a), np.log(b))
return np.exp(val)
def _randomize_parameter(self):
if "use_log_uniform" in self.pipeline_configs:
self.legacy_uniform = not self.pipeline_configs["use_log_uniform"]
else:
self.legacy_uniform = True
exp_adjustment = np.random.uniform(self.pipeline_param_ranges["min_exposure_adjustment"],
self.pipeline_param_ranges["max_exposure_adjustment"])
poisson_k = self._random_log_uniform(self.pipeline_param_ranges["min_poisson_noise"],
self.pipeline_param_ranges["max_poisson_noise"])
read_noise_sigma = self._random_log_uniform(self.pipeline_param_ranges["min_gaussian_noise"],
self.pipeline_param_ranges["max_gaussian_noise"])
chromatic_aberration = np.random.uniform(self.pipeline_param_ranges["min_chromatic_aberration"],
self.pipeline_param_ranges["max_chromatic_aberration"])
motionblur_segment = np.random.randint(self.pipeline_param_ranges["min_motionblur_segment"],
self.pipeline_param_ranges["max_motionblur_segment"])
motion_blur = []
motion_blur_dir = []
for i in range(motionblur_segment):
motion_blur.append(np.random.uniform(self.pipeline_param_ranges["min_motion_blur"],
self.pipeline_param_ranges["max_motion_blur"])
)
motion_blur_dir.append(np.random.uniform(0.0, 360.0))
jpeg_quality = np.random.randint(self.pipeline_param_ranges["min_jpeg_quality"],
self.pipeline_param_ranges["max_jpeg_quality"])
denoise_sigma_s = self._random_log_uniform(self.pipeline_param_ranges["min_denoise_sigma_s"],
self.pipeline_param_ranges["max_denoise_sigma_s"])
denoise_sigma_r = self._random_log_uniform(self.pipeline_param_ranges["min_denoise_sigma_r"],
self.pipeline_param_ranges["max_denoise_sigma_r"])
denoise_color_sigma_ratio = self._random_log_uniform(
self.pipeline_param_ranges["min_denoise_color_sigma_ratio"],
self.pipeline_param_ranges["max_denoise_color_sigma_ratio"])
denoise_color_range_ratio = self._random_log_uniform(
self.pipeline_param_ranges["min_denoise_color_range_ratio"],
self.pipeline_param_ranges["max_denoise_color_range_ratio"])
unsharp_amount = np.random.uniform(self.pipeline_param_ranges["min_unsharp_amount"],
self.pipeline_param_ranges["max_unsharp_amount"])
denoise_median_sz = np.random.randint(self.pipeline_param_ranges["min_denoise_median_sz"],
self.pipeline_param_ranges["max_denoise_median_sz"])
quantize_bits = np.random.randint(self.pipeline_param_ranges["min_quantize_bits"],
self.pipeline_param_ranges["max_quantize_bits"])
wavelet_sigma = np.random.uniform(self.pipeline_param_ranges["min_wavelet_sigma"],
self.pipeline_param_ranges["max_wavelet_sigma"])
motionblur_th = np.random.uniform(self.pipeline_param_ranges["min_motionblur_th"],
self.pipeline_param_ranges["max_motionblur_th"])
motionblur_boost = self._random_log_uniform(self.pipeline_param_ranges["min_motionblur_boost"],
self.pipeline_param_ranges["max_motionblur_boost"])
return dict(
exp_adjustment=exp_adjustment,
poisson_k=poisson_k,
read_noise_sigma=read_noise_sigma,
chromatic_aberration=chromatic_aberration,
motion_blur=motion_blur,
motion_blur_dir=motion_blur_dir,
jpeg_quality=jpeg_quality,
denoise_sigma_s=denoise_sigma_s,
denoise_sigma_r=denoise_sigma_r,
denoise_color_sigma_ratio=denoise_color_sigma_ratio,
denoise_color_range_ratio=denoise_color_range_ratio,
unsharp_amount=unsharp_amount,
denoise_median=denoise_median_sz,
quantize_bits=quantize_bits,
wavelet_sigma=wavelet_sigma,
motionblur_th=motionblur_th,
motionblur_boost=motionblur_boost,
)
@staticmethod
def _create_pipeline(exp_adjustment,
poisson_k,
read_noise_sigma,
chromatic_aberration,
motion_blur_dir,
jpeg_quality,
denoise_sigma_s,
denoise_sigma_r,
denoise_color_sigma_ratio,
unsharp_amount,
denoise_color_only,
demosaick,
denoise,
jpeg_compression,
use_motion_blur,
use_chromatic_aberration,
use_unsharp_mask,
exposure_correction,
quantize,
quantize_bits=8,
denoise_guide_transform=None,
denoise_n_iter=1,
demosaick_use_median=False,
demosaick_n_iter=0,
use_median_denoise=False,
median_before_bilateral=False,
denoise_median=None,
denoise_median_ratio=1.0,
denoise_median_n_iter=1,
demosaicked_input=True,
log_blackpts=0.004,
bilateral_class="DenoisingSKImageBilateralNonDifferentiable",
demosaick_class="AHDDemosaickingNonDifferentiable",
demosaick_ahd_delta=2.0,
demosaick_ahd_sobel_sz=3,
demosaick_ahd_avg_sz=3,
use_wavelet=False,
wavelet_family="db2",
wavelet_sigma=None,
wavelet_th_method="BayesShrink",
wavelet_levels=None,
motion_blur=None,
motionblur_th=None,
motionblur_boost=None,
motionblur_segment=1,
debug=False,
bayer_crop_phase=None,
saturation=None,
use_autolevel=False,
autolevel_max=1.5,
autolevel_blk=1,
autolevel_wht=99,
denoise_color_range_ratio=1,
wavelet_last=False,
wavelet_threshold=None,
wavelet_filter_chrom=True,
post_tonemap_class=None,
post_tonemap_amount=None,
pre_tonemap_class=None,
pre_tonemap_amount=None,
post_tonemap_class2=None,
post_tonemap_amount2=None,
repair_hotdead_pixel=False,
hot_px_th=0.2,
white_balance=False,
white_balance_temp=6504,
white_balance_tint=0,
use_tone_curve3zones=False,
tone_curve_highlight=0.0,
tone_curve_midtone=0.0,
tone_curve_shadow=0.0,
tone_curve_midshadow=None,
tone_curve_midhighlight=None,
unsharp_radius=4.0,
unsharp_threshold=3.0,
**kwargs):
# Define image degradation pipeline
# add motion blur and chromatic aberration
configs_degrade = []
# Random threshold
if demosaicked_input:
# These are features that only make sense to simulate in
# demosaicked input.
if use_motion_blur:
configs_degrade += [
('MotionBlur', {'amt': motion_blur,
'direction': motion_blur_dir,
'kernel_sz': None,
'dynrange_th': motionblur_th,
'dynrange_boost': motionblur_boost,
}
)
]
if use_chromatic_aberration:
configs_degrade += [
('ChromaticAberration', {'scaling': chromatic_aberration}),
]
configs_degrade.append(('ExposureAdjustment', {'nstops': exp_adjustment}))
if demosaicked_input:
if demosaick:
configs_degrade += [
('BayerMosaicking', {}),
]
mosaick_pattern = 'bayer'
else:
mosaick_pattern = None
else:
mosaick_pattern = 'bayer'
# Add artificial noise.
configs_degrade += [
('PoissonNoise', {'sigma': poisson_k, 'mosaick_pattern': mosaick_pattern}),
('GaussianNoise', {'sigma': read_noise_sigma, 'mosaick_pattern': mosaick_pattern}),
]
if quantize:
configs_degrade += [
('PixelClip', {}),
('Quantize', {'nbits': quantize_bits}),
]
if repair_hotdead_pixel:
configs_degrade += [
("RepairHotDeadPixel", {"threshold": hot_px_th}),
]
if demosaick:
configs_degrade += [
(demosaick_class, {'use_median_filter': demosaick_use_median,
'n_iter': demosaick_n_iter,
'delta': demosaick_ahd_delta,
'sobel_sz': demosaick_ahd_sobel_sz,
'avg_sz': demosaick_ahd_avg_sz,
}),
('PixelClip', {}),
]
if white_balance:
configs_degrade += [
('WhiteBalanceTemperature', {"new_temp": white_balance_temp,
"new_tint": white_balance_tint,
}),
]
if pre_tonemap_class is not None:
kw = "gamma" if "Gamma" in pre_tonemap_class else "amount"
configs_degrade += [
(pre_tonemap_class, {kw: pre_tonemap_amount})
]
if use_autolevel:
configs_degrade.append(('AutoLevelNonDifferentiable', {'max_mult': autolevel_max,
'blkpt': autolevel_blk,
'whtpt': autolevel_wht,
}))
denoise_list = []
if denoise:
denoise_list.append([
('PixelClip', {}),
(bilateral_class, {'sigma_s': denoise_sigma_s,
'sigma_r': denoise_sigma_r,
'color_sigma_ratio': denoise_color_sigma_ratio,
'color_range_ratio': denoise_color_range_ratio,
'filter_lum': not denoise_color_only,
'n_iter': denoise_n_iter,
'guide_transform': denoise_guide_transform,
'_bp': log_blackpts,
}),
('PixelClip', {}),
])
if use_median_denoise:
# TODO: Fix this.
# Special value because our config can't specify list of list
if denoise_median == -1:
denoise_median = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
if debug:
print("Denoising with Median Filter")
denoise_list.append([
('DenoisingMedianNonDifferentiable', {'neighbor_sz': denoise_median,
'color_sigma_ratio': denoise_median_ratio,
'n_iter': denoise_median_n_iter,
}),
])
if median_before_bilateral:
denoise_list = denoise_list[::-1]
if use_wavelet:
            # Always run the wavelet denoiser first (unless wavelet_last is set below).
            # Note: the wavelet parameters are packed into the generic bilateral-style
            # config keys: sigma_s holds the threshold method, sigma_r the wavelet sigma,
            # color_sigma_ratio the wavelet family, n_iter the number of decomposition
            # levels, and _bp the explicit threshold.
wavelet_config = [
('PixelClip', {}),
("DenoisingWaveletNonDifferentiable", {'sigma_s': wavelet_th_method,
'sigma_r': wavelet_sigma,
'color_sigma_ratio': wavelet_family,
'filter_lum': True,
'n_iter': wavelet_levels,
'guide_transform': denoise_guide_transform,
'_bp': wavelet_threshold,
'filter_chrom': wavelet_filter_chrom,
}),
('PixelClip', {}),
]
if wavelet_last:
denoise_list.append(wavelet_config)
else:
denoise_list.insert(0, wavelet_config)
for i in range(len(denoise_list)):
configs_degrade += denoise_list[i]
if post_tonemap_class is not None:
kw = "gamma" if "Gamma" in post_tonemap_class else "amount"
configs_degrade += [
(post_tonemap_class, {kw: post_tonemap_amount})
]
if post_tonemap_class2 is not None:
kw = "gamma" if "Gamma" in post_tonemap_class2 else "amount"
configs_degrade += [
(post_tonemap_class2, {kw: post_tonemap_amount2})
]
if use_tone_curve3zones:
ctrl_val = [t for t in [tone_curve_shadow,
tone_curve_midshadow,
tone_curve_midtone,
tone_curve_midhighlight,
tone_curve_highlight] if t is not None]
configs_degrade += [
('ToneCurveNZones', {'ctrl_val': ctrl_val,
}),
('PixelClip', {}),
]
if use_unsharp_mask:
configs_degrade += [
('Unsharpen', {'amount': unsharp_amount,
'radius': unsharp_radius,
'threshold': unsharp_threshold}),
('PixelClip', {}),
]
if saturation is not None:
configs_degrade.append(('Saturation', {'value': saturation}))
        # things that happen after the camera applies denoising, etc.
if jpeg_compression:
configs_degrade += [
('sRGBGamma', {}),
('Quantize', {'nbits': 8}),
('PixelClip', {}),
('JPEGCompression', {"quality": jpeg_quality}),
('PixelClip', {}),
('UndosRGBGamma', {}),
('PixelClip', {}),
]
else:
if quantize:
configs_degrade += [
('Quantize', {'nbits': 8}),
('PixelClip', {}),
]
if exposure_correction:
# Finally do exposure correction of weird jpeg-compressed image to get crappy images.
configs_degrade.append(('ExposureAdjustment', {'nstops': -exp_adjustment}))
target_pipeline = None
else:
configs_target = [
('ExposureAdjustment', {'nstops': exp_adjustment}),
('PixelClip', {}),
]
target_pipeline = ImageDegradationPipeline(configs_target)
configs_degrade.append(('PixelClip', {}))
if debug:
print('Final config:')
print('\n'.join([str(c) for c in configs_degrade]))
degrade_pipeline = ImageDegradationPipeline(configs_degrade)
return degrade_pipeline, target_pipeline
def __getitem__(self, index):
if self.use_cache:
try:
data = self._load_tensor(index)
return data
            except Exception:
                # unsuccessful at loading; fall through and regenerate the sample
pass
t0 = time()
# original image
target_path = os.path.join(self.data_root,
self.file_list[index] + '.pth')
# img = np.load(target_path).astype('float32')
img = (np.array(Image.open(target_path)) / 255.0).astype(np.float32)
        # degradation pipeline; only one pipeline is needed for all N frames
t1_load = time()
degrade_param = self._randomize_parameter()
degrade_pipeline, target_pipeline = self._create_pipeline(**{**self.pipeline_configs,
**degrade_param})
t2_create_pipeline = time()
# Actually process image.
img = FloatTensor(img).permute(2, 0, 1)
# Crop first so that we don't waste computation on the whole image.
# image with big jitter on original image
img_big_jitter = bayer_crop_tensor(
img, self.im_size_upscale, self.im_size_upscale, self.cropping
)
if len(img_big_jitter.size()) == 3:
img_big_jitter = img_big_jitter.unsqueeze(0)
# get N frames with big or small jitters
burst_jitter = []
for i in range(self.burst_length):
# this is the ref. frame without shift
if i == 0:
burst_jitter.append(
F.interpolate(
img_big_jitter[:, :, self.big_restore_upscale:-self.big_restore_upscale,
self.big_restore_upscale:-self.big_restore_upscale],
scale_factor=1 / self.down_sample
)
)
else:
                # flip a (biased) coin to decide between big and small jitter
                # (clamp so the success probability never exceeds 1)
                big_jitter = np.random.binomial(1, min(np.random.poisson(lam=1.5) / self.burst_length, 1.0))
if big_jitter:
burst_jitter.append(
F.interpolate(
bayer_crop_tensor(
img_big_jitter,
self.im_size_extra,
self.im_size_extra,
self.cropping
),
scale_factor=1 / self.down_sample
)
)
else:
img_small_jitter = img_big_jitter[:, :, self.big2small_upscale:-self.big2small_upscale,
self.big2small_upscale:-self.big2small_upscale]
burst_jitter.append(
F.interpolate(
bayer_crop_tensor(
img_small_jitter,
self.im_size_extra,
self.im_size_extra,
self.cropping
),
scale_factor=1 / self.down_sample
)
)
burst_jitter = torch.cat(burst_jitter, dim=0)
degraded = torch.zeros_like(burst_jitter)
for i in range(self.burst_length):
degraded[i, ...] = degrade_pipeline(burst_jitter[i, ...])
# degraded = degrade_pipeline(target)
target = burst_jitter[0, ...]
# if not blind estimation, compute the estimated noise
if not self.blind:
read_sigma, poisson_k = degrade_param['read_noise_sigma'], degrade_param['poisson_k']
noise = torch.sqrt(
read_sigma ** 2 + poisson_k ** 2 * degraded[0, ...]
).unsqueeze(0)
degraded = torch.cat([degraded, noise], dim=0)
# If not exposure correction, also apply exposure adjustment to the image.
if not self.pipeline_configs["exposure_correction"]:
target = target_pipeline(target).squeeze()
t3_degrade = time()
exp_adjustment = degrade_param['exp_adjustment']
# Bayer phase selection
target = target.unsqueeze(0)
im = torch.cat([degraded, target], 0)
if self.pipeline_configs["bayer_crop_phase"] is None:
# There are 4 phases of Bayer mosaick.
phase = np.random.choice(4)
else:
phase = self.pipeline_configs["bayer_crop_phase"]
x = phase % 2
y = (phase // 2) % 2
im = im[:, :, y:(y + self.im_size), x:(x + self.im_size)]
degraded, target = torch.split(im, self.burst_length if self.blind else self.burst_length + 1, dim=0)
t4_bayerphase = time()
t5_resize = time()
vis_exposure = 0 if self.pipeline_configs["exposure_correction"] else -exp_adjustment
t6_bayermask = time()
if DEBUG_TIME:
# report
print("--------------------------------------------")
t_total = (t6_bayermask - t0) / 100.0
t_load = t1_load - t0
t_create_pipeline = t2_create_pipeline - t1_load
t_process = t3_degrade - t2_create_pipeline
t_bayercrop = t4_bayerphase - t3_degrade
t_resize = t5_resize - t4_bayerphase
t_bayermask = t6_bayermask - t5_resize
print("load: {} ({}%)".format(t_load, t_load / t_total))
print("create_pipeline: {} ({}%)".format(t_create_pipeline, t_create_pipeline / t_total))
print("process: {} ({}%)".format(t_process, t_process / t_total))
print("bayercrop: {} ({}%)".format(t_bayercrop, t_bayercrop / t_total))
print("resize: {} ({}%)".format(t_resize, t_resize / t_total))
print("bayermask: {} ({}%)".format(t_bayermask, t_bayermask / t_total))
print("--------------------------------------------")
data = {'degraded_img': degraded,
'original_img': target.squeeze(),
'vis_exposure': FloatTensor([vis_exposure]),
}
if self.use_cache:
# TODO: Start a new thread to save.
self._save_tensor(data, index)
return data
def __len__(self):
return len(self.file_list)
class sampler(torch.utils.data.Sampler):
def __init__(self, data_source, num_samples):
self.num_samples = num_samples
self.total_num = len(data_source)
def __iter__(self):
if self.total_num % self.num_samples != 0:
return iter(torch.randperm(self.total_num).tolist() + torch.randperm(self.total_num).tolist()[0:(
self.total_num // self.num_samples + 1) * self.num_samples - self.total_num])
else:
return iter(torch.randperm(self.total_num).tolist())
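# Minimal usage sketch for the padding sampler above. The dataset construction and the
# keyword values are illustrative assumptions, not settings shipped with this repo.
def _example_build_loader(dataset, batch_size=4, num_workers=2):
    """Wire a dataset and the `sampler` above into a DataLoader that always yields full batches."""
    from torch.utils.data import DataLoader
    # the sampler pads the random permutation so len(dataset) becomes a multiple of batch_size
    smp = sampler(dataset, num_samples=batch_size)
    return DataLoader(dataset, batch_size=batch_size, sampler=smp, num_workers=num_workers)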
if __name__ == '__main__':
# import argparse
# from torch.utils.data import DataLoader
#
# parser = argparse.ArgumentParser(description='parameters for training')
# parser.add_argument('--config_file', dest='config_file', default='kpn_specs/kpn_config.conf',
# help='path to config file')
# parser.add_argument('--config_spec', dest='config_spec', default='kpn_specs/configspec.conf',
# help='path to config spec file')
# parser.add_argument('--restart', action='store_true',
# help='Whether to remove all old files and restart the training process')
# parser.add_argument('--num_workers', '-nw', default=4, type=int, help='number of workers in data loader')
# parser.add_argument('--num_threads', '-nt', default=8, type=int, help='number of threads in data loader')
# parser.add_argument('--cuda', '-c', action='store_true', help='whether to train on the GPU')
# parser.add_argument('--mGPU', '-m', action='store_true', help='whether to train on multiple GPUs')
# args = parser.parse_args()
#
# print(args)
#
# config = read_config(args.config_file, args.config_spec)
# train_config = config["training"]
#
#
# i = 0
# while i < 15:
# train_data = OnTheFlyDataset(train_config["dataset_configs"],
# use_cache=True,
# cache_dir='/home/bingo/burst-denoise/dataset/synthetic',
# blind=False,
# dataset_name='{:02d}'.format(i))
# train_loader = DataLoader(train_data, batch_size=1, shuffle=True, num_workers=args.num_workers)
# for index, data in enumerate(train_loader):
# print('epoch {}, step {} is ok'.format(i, index))
# i += 1
files = os.listdir('/home/bingo/burst-denoise/dataset/synthetic')
files.sort()
for index, f in enumerate(files):
os.rename(os.path.join('/home/bingo/burst-denoise/dataset/synthetic', f),
os.path.join('/home/bingo/burst-denoise/dataset/synthetic', '{:06d}.pth'.format(index)))
| 31,377 | 45.076358 | 193 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/data_generation/generate_dataset.py
|
import tifffile
import skimage
import numpy as np
import os
import argparse
import glob
import json
from tqdm import tqdm
from sklearn.feature_extraction.image import extract_patches_2d
import torch
from torch.autograd import Variable
from torch import FloatTensor
from data_generation.pipeline import ImageDegradationPipeline
from data_generation.constants import XYZ2sRGB, ProPhotoRGB2XYZ
def numpy2tensor(arr):
if len(arr.shape) < 3:
arr = np.expand_dims(arr, -1)
return FloatTensor(arr).permute(2, 0, 1).unsqueeze(0).float() / 255.0
def tensor2numpy(t, idx=None):
t = torch.clamp(t, 0, 1)
if idx is None:
t = t[0, ...]
else:
t = t[idx, ...]
return t.permute(1, 2, 0).cpu().squeeze().numpy()
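# Round-trip sketch for the two helpers above; the input is assumed to be an 8-bit
# HxWxC (or HxW) numpy image in the 0..255 range.
def _example_roundtrip(arr_uint8):
    """numpy (HxWxC, 0..255) -> torch (1xCxHxW, 0..1) -> numpy (HxWxC, 0..1)."""
    t = numpy2tensor(arr_uint8)   # adds channel/batch dims, permutes to CxHxW, scales by 1/255
    back = tensor2numpy(t)        # clamps to [0, 1], drops the batch dim, permutes back to HxWxC
    return t, back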
parser = argparse.ArgumentParser(description='')
parser.add_argument('--im_folder', required=True, help='path to input images')
parser.add_argument('--out_dir', required=True, help='path to place output')
parser.add_argument('--total_patch', type=int, required=True, help='total number of patches to generate')
parser.add_argument('--patch_per_image', type=int, default=5, help='Number of patches to generate from a single degradation of an image')
parser.add_argument('--patch_sz', type=int, default=256, help='Patch size (square patch for now)')
parser.add_argument('--fraction_train', type=float, default=0.8, help='Fraction of images to use as training')
parser.add_argument('--input_ext', default='tif', help='extension of the input images')
parser.add_argument('--max_exposure', type=float, default=0.0, help='maximum exposure adjustment in stops')
parser.add_argument('--min_exposure', type=float, default=0.0, help='minimum exposure adjustment in stops')
parser.add_argument('--max_gaussian_noise', type=float, default=0.0, help='maximum gaussian noise std (on range 0 - 1)')
parser.add_argument('--min_gaussian_noise', type=float, default=0.0, help='minimum gaussian noise std (on range 0 - 1)')
parser.add_argument('--max_poisson_noise', type=float, default=0.0, help='maximum poisson noise mult (See image_processing.PoissonNoise for detail)')
parser.add_argument('--min_poisson_noise', type=float, default=0.0, help='minimum poisson noise mult (See image_processing.PoissonNoise for detail)')
parser.add_argument('--skip_degraded', action="store_true", help='Whether to skip degraded images.')
parser.add_argument('--dwn_factor', type=float, default=4, help='Factor to downsample.')
args = parser.parse_args()
im_names = glob.glob(os.path.join(args.im_folder, '*.' + args.input_ext))
im_names = sorted([os.path.basename(i) for i in im_names])
# Create output folder
os.makedirs(args.out_dir, exist_ok=True)
train_dir = os.path.join(args.out_dir, 'train')
test_dir = os.path.join(args.out_dir, 'test')
os.makedirs(train_dir, exist_ok=True)
os.makedirs(test_dir, exist_ok=True)
for base_dir in [train_dir, test_dir]:
target_dir = os.path.join(base_dir, 'images', 'target')
degraded_dir = os.path.join(base_dir, 'images', 'degraded')
meta_dir = os.path.join(base_dir, 'meta')
os.makedirs(target_dir, exist_ok=True)
os.makedirs(degraded_dir, exist_ok=True)
os.makedirs(meta_dir, exist_ok=True)
n_count = 0
img_idx = 0
progress_bar = tqdm(total=args.total_patch)
while n_count < args.total_patch:
if img_idx < args.fraction_train * len(im_names):
base_dir = train_dir
else:
base_dir = test_dir
target_dir = os.path.join(base_dir, 'images', 'target')
degraded_dir = os.path.join(base_dir, 'images', 'degraded')
meta_dir = os.path.join(base_dir, 'meta')
name = im_names[img_idx]
path = os.path.join(args.im_folder, name)
    # We know the 5k dataset is 16-bit.
raw_im = tifffile.imread(path).astype('float32') / 65536.0
raw_im = FloatTensor(raw_im).permute(2, 0, 1).unsqueeze(0)
# Define pipeline
poisson_k = np.random.uniform(args.min_poisson_noise, args.max_poisson_noise)
read_noise_sigma = np.random.uniform(args.min_gaussian_noise, args.max_gaussian_noise)
dwn_factor = args.dwn_factor
exp_adjustment = np.random.uniform(args.min_exposure, args.max_exposure)
configs_prepreprocess = [
('UndoProPhotoRGBGamma', {}),
# Convert to sRGB
('ColorSpaceConversionMatrix', {'matrix': torch.matmul(XYZ2sRGB, ProPhotoRGB2XYZ)}),
]
configs_preprocess = [
# Blur and downsample to reduce noise
('GaussianBlur', {'sigma_x': dwn_factor}),
('PytorchResizing', {'resizing_factor': 1.0/dwn_factor, 'mode': 'nearest'})
]
configs_degrade = [
('ExposureAdjustment', {'nstops': exp_adjustment}),
# ('MotionBlur', {'amt': [3, 2], 'direction': [0, 45,]}),
('BayerMosaicking', {}),
# Add artificial noise.
('PoissonNoise',{'sigma': FloatTensor([poisson_k] * 3), 'mosaick_pattern': 'bayer'}),
('GaussianNoise',{'sigma': FloatTensor([read_noise_sigma] * 3), 'mosaick_pattern': 'bayer'}),
('PixelClip', {}),
('ExposureAdjustment', {'nstops': -exp_adjustment}),
('PixelClip', {}),
('NaiveDemosaicking', {}),
('PixelClip', {}),
]
configs_denoise = [
('DenoisingBilateral',{'sigma_s': 1.0, 'sigma_r': 0.1}),
('PixelClip', {}),
('sRGBGamma', {}),
]
pipeline_prepreprocess = ImageDegradationPipeline(configs_prepreprocess)
pipeline_preprocess = ImageDegradationPipeline(configs_preprocess)
pipeline_degrade = ImageDegradationPipeline(configs_degrade)
pipeline_denoise = ImageDegradationPipeline(configs_denoise)
demosaicked = pipeline_prepreprocess(raw_im)
preprocessed = pipeline_preprocess(demosaicked)
degraded = pipeline_degrade(preprocessed)
denoised = pipeline_denoise(degraded)
denoised_numpy = tensor2numpy(denoised)
preprocessed_numpy = tensor2numpy(preprocessed)
stacked = np.concatenate((denoised_numpy, preprocessed_numpy), axis=-1)
patches = extract_patches_2d(stacked,
(args.patch_sz, args.patch_sz),
args.patch_per_image)
degraded_patches, target_patches = np.split(patches, 2, axis=-1)
target_patches = np.split(target_patches, target_patches.shape[0])
degraded_patches = np.split(degraded_patches, degraded_patches.shape[0])
meta = dict(orig=name,
poisson_k=poisson_k,
read_noise_sigma=read_noise_sigma,
exp_adjustment=exp_adjustment,
dwn_factor=dwn_factor)
n_patches = len(degraded_patches)
for i in range(n_patches):
patch_idx = n_count + i + 1
degraded = np.clip(degraded_patches[i] * 255.0, 0, 255).astype('uint8')
if not args.skip_degraded:
skimage.io.imsave(os.path.join(degraded_dir,
"{:06d}.png".format(patch_idx)
),
np.squeeze(degraded))
np.save(os.path.join(target_dir,
"{:06d}.npy".format(patch_idx)
),
np.squeeze(target_patches[i]))
with open(os.path.join(meta_dir,
'{:06d}.json'.format(patch_idx)),
'w') as f:
json.dump(meta, f)
n_count += n_patches
img_idx = (img_idx + 1) % len(im_names)
progress_bar.update(n_patches)
progress_bar.close()
| 7,397 | 41.034091 | 149 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/data_generation/constants.py
|
import math
import torch
from torch import FloatTensor
XYZ2sRGB = FloatTensor([[ 3.2406, -1.5372, -0.4986],
[-0.9689, 1.8758, 0.0415],
[ 0.0557, -0.2040, 1.0570]])
# http://brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
ProPhotoRGB2XYZ = FloatTensor([[0.7976749, 0.1351917, 0.0313534],
[0.2880402, 0.7118741, 0.0000857],
[0.0000000, 0.0000000, 0.8252100]])
RGB2YUV = FloatTensor([[0.29900, 0.5870, 0.1140],
[-.33750, -.6625, 1.0000],
[1.00000, -.8374, -.1626]])
YUV2RGB = FloatTensor([[1.0, 0.0000, 0.7010],
[1.0, -.1721, -.3571],
[1.0, 0.8860, 0.0]])
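# Reference sketch (illustration only) of how these 3x3 matrices are applied to images:
# contract the channel dimension of a (3, H, W) or (N, 3, H, W) tensor. The pipeline itself
# wraps this in a ColorSpaceConversionMatrix module; this helper just spells out the math,
# e.g. _apply_color_matrix(img, torch.matmul(XYZ2sRGB, ProPhotoRGB2XYZ)) for ProPhoto -> sRGB.
def _apply_color_matrix(image, matrix):
    """image: tensor of shape (3, H, W) or (N, 3, H, W); matrix: 3x3 FloatTensor."""
    if image.dim() == 3:
        return torch.einsum('ij,jhw->ihw', matrix, image)
    return torch.einsum('ij,njhw->nihw', matrix, image)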
xyz_color_matching = {
"lambda": FloatTensor([390,395,400,405,410,415,420,425,430,435,440,445,450,455,460,465,470,475,480,485,490,495,500,505,510,515,520,525,530,535,540,545,550,555,560,565,570,575,580,585,590,595,600,605,610,615,620,625,630,635,640,645,650,655,660,665,670,675,680,685,690,695,700,705,710,715,720,725,730,735,740,745,750,755,760,765,770,775,780,785,790,795,800,805,810,815,820,825,830]),
"xyz": FloatTensor([[0.003769647,0.009382967,0.02214302,0.04742986,0.08953803,0.1446214,0.2035729,0.2488523,0.2918246,0.3227087,0.3482554,0.3418483,0.3224637,0.2826646,0.2485254,0.2219781,0.1806905,0.129192,0.08182895,0.04600865,0.02083981,0.007097731,0.002461588,0.003649178,0.01556989,0.04315171,0.07962917,0.1268468,0.1818026,0.2405015,0.3098117,0.3804244,0.4494206,0.5280233,0.6133784,0.7016774,0.796775,0.8853376,0.9638388,1.051011,1.109767,1.14362,1.151033,1.134757,1.083928,1.007344,0.9142877,0.8135565,0.6924717,0.575541,0.4731224,0.3844986,0.2997374,0.2277792,0.1707914,0.1263808,0.09224597,0.0663996,0.04710606,0.03292138,0.02262306,0.01575417,0.01096778,0.00760875,0.005214608,0.003569452,0.002464821,0.001703876,0.001186238,0.000826954,0.00057583,0.00040583,0.000285658,0.000202185,0.000143827,0.000102469,7.34755E-05,5.25987E-05,3.80611E-05,2.75822E-05,2.00412E-05,1.45879E-05,1.06814E-05,7.85752E-06,5.76828E-06,4.25917E-06,3.16777E-06,2.35872E-06,1.76247E-06],
[0.000414616,0.001059646,0.002452194,0.004971717,0.00907986,0.01429377,0.02027369,0.02612106,0.03319038,0.0415794,0.05033657,0.05743393,0.06472352,0.07238339,0.08514816,0.1060145,0.1298957,0.1535066,0.1788048,0.2064828,0.237916,0.285068,0.3483536,0.4277595,0.5204972,0.6206256,0.718089,0.7946448,0.8575799,0.9071347,0.9544675,0.9814106,0.9890228,0.9994608,0.9967737,0.9902549,0.9732611,0.9424569,0.8963613,0.8587203,0.8115868,0.7544785,0.6918553,0.6270066,0.5583746,0.489595,0.4229897,0.3609245,0.2980865,0.2416902,0.1943124,0.1547397,0.119312,0.08979594,0.06671045,0.04899699,0.03559982,0.02554223,0.01807939,0.01261573,0.008661284,0.006027677,0.004195941,0.002910864,0.001995557,0.001367022,0.000944727,0.000653705,0.000455597,0.000317974,0.000221745,0.000156557,0.000110393,7.82744E-05,5.57886E-05,3.98188E-05,2.86018E-05,2.05126E-05,1.48724E-05,1.08E-05,7.86392E-06,5.73694E-06,4.2116E-06,3.10656E-06,2.28679E-06,1.69315E-06,1.26256E-06,9.42251E-07,7.05386E-07],
[0.0184726,0.04609784,0.109609,0.2369246,0.4508369,0.7378822,1.051821,1.305008,1.552826,1.74828,1.917479,1.918437,1.848545,1.664439,1.522157,1.42844,1.25061,0.9991789,0.7552379,0.5617313,0.4099313,0.3105939,0.2376753,0.1720018,0.1176796,0.08283548,0.05650407,0.03751912,0.02438164,0.01566174,0.00984647,0.006131421,0.003790291,0.002327186,0.001432128,0.000882253,0.000545242,0.000338674,0.000211777,0.000133503,8.49447E-05,5.46071E-05,3.54966E-05,2.33474E-05,1.55463E-05,1.04839E-05,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])
}
# default 50% quality
default_jpeg_quantization_matrix = \
FloatTensor([[16, 11, 10, 16, 24, 40, 51, 61],
[12, 12, 14, 19, 26, 58, 60, 55],
[14, 13, 16, 24, 40, 57, 69, 56],
[14, 17, 22, 29, 51, 87, 80, 62],
[18, 22, 37, 56, 68, 109, 103, 77],
[24, 35, 55, 64, 81, 104, 113, 92],
[49, 64, 78, 87, 103, 121, 120, 101],
[72, 92, 95, 98, 112, 100, 103, 99]])
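# The table above is the standard JPEG luminance table at quality 50. Other qualities are
# commonly derived with the IJG/libjpeg scaling rule sketched below; whether the degradation
# pipeline uses exactly this rule is an assumption of this helper, not a statement about it.
def _scale_jpeg_quant_matrix(base_matrix, quality):
    """Scale an 8x8 quantization table to an integer quality in [1, 100] (IJG convention)."""
    quality = max(1, min(100, int(quality)))
    scale = 5000.0 / quality if quality < 50 else 200.0 - 2.0 * quality
    scaled = torch.floor((base_matrix * scale + 50.0) / 100.0)
    return torch.clamp(scaled, 1.0, 255.0)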
# Photoshop quantization.
# https://www.impulseadventure.com/photo/jpeg-quantization.html
photoshop_jpeg_quantization_lum = \
[
# Luminance Level 0
FloatTensor([
[32, 33, 51, 81, 66, 39, 34, 17],
[33, 36, 48, 47, 28, 23, 12, 12],
[51, 48, 47, 28, 23, 12, 12, 12],
[81, 47, 28, 23, 12, 12, 12, 12],
[66, 28, 23, 12, 12, 12, 12, 12],
[39, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 1
FloatTensor([
[27, 26, 41, 65, 66, 39, 34, 17],
[26, 29, 38, 47, 28, 23, 12, 12],
[41, 38, 47, 28, 23, 12, 12, 12],
[65, 47, 28, 23, 12, 12, 12, 12],
[66, 28, 23, 12, 12, 12, 12, 12],
[39, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 2
FloatTensor([
[20, 17, 26, 41, 51, 39, 34, 17],
[17, 18, 24, 39, 28, 23, 12, 12],
[26, 24, 32, 28, 23, 12, 12, 12],
[41, 39, 28, 23, 12, 12, 12, 12],
[51, 28, 23, 12, 12, 12, 12, 12],
[39, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 3
FloatTensor([
[18, 14, 22, 35, 44, 39, 34, 17],
[14, 16, 21, 34, 28, 23, 12, 12],
[22, 21, 27, 28, 23, 12, 12, 12],
[35, 34, 28, 23, 12, 12, 12, 12],
[44, 28, 23, 12, 12, 12, 12, 12],
[39, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 4
FloatTensor([
[16, 11, 17, 27, 34, 39, 34, 17],
[11, 12, 16, 26, 28, 23, 12, 12],
[17, 16, 21, 28, 23, 12, 12, 12],
[27, 26, 28, 23, 12, 12, 12, 12],
[34, 28, 23, 12, 12, 12, 12, 12],
[39, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 5
FloatTensor([
[12, 8, 13, 21, 26, 32, 34, 17],
[8, 9, 12, 20, 27, 23, 12, 12],
[13, 12, 16, 26, 23, 12, 12, 12],
[21, 20, 26, 23, 12, 12, 12, 12],
[26, 27, 23, 12, 12, 12, 12, 12],
[32, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 6
FloatTensor([
[8, 6, 9, 14, 17, 21, 28, 17],
[6, 6, 8, 13, 18, 23, 12, 12],
[9, 8, 11, 17, 23, 12, 12, 12],
[14, 13, 17, 23, 12, 12, 12, 12],
[17, 18, 23, 12, 12, 12, 12, 12],
[21, 23, 12, 12, 12, 12, 12, 12],
[28, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 7
FloatTensor([
[10, 7, 11, 18, 22, 27, 34, 17],
[7, 8, 10, 17, 23, 23, 12, 12],
[11, 10, 14, 22, 23, 12, 12, 12],
[18, 17, 22, 23, 12, 12, 12, 12],
[22, 23, 23, 12, 12, 12, 12, 12],
[27, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 8
FloatTensor([
[6, 4, 7, 11, 14, 17, 22, 17],
[4, 5, 6, 10, 14, 19, 12, 12],
[7, 6, 8, 14, 19, 12, 12, 12],
[11, 10, 14, 19, 12, 12, 12, 12],
[14, 14, 19, 12, 12, 12, 12, 12],
[17, 19, 12, 12, 12, 12, 12, 12],
[22, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 9
FloatTensor([
[4, 3, 4, 7, 9, 11, 14, 17],
[3, 3, 4, 7, 9, 12, 12, 12],
[4, 4, 5, 9, 12, 12, 12, 12],
[7, 7, 9, 12, 12, 12, 12, 12],
[9, 9, 12, 12, 12, 12, 12, 12],
[11, 12, 12, 12, 12, 12, 12, 12],
[14, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 10
FloatTensor([
[2, 2, 3, 4, 5, 6, 8, 11],
[2, 2, 2, 4, 5, 7, 9, 11],
[3, 2, 3, 5, 7, 9, 11, 12],
[4, 4, 5, 7, 9, 11, 12, 12],
[5, 5, 7, 9, 11, 12, 12, 12],
[6, 7, 9, 11, 12, 12, 12, 12],
[8, 9, 11, 12, 12, 12, 12, 12],
[11, 11, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 11
FloatTensor([
[1, 1, 1, 2, 3, 3, 4, 5],
[1, 1, 1, 2, 3, 4, 4, 6],
[1, 1, 2, 3, 4, 4, 5, 7],
[2, 2, 3, 4, 4, 5, 7, 8],
[3, 3, 4, 4, 5, 7, 8, 8],
[3, 4, 4, 5, 7, 8, 8, 8],
[4, 4, 5, 7, 8, 8, 8, 8],
[5, 6, 7, 8, 8, 8, 8, 8],
]),
# Luminance Level 12
FloatTensor([
[1, 1, 1, 1, 1, 1, 1, 2],
[1, 1, 1, 1, 1, 1, 1, 2],
[1, 1, 1, 1, 1, 1, 2, 2],
[1, 1, 1, 1, 1, 2, 2, 3],
[1, 1, 1, 1, 2, 2, 3, 3],
[1, 1, 1, 2, 2, 3, 3, 3],
[1, 1, 2, 2, 3, 3, 3, 3],
[2, 2, 2, 3, 3, 3, 3, 3],
]),
]
photoshop_jpeg_quantization_chrom = \
[
# Chrominance Level 0
FloatTensor([
[34, 51, 52, 34, 20, 20, 17, 17],
[51, 38, 24, 14, 14, 12, 12, 12],
[52, 24, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 1
FloatTensor([
[29, 41, 52, 34, 20, 20, 17, 17],
[41, 38, 24, 14, 14, 12, 12, 12],
[52, 24, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 2
FloatTensor([
[21, 26, 33, 34, 20, 20, 17, 17],
[26, 29, 24, 14, 14, 12, 12, 12],
[33, 24, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 3
FloatTensor([
[20, 22, 29, 34, 20, 20, 17, 17],
[22, 25, 24, 14, 14, 12, 12, 12],
[29, 24, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 4
FloatTensor([
[17, 17, 22, 34, 20, 20, 17, 17],
[17, 19, 22, 14, 14, 12, 12, 12],
[22, 22, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 5
FloatTensor([
[13, 13, 17, 27, 20, 20, 17, 17],
[13, 14, 17, 14, 14, 12, 12, 12],
[17, 17, 14, 14, 12, 12, 12, 12],
[27, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 6
FloatTensor([
[9, 9, 11, 18, 20, 20, 17, 17],
[9, 10, 11, 14, 14, 12, 12, 12],
[11, 11, 14, 14, 12, 12, 12, 12],
[18, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 7
FloatTensor([
[11, 14, 31, 34, 20, 20, 17, 17],
[14, 19, 24, 14, 14, 12, 12, 12],
[31, 24, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 8
FloatTensor([
[7, 9, 19, 34, 20, 20, 17, 17],
[9, 12, 19, 14, 14, 12, 12, 12],
[19, 19, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 9
FloatTensor([
[4, 6, 12, 22, 20, 20, 17, 17],
[6, 8, 12, 14, 14, 12, 12, 12],
[12, 12, 14, 14, 12, 12, 12, 12],
[22, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 10
FloatTensor([
[3, 3, 7, 13, 15, 15, 15, 15],
[3, 4, 7, 13, 14, 12, 12, 12],
[7, 7, 13, 14, 12, 12, 12, 12],
[13, 13, 14, 12, 12, 12, 12, 12],
[15, 14, 12, 12, 12, 12, 12, 12],
[15, 12, 12, 12, 12, 12, 12, 12],
[15, 12, 12, 12, 12, 12, 12, 12],
[15, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 11
FloatTensor([
[1, 2, 4, 7, 8, 8, 8, 8],
[2, 2, 4, 7, 8, 8, 8, 8],
[4, 4, 7, 8, 8, 8, 8, 8],
[7, 7, 8, 8, 8, 8, 8, 8],
[8, 8, 8, 8, 8, 8, 8, 8],
[8, 8, 8, 8, 8, 8, 8, 8],
[8, 8, 8, 8, 8, 8, 8, 8],
[8, 8, 8, 8, 8, 8, 8, 8],
]),
# Chrominance Level 12
FloatTensor([
[1, 1, 1, 2, 3, 3, 3, 3],
[1, 1, 1, 2, 3, 3, 3, 3],
[1, 1, 2, 3, 3, 3, 3, 3],
[2, 2, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3],
]),
]
# 0-6 have subsampling, 7-12 don't.
photoshop_chroma_subsampling = [True] * 7 + [False] * 6
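# Convenience sketch (an assumed pairing, mirroring the 13 levels listed above): look up the
# luminance table, chrominance table, and subsampling flag for a Photoshop quality level.
def _photoshop_jpeg_tables(level):
    """Return (lum_table, chrom_table, use_chroma_subsampling) for an integer level in [0, 12]."""
    return (photoshop_jpeg_quantization_lum[level],
            photoshop_jpeg_quantization_chrom[level],
            photoshop_chroma_subsampling[level])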
# DCT Coefficients
# The inverse coefficient is the same.
def _DCT_coeff():
v = torch.arange(8).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand((8, 8, 8, 8)).float()
u = torch.arange(8).unsqueeze( 0).unsqueeze(-1).unsqueeze(-1).expand((8, 8, 8, 8)).float()
y = torch.arange(8).unsqueeze( 0).unsqueeze( 0).unsqueeze(-1).expand((8, 8, 8, 8)).float()
x = torch.arange(8).unsqueeze( 0).unsqueeze( 0).unsqueeze( 0).expand((8, 8, 8, 8)).float()
au = torch.ones((8, 8, 8, 8)).float()
av = torch.ones((8, 8, 8, 8)).float()
av[0, :, ...] = 0.707 # 1 / sqrt(2)
au[:, 0, ...] = 0.707 # 1 / sqrt(2)
coeff = au * av * torch.cos((2*x + 1)*u*math.pi/16.0) \
* torch.cos((2*y + 1)*v*math.pi/16.0)
return coeff * 0.25
DCT_coeff = _DCT_coeff()
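# Minimal sketch of how the (v, u, y, x)-indexed coefficients above realize a JPEG-style
# 8x8 block DCT and its inverse (the same tensor serves both directions, as noted above).
# These helper names are illustrative; they are not used elsewhere in the pipeline.
def _dct_8x8(block):
    """Forward 2D DCT of an 8x8 tensor; returns coefficients indexed [v, u]."""
    return torch.einsum('vuyx,yx->vu', DCT_coeff, block)
def _idct_8x8(coeffs):
    """Inverse 2D DCT; _idct_8x8(_dct_8x8(b)) recovers b up to floating-point error."""
    return torch.einsum('vuyx,vu->yx', DCT_coeff, coeffs)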
| 16,240 | 44.113889 | 990 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/data_generation/data_utils.py
|
""" Utilities functions.
"""
import numbers
import numpy as np
import torch
from torch import FloatTensor
def random_crop(im, num_patches, w, h=None):
h = w if h is None else h
nw = im.size(-1) - w
nh = im.size(-2) - h
if nw < 0 or nh < 0:
raise RuntimeError("Image is to small {} for the desired size {}". \
format((im.size(-1), im.size(-2)), (w, h))
)
idx_w = np.random.choice(nw + 1, size=num_patches)
idx_h = np.random.choice(nh + 1, size=num_patches)
result = []
for i in range(num_patches):
result.append(im[...,
idx_h[i]:(idx_h[i]+h),
idx_w[i]:(idx_w[i]+w)])
return result
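# Tiny usage sketch for random_crop; the shapes below are illustrative assumptions.
def _example_random_crop():
    """Crop four 32x32 patches from a 1x3x128x128 tensor (returns a list of 4 tensors)."""
    im = torch.zeros(1, 3, 128, 128)
    return random_crop(im, num_patches=4, w=32)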
def expand_to_4d_channel(arr):
""" Expand Scalar or 1D dimension to 4D
    Assumes that a 1D list represents the channel dimension (2nd dim).
Args:
arr: A scalar or 1D tensor to be expanded to 4D
"""
# for scalar and 1D tensor, add batch dimensions.
while len(arr.size()) < 2:
arr = arr.unsqueeze(0)
# regain spatial dimension
while len(arr.size()) < 4:
arr = arr.unsqueeze(-1)
return arr
def expand_to_4d_batch(arr):
""" Expand Scalar or 1D dimension to 4D
    Assumes that a 1D list represents the batch dimension (1st dim).
Args:
arr: A scalar or 1D tensor to be expanded to 4D
"""
# regain spatial dimension and channel dimension
while len(arr.size()) < 4:
arr = arr.unsqueeze(-1)
return arr
def is_number(a):
return isinstance(a, numbers.Number)
def python_to_tensor(a):
if isinstance(a, numbers.Number):
return FloatTensor([a])
return a
def number_to_list(a):
if isinstance(a, numbers.Number):
a = [a]
return a
def cuda_like(arr, src):
""" Move arr on to GPU/CPU like src
"""
if src.is_cuda:
return arr.cuda()
else:
return arr.cpu()
def mosaick_multiply(mult, im, mosaick_pattern):
""" mosaick pattern-aware multiply.
Args:
mult: n-list of multiplier, where n is number of image channel.
A batch dimension is optional.
im: tensor of size n_batch x n_channel x width x height.
mosaick_pattern: None or string indicating the mosaick pattern.
"""
if mosaick_pattern is None:
return im * expand_to_4d_channel(mult)
elif mosaick_pattern == "bayer":
        # Assume GRBG format (row 0: G R, row 1: B G).
mult = expand_to_4d_channel(mult)
h, w = im.size(2), im.size(3)
x = torch.arange(w).unsqueeze(0).expand(h, -1)
y = torch.arange(h).unsqueeze(-1).expand(-1, w)
x = x.unsqueeze(0).unsqueeze(0)
y = y.unsqueeze(0).unsqueeze(0)
if im.is_cuda:
x = x.cuda()
y = y.cuda()
odd_x = torch.fmod(x, 2)
odd_y = torch.fmod(y, 2)
is_green = odd_x == odd_y
is_red = odd_x * (1.0 - odd_y)
is_blue = (1.0 - odd_x) * odd_y
mult = mult.expand(-1, 3, -1, -1)
return im * mult[:, 0:1, ...] * is_red.float() + \
im * mult[:, 1:2, ...] * is_green.float() + \
im * mult[:, 2:3, ...] * is_blue.float()
else:
raise ValueError("Mosaick pattern, {}, not supported." \
.format(mosaick_pattern))
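# Sketch of mosaick_multiply used as a per-channel gain (e.g. white-balance-style scaling)
# on a single-channel bayer frame; the gain values and shape are illustrative only.
def _example_bayer_gain():
    """Apply (R, G, B) gains to a 1x1xHxW bayer-mosaicked tensor."""
    mosaicked = torch.rand(1, 1, 8, 8)    # single-channel GRBG frame (see comment above)
    gains = FloatTensor([2.0, 1.0, 1.5])  # R, G, B multipliers
    return mosaick_multiply(gains, mosaicked, mosaick_pattern="bayer")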
| 3,336 | 25.696 | 76 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/data_generation/image_io.py
|
""" I/O module
This unit deals with the nitty-gritty of reading DSLR camera raw files and
various other formats.
"""
import numpy as np
import rawpy
def read_raw(path, n_bits=None):
with rawpy.imread(path) as raw:
im_ = raw.raw_image_visible.copy()
# subtract black level
im = np.zeros(im_.shape, dtype='float32')
for i in range(len(raw.black_level_per_channel)):
im += (im_ - raw.black_level_per_channel[i]) * (raw.raw_colors_visible == i).astype('float32')
if n_bits is None:
im /= np.amax(im)
else:
im /= np.power(2, n_bits)
# shift bayer pattern
red_idx = raw.color_desc.find(b'R')
if red_idx == -1:
print("Warning: Red is not in color description.")
red_idx = 0
raw_pattern = raw.raw_colors_visible[:8, :8].copy()
red_pos = np.asarray(np.where(raw_pattern == red_idx))[:,0]
row_offset = red_pos[0]
# So that we start with GR
col_offset = red_pos[1] + 1
im = im[row_offset:, col_offset:]
return im, \
raw.rgb_xyz_matrix, \
raw.camera_whitebalance
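# Usage sketch for read_raw; the path and bit depth below are placeholders, not files
# or settings that ship with this repo.
def _example_read_raw(path="/path/to/photo.dng", n_bits=14):
    """Read a raw file, returning a GR-aligned bayer plane plus camera color metadata."""
    bayer, cam_rgb_xyz, cam_wb = read_raw(path, n_bits=n_bits)
    # bayer: float32 HxW mosaic scaled by 2**n_bits (or by its own max when n_bits is None)
    return bayer, cam_rgb_xyz, cam_wb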
| 1,176 | 29.973684 | 106 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/data_generation/ahd_demosaicking.py
|
import numpy as np
import scipy
from scipy.io import savemat
from .constants import RGB2YUV
from scipy.interpolate import interp2d
_RGB2YUV = RGB2YUV.cpu().data.numpy()
def ahd_demosaicking(mosaic, delta=1, sobel_sz=3, avg_sz=3):
"""Demosaicking using AHD algorithm.
No median filtering, assume GRBG format.
Args:
delta: neighborhood size for calculating homogeneity.
sobel_sz: size of sobel kernels.
avg_sz: size of averaging kernel for homogeneity.
"""
Yx = _demosaickX(mosaic)
Yy = _demosaickY(mosaic)
YxYUV = _rgb2YUV(Yx)
YyYUV = _rgb2YUV(Yy)
epsL, epsCsq = _adaptive_param(YxYUV, YyYUV, sobel_sz)
Hx = _homogeniety(YxYUV, delta, epsL, epsCsq)
Hy = _homogeniety(YyYUV, delta, epsL, epsCsq)
Hx = _conv2(Hx, np.ones((avg_sz, avg_sz)) / float(avg_sz**2))
Hy = _conv2(Hy, np.ones((avg_sz, avg_sz)) / float(avg_sz**2))
mask = (Hx > Hy).astype('float')
mask = np.expand_dims(mask, -1)
output = mask * Yx + (1.0 - mask) * Yy
return np.clip(output, 0.0, 1.0)
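# Self-contained sketch: build a GRBG mosaic from a full-RGB image and demosaick it with
# the routine above. The construction of the synthetic mosaic is an illustration only.
def _example_ahd(rgb):
    """rgb: HxWx3 float array in [0, 1] with even H and W; returns the demosaicked estimate."""
    h, w = rgb.shape[:2]
    mosaic = np.zeros((h, w), dtype=rgb.dtype)
    mosaic[0::2, 0::2] = rgb[0::2, 0::2, 1]   # G
    mosaic[0::2, 1::2] = rgb[0::2, 1::2, 0]   # R
    mosaic[1::2, 0::2] = rgb[1::2, 0::2, 2]   # B
    mosaic[1::2, 1::2] = rgb[1::2, 1::2, 1]   # G
    return ahd_demosaicking(mosaic, delta=1, sobel_sz=3, avg_sz=3)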
# https://stackoverflow.com/questions/9567882/sobel-filter-kernel-of-large-size/41065243#41065243
def _sobel_kernel(sz):
if (sz % 2) == 0:
raise ValueError("Kernel size must be odd ({} received)".format(sz))
kernel = np.zeros((sz, sz))
for i in range(sz):
for j in range(sz):
ii = i - (sz // 2)
jj = j - (sz // 2)
kernel[i, j] = ii / (ii**2 + jj**2) if ii != 0 else 0
return kernel
def _interp2d(arr, new_sz):
f = interp2d(x=np.linspace(0, 1, arr.shape[1]),
y=np.linspace(0, 1, arr.shape[0]),
z=arr)
return f(np.linspace(0, 1, new_sz[1]), np.linspace(0, 1, new_sz[0]))
def _interp_kernel(m=5, n=3):
# Duplicate row so it works with bilinear interpolation
Hg = np.array([[-0.25, 0.5, 0.5, 0.5, -0.25],[-0.25, 0.5, 0.5, 0.5, -0.25]])
Hr = np.array([[0.25, 0.5, 0.25], [0.5, 1.0, 0.5], [0.25, 0.5, 0.25]])
if m != 5:
Hg = _interp2d(Hg, (2, m))
if n != 3:
Hr = _interp2d(Hr, (n, n))
Hg = Hg[0:1, :]
Hg = Hg / np.sum(Hg[:])
Hr = Hr / np.sum(Hr[:]) * 4
return Hg, Hr
def _conv2(x, k):
return scipy.ndimage.filters.convolve(x, k, mode='reflect')
def _demosaickX(X, transposed=False):
Mr = np.zeros(X.shape)
Mg = np.ones(X.shape)
Mb = np.zeros(X.shape)
Mr[0::2, 1::2] = 1.0
Mb[1::2, 0::2] = 1.0
Mg = Mg - Mr - Mb
# Switch R and B (which got swapped when we transpose X).
if transposed:
Mr, Mb = Mb, Mr
Hg, Hr = _interp_kernel(5, 3)
G = Mg * X + (Mr + Mb) * _conv2(X, Hg)
R = G + _conv2(Mr * (X - G), Hr)
B = G + _conv2(Mb * (X - G), Hr)
R = np.expand_dims(R, -1)
G = np.expand_dims(G, -1)
B = np.expand_dims(B, -1)
return np.concatenate((R,G,B), axis=2)
def _demosaickY(X):
X = X.T
Y = _demosaickX(X, transposed=True)
Y = np.swapaxes(Y, 0, 1)
return Y
def _adaptive_param(X, Y, sz):
sobel_y = _sobel_kernel(sz)
sobel_x = sobel_y.T
eL = np.minimum(abs(_conv2(X[:,:,0], sobel_x)),
abs(_conv2(Y[:,:,0], sobel_y)))
eCsq = np.minimum(_conv2(X[:,:,1], sobel_x)**2 + _conv2(X[:,:,2], sobel_x)**2,
_conv2(Y[:,:,1], sobel_y)**2 + _conv2(Y[:,:,2], sobel_y)**2)
return eL, eCsq
def _rgb2YUV(X):
return np.einsum("ijk,lk->ijl", X, _RGB2YUV)
def _ballset(delta):
index = int(np.ceil(delta))
# initialize
H = np.zeros((index*2+1, index*2+1, (index*2+1)**2))
    k = 0
for i in range(-index, index):
for j in range(-index,index):
if np.sqrt(i**2 + j**2) <= delta:
# included
H[index+i, index+j, k] = 1
k = k + 1
    H = H[:,:,:k]
return H
def _homogeniety(X, delta, epsL, epsC_sq):
    H = _ballset(delta)
K = np.zeros(X.shape[:2])
for i in range(H.shape[-1]):
# level set
L = abs(_conv2(X[:,:,0], H[:,:,i]) - X[:,:,0]) <= epsL
# color set
C = ((_conv2(X[:,:,1], H[:,:,i]) - X[:,:,1])**2 + \
             (_conv2(X[:,:,2], H[:,:,i]) - X[:,:,2])**2) <= epsC_sq
# metric neighborhood
U = C * L
# homogeneity
K = K + U
return K
| 4,292 | 27.058824 | 97 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/data_generation/denoise_wavelet.py
|
# coding: utf-8
# Modified from skimage's wavelet.
# https://github.com/scikit-image/scikit-image/blob/f0d48db4c246989182aa01c837d04903bc2330ae/skimage/restoration/_denoise.py
import scipy.stats
import numpy as np
from math import ceil
import warnings
import pywt
import skimage.color as color
from skimage import img_as_float
import numbers
def warn(msg):
warnings.warn(msg)
def _bayes_thresh(details, var):
"""BayesShrink threshold for a zero-mean details coeff array."""
# Equivalent to: dvar = np.var(details) for 0-mean details array
dvar = np.mean(details*details)
eps = np.finfo(details.dtype).eps
thresh = var / np.sqrt(max(dvar - var, eps))
return thresh
def _universal_thresh(img, sigma):
""" Universal threshold used by the VisuShrink method """
return sigma*np.sqrt(2*np.log(img.size))
def _sigma_est_dwt(detail_coeffs, distribution='Gaussian'):
"""Calculate the robust median estimator of the noise standard deviation.
Parameters
----------
detail_coeffs : ndarray
The detail coefficients corresponding to the discrete wavelet
transform of an image.
distribution : str
The underlying noise distribution.
Returns
-------
sigma : float
The estimated noise standard deviation (see section 4.2 of [1]_).
References
----------
.. [1] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation
by wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
DOI:10.1093/biomet/81.3.425
"""
# Consider regions with detail coefficients exactly zero to be masked out
detail_coeffs = detail_coeffs[np.nonzero(detail_coeffs)]
if distribution.lower() == 'gaussian':
# 75th quantile of the underlying, symmetric noise distribution
denom = scipy.stats.norm.ppf(0.75)
sigma = np.median(np.abs(detail_coeffs)) / denom
else:
raise ValueError("Only Gaussian noise estimation is currently "
"supported")
return sigma
def _wavelet_threshold(image, wavelet, method=None, threshold=None,
sigma=None, mode='soft', wavelet_levels=None):
"""Perform wavelet thresholding.
Parameters
----------
image : ndarray (2d or 3d) of ints, uints or floats
Input data to be denoised. `image` can be of any numeric type,
but it is cast into an ndarray of floats for the computation
of the denoised image.
wavelet : string
The type of wavelet to perform. Can be any of the options
pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
db3, db4, haar}``.
method : {'BayesShrink', 'VisuShrink'}, optional
Thresholding method to be used. The currently supported methods are
"BayesShrink" [1]_ and "VisuShrink" [2]_. If it is set to None, a
user-specified ``threshold`` must be supplied instead.
threshold : float, optional
The thresholding value to apply during wavelet coefficient
thresholding. The default value (None) uses the selected ``method`` to
estimate appropriate threshold(s) for noise removal.
sigma : float, optional
The standard deviation of the noise. The noise is estimated when sigma
is None (the default) by the method in [2]_.
mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that soft thresholding, in the presence of additive noise, finds
        the best approximation of the original image.
wavelet_levels : int or None, optional
The number of wavelet decomposition levels to use. The default is
three less than the maximum number of possible decomposition levels
(see Notes below).
Returns
-------
out : ndarray
Denoised image.
References
----------
.. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
thresholding for image denoising and compression." Image Processing,
IEEE Transactions on 9.9 (2000): 1532-1546.
DOI: 10.1109/83.862633
.. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation
by wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
DOI: 10.1093/biomet/81.3.425
"""
wavelet = pywt.Wavelet(wavelet)
# original_extent is used to workaround PyWavelets issue #80
# odd-sized input results in an image with 1 extra sample after waverecn
original_extent = tuple(slice(s) for s in image.shape)
# Determine the number of wavelet decomposition levels
if wavelet_levels is None:
# Determine the maximum number of possible levels for image
dlen = wavelet.dec_len
wavelet_levels = np.min(
[pywt.dwt_max_level(s, dlen) for s in image.shape])
# Skip coarsest wavelet scales (see Notes in docstring).
wavelet_levels = max(wavelet_levels - 3, 1)
coeffs = pywt.wavedecn(image, wavelet=wavelet, level=wavelet_levels)
# Detail coefficients at each decomposition level
dcoeffs = coeffs[1:]
if sigma is None:
# Estimate the noise via the method in [2]_
detail_coeffs = dcoeffs[-1]['d' * image.ndim]
sigma = _sigma_est_dwt(detail_coeffs, distribution='Gaussian')
if method is not None and threshold is not None:
warn(("Thresholding method {} selected. The user-specified threshold "
"will be ignored.").format(method))
if threshold is None:
var = sigma**2
if method is None:
raise ValueError(
"If method is None, a threshold must be provided.")
elif method == "BayesShrink":
# The BayesShrink thresholds from [1]_ in docstring
threshold = [{key: _bayes_thresh(level[key], var) for key in level}
for level in dcoeffs]
elif method == "VisuShrink":
# The VisuShrink thresholds from [2]_ in docstring
threshold = _universal_thresh(image, sigma)
else:
raise ValueError("Unrecognized method: {}".format(method))
elif isinstance(threshold, list) or isinstance(threshold, tuple):
# convert to list in case it's a tuple
threshold = list(threshold)
# Supplement threshold if it is not long enough
# higher level is lower-frequency details
if len(threshold) < len(dcoeffs):
threshold = threshold + ([threshold[-1]] * (len(dcoeffs) - len(threshold)))
threshold = [{key: threshold[i] for key in level}
for i, level in enumerate(dcoeffs)]
if np.isscalar(threshold):
# A single threshold for all coefficient arrays
denoised_detail = [{key: pywt.threshold(level[key],
value=threshold,
mode=mode) for key in level}
for level in dcoeffs]
else:
# Dict of unique threshold coefficients for each detail coeff. array
denoised_detail = [{key: pywt.threshold(level[key],
value=thresh[key],
mode=mode) for key in level}
for thresh, level in zip(threshold, dcoeffs)]
denoised_coeffs = [coeffs[0]] + denoised_detail
return pywt.waverecn(denoised_coeffs, wavelet)[original_extent]
def denoise_wavelet(image, sigma=None, wavelet='db1', mode='soft',
wavelet_levels=None, multichannel=False,
convert2ycbcr=False, method='BayesShrink',
threshold=None
):
"""Perform wavelet denoising on an image.
Parameters
----------
image : ndarray ([M[, N[, ...P]][, C]) of ints, uints or floats
Input data to be denoised. `image` can be of any numeric type,
but it is cast into an ndarray of floats for the computation
of the denoised image.
sigma : float or list, optional
The noise standard deviation used when computing the wavelet detail
coefficient threshold(s). When None (default), the noise standard
deviation is estimated via the method in [2]_.
wavelet : string, optional
The type of wavelet to perform and can be any of the options
``pywt.wavelist`` outputs. The default is `'db1'`. For example,
``wavelet`` can be any of ``{'db2', 'haar', 'sym9'}`` and many more.
mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that soft thresholding, in the presence of additive noise, finds
        the best approximation of the original image.
wavelet_levels : int or None, optional
The number of wavelet decomposition levels to use. The default is
three less than the maximum number of possible decomposition levels.
multichannel : bool, optional
Apply wavelet denoising separately for each channel (where channels
correspond to the final axis of the array).
convert2ycbcr : bool, optional
If True and multichannel True, do the wavelet denoising in the YCbCr
colorspace instead of the RGB color space. This typically results in
better performance for RGB images.
method : {'BayesShrink', 'VisuShrink'}, optional
Thresholding method to be used. The currently supported methods are
"BayesShrink" [1]_ and "VisuShrink" [2]_. Defaults to "BayesShrink".
Returns
-------
out : ndarray
Denoised image.
Notes
-----
The wavelet domain is a sparse representation of the image, and can be
thought of similarly to the frequency domain of the Fourier transform.
Sparse representations have most values zero or near-zero and truly random
noise is (usually) represented by many small values in the wavelet domain.
Setting all values below some threshold to 0 reduces the noise in the
image, but larger thresholds also decrease the detail present in the image.
If the input is 3D, this function performs wavelet denoising on each color
    plane separately. The output image is clipped to either [-1, 1] or
    [0, 1], depending on the input image range.
When YCbCr conversion is done, every color channel is scaled between 0
and 1, and `sigma` values are applied to these scaled color channels.
Many wavelet coefficient thresholding approaches have been proposed. By
default, ``denoise_wavelet`` applies BayesShrink, which is an adaptive
thresholding method that computes separate thresholds for each wavelet
sub-band as described in [1]_.
If ``method == "VisuShrink"``, a single "universal threshold" is applied to
all wavelet detail coefficients as described in [2]_. This threshold
is designed to remove all Gaussian noise at a given ``sigma`` with high
probability, but tends to produce images that appear overly smooth.
References
----------
.. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
thresholding for image denoising and compression." Image Processing,
IEEE Transactions on 9.9 (2000): 1532-1546.
DOI: 10.1109/83.862633
.. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation
by wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
DOI: 10.1093/biomet/81.3.425
Examples
--------
>>> from skimage import color, data
>>> img = img_as_float(data.astronaut())
>>> img = color.rgb2gray(img)
>>> img += 0.1 * np.random.randn(*img.shape)
>>> img = np.clip(img, 0, 1)
>>> denoised_img = denoise_wavelet(img, sigma=0.1)
"""
if method not in ["BayesShrink", "VisuShrink"]:
raise ValueError(
('Invalid method: {}. The currently supported methods are '
'"BayesShrink" and "VisuShrink"').format(method))
image = img_as_float(image)
if multichannel:
if isinstance(sigma, numbers.Number) or sigma is None:
sigma = [sigma] * image.shape[-1]
if multichannel:
if convert2ycbcr:
out = color.rgb2ycbcr(image)
for i in range(3):
# renormalizing this color channel to live in [0, 1]
min, max = out[..., i].min(), out[..., i].max()
channel = out[..., i] - min
channel /= max - min
out[..., i] = denoise_wavelet(channel, wavelet=wavelet,
method=method, sigma=sigma[i],
mode=mode,
wavelet_levels=wavelet_levels,
threshold=threshold
)
out[..., i] = out[..., i] * (max - min)
out[..., i] += min
out = color.ycbcr2rgb(out)
else:
out = np.empty_like(image)
for c in range(image.shape[-1]):
out[..., c] = _wavelet_threshold(image[..., c],
wavelet=wavelet,
method=method,
sigma=sigma[c], mode=mode,
wavelet_levels=wavelet_levels,
threshold=threshold)
else:
out = _wavelet_threshold(image, wavelet=wavelet, method=method,
sigma=sigma, mode=mode,
wavelet_levels=wavelet_levels,
threshold=threshold)
clip_range = (-1, 1) if image.min() < 0 else (0, 1)
return np.clip(out, *clip_range)
def estimate_sigma(image, average_sigmas=False, multichannel=False):
"""
Robust wavelet-based estimator of the (Gaussian) noise standard deviation.
Parameters
----------
image : ndarray
Image for which to estimate the noise standard deviation.
average_sigmas : bool, optional
If true, average the channel estimates of `sigma`. Otherwise return
a list of sigmas corresponding to each channel.
multichannel : bool
Estimate sigma separately for each channel.
Returns
-------
sigma : float or list
Estimated noise standard deviation(s). If `multichannel` is True and
`average_sigmas` is False, a separate noise estimate for each channel
is returned. Otherwise, the average of the individual channel
estimates is returned.
Notes
-----
This function assumes the noise follows a Gaussian distribution. The
estimation algorithm is based on the median absolute deviation of the
wavelet detail coefficients as described in section 4.2 of [1]_.
References
----------
.. [1] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation
by wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
DOI:10.1093/biomet/81.3.425
Examples
--------
>>> import skimage.data
>>> from skimage import img_as_float
>>> img = img_as_float(skimage.data.camera())
>>> sigma = 0.1
>>> img = img + sigma * np.random.standard_normal(img.shape)
>>> sigma_hat = estimate_sigma(img, multichannel=False)
"""
if multichannel:
nchannels = image.shape[-1]
sigmas = [estimate_sigma(
image[..., c], multichannel=False) for c in range(nchannels)]
if average_sigmas:
sigmas = np.mean(sigmas)
return sigmas
elif image.shape[-1] <= 4:
msg = ("image is size {0} on the last axis, but multichannel is "
"False. If this is a color image, please set multichannel "
"to True for proper noise estimation.")
warn(msg.format(image.shape[-1]))
coeffs = pywt.dwtn(image, wavelet='db2')
detail_coeffs = coeffs['d' * image.ndim]
return _sigma_est_dwt(detail_coeffs, distribution='Gaussian')
| 16,124 | 41.101828 | 124 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/data_generation/image_processing.py
|
import torch
import torch.nn as nn
from torch import FloatTensor, IntTensor
# For drawing motion blur kernel.
import numpy as np
import cv2
import scipy
import functools
import math
from .data_utils import mosaick_multiply, expand_to_4d_batch
from .data_utils import python_to_tensor, cuda_like, number_to_list, is_number
from .kernel import gausskern1d, gausskern2d
from .constants import xyz_color_matching, XYZ2sRGB
from .constants import RGB2YUV, YUV2RGB
from .constants import DCT_coeff
from .constants import photoshop_jpeg_quantization_lum
from .constants import photoshop_jpeg_quantization_chrom
from .constants import photoshop_chroma_subsampling
from .ahd_demosaicking import ahd_demosaicking
from utils.image_utils import check_nan_tensor
import skimage
from .denoise_wavelet import denoise_wavelet as sk_denoise_wavelet
try:
from halide.gradient_apps.gapps import functions as halide_funcs
HAS_HALIDE = True
except:
HAS_HALIDE = False
DEBUG = False
def _has_halide():
return HAS_HALIDE
# TODO: Check if I need to set required_grad properly on all constant tensors.
class IdentityModule(nn.Module):
"""Dummy Class for testing."""
def __init__(self):
super().__init__()
def forward(self, image):
return image.copy()
# Halide implementation
# Cellphone Image Processing
class DenoisingBilateral(nn.Module):
# TODO: support batch
# TODO: support GPU.
def __init__(self,
sigma_s,
sigma_r,
color_sigma_ratio=5,
filter_lum=True,
filter_chrom=True,
n_iter=1,
guide_transform=None,
_bp=0.004,
color_range_ratio=1):
""" Apply Gaussian bilateral filter to denoise image.
Args:
sigma_s: stdev in spatial dimension.
sigma_r: stdev in the range dimension.
color_sigma_ratio: multiplier for spatial sigma for filtering
chrominance.
            filter_lum: whether or not to filter luminance (useful if you
                want to filter chrominance only).
filter_chrom: same as filter_lum but for chrominance.
n_iter: number of times to apply this filter.
guide_transform: transformation to apply to the guide map. Must be
                'sqrt', 'log', None, or a number. If a number, it is used as
                the exponent to transform the guide according to a power law.
_bp: Black point for log transform. This is used to prevent taking
log of zeros or negative numbers. Must be positive.
color_range_ratio: multiplier for range sigma for filtering
chrominance.
"""
super().__init__()
self.sigma_s = sigma_s
self.sigma_r = sigma_r
self.color_sigma_ratio = color_sigma_ratio
self.color_range_ratio = color_range_ratio
self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
self.filter_lum = filter_lum
self.filter_chrom = filter_chrom
self.n_iter = n_iter
self.guide_transform = guide_transform
self._bp = _bp
if self.guide_transform not in ['sqrt', 'log', None] and \
not (is_number(self.guide_transform)):
raise ValueError('Invalid guide transformation received: {}'.format(guide_transform))
if self.guide_transform == 'sqrt':
self.guide_transform = 0.5
def forward(self, image):
if not _has_halide():
raise RuntimeError("Need halide in order to run this")
if DEBUG and check_nan_tensor(image):
print("Denoising input has NAN!")
self._filter_s = FloatTensor(gausskern1d(self.sigma_s))
self._filter_s_2 = FloatTensor(gausskern1d(3.0 * self.sigma_s))
self._filter_s_color = FloatTensor(gausskern1d(self.color_sigma_ratio * self.sigma_s))
self._filter_r = FloatTensor(gausskern1d(self.sigma_r))
self._filter_r_color = FloatTensor(gausskern1d(self.sigma_r * self.color_range_ratio))
self._filter_s = cuda_like(self._filter_s, image)
self._filter_s_2 = cuda_like(self._filter_s_2, image)
self._filter_s_color = cuda_like(self._filter_s_color, image)
self._filter_r = cuda_like(self._filter_r, image)
yuv = self.rgb2yuv(image)
for i in range(self.n_iter):
yuv = self._forward(yuv)
output = self.yuv2rgb(yuv)
if DEBUG and check_nan_tensor(output):
print("Denoising output has NAN!")
return output
def _forward(self, yuv):
lum = yuv[:, 0:1, ...]
guide = lum[:, 0, ...]
if is_number(self.guide_transform):
guide = self._gamma_compression(guide, self.guide_transform)
elif self.guide_transform == 'log':
guide = self._log_compression(guide, self._bp)
guide = torch.clamp(guide, 0.0, 1.0)
out_yuv = yuv.clone()
if self.filter_lum:
out_lum = halide_funcs.BilateralGrid.apply(lum,
guide,
self._filter_s,
self._filter_r)
out_yuv[:, 0:1, ...] = out_lum
if self.filter_chrom:
out_yuv[:, 1:3, ...] = halide_funcs.BilateralGrid.apply(yuv[:, 1:3, ...],
out_yuv[:, 0, ...],
self._filter_s_color,
self._filter_r_color)
return out_yuv
@staticmethod
def _gamma_compression(lum, gamma):
return torch.pow(torch.clamp(lum, 0), gamma)
@staticmethod
def _undo_gamma_compression(lum, gamma):
return torch.pow(torch.clamp(lum, 0), 1.0 / gamma)
@staticmethod
def _log_compression(lum, bp):
# Just clamp
log_bp = np.log(bp)
lum = torch.log(torch.clamp(lum, bp))
lum = torch.clamp((lum - log_bp) / (-log_bp), 0, 1)
return lum
@staticmethod
def _undo_log_compression(lum, bp):
# Add and rescale
log_bp = np.log(bp)
log_1_bp = np.log(1.0 + bp)
lum = (lum * (log_1_bp - log_bp)) + log_bp
lum = (torch.exp(lum) - bp)
return lum
# Non-differentiable bilateral filter implementation (does not use Halide)
class DenoisingSKImageBilateralNonDifferentiable(DenoisingBilateral):
def forward(self, image):
if DEBUG and check_nan_tensor(image):
print("Denoising input has NAN!")
yuv = self.rgb2yuv(image)
for i in range(self.n_iter):
yuv = self._forward(yuv)
output = self.yuv2rgb(yuv)
if DEBUG and check_nan_tensor(output):
print("Denoising output has NAN!")
return output
def _forward(self, yuv):
lum = yuv[:, 0:1, ...]
lum = torch.clamp(lum, 0, 1)
out_yuv = yuv.clone()
        # This is used to convert sigma_r so that it is in the same range as
# Halide's bilateral grid
HALIDE_RANGE_GRID = 32.0
skbilateral = skimage.restoration.denoise_bilateral
if self.filter_lum:
# skimage's bilateral filter uses the luminance as the guide.
if is_number(self.guide_transform):
lum = self._gamma_compression(lum, self.guide_transform)
elif self.guide_transform == 'log':
lum = self._log_compression(lum, self._bp)
lum_ = lum.cpu().permute(0, 2, 3, 1).data.numpy().astype('float32')
lum_ = lum_[:, :, :, 0]
# Filter each image in the batch
for i in range(lum_.shape[0]):
# lum_[i, ...] = skbilateral(lum_[i, ...],
# sigma_color=self.sigma_r / HALIDE_RANGE_GRID,
# sigma_spatial=self.sigma_s,
# multichannel=False,
# mode="reflect")
win_sz = max(5, 2 * math.ceil(3 * self.sigma_s) + 1)
lum_[i, ...] = cv2.bilateralFilter(lum_[i, ...],
d=win_sz,
sigmaColor=self.sigma_r / HALIDE_RANGE_GRID,
sigmaSpace=self.sigma_s,
borderType=cv2.BORDER_REFLECT)
lum_ = FloatTensor(lum_).unsqueeze(-1).permute(0, 3, 1, 2)
out_lum = cuda_like(lum_, lum)
# Undo guide transformation
if is_number(self.guide_transform):
out_lum = self._undo_gamma_compression(out_lum, self.guide_transform)
elif self.guide_transform == 'log':
out_lum = self._undo_log_compression(out_lum, self._bp)
out_lum = torch.clamp(out_lum, 0.0, 1.0)
out_yuv[:, 0:1, ...] = out_lum
# Filter chrominance.
if self.filter_chrom:
chrom = yuv[:, 1:3, ...]
chrom = torch.clamp((chrom + 1) * 0.5, 0.0, 1.0)
chrom_ = chrom.cpu().permute(0, 2, 3, 1).data.numpy().astype('float32')
# Filter each image in the batch
for i in range(chrom_.shape[0]):
for j in range(2):
# chrom_[i, :, :, j] = skbilateral(chrom_[i, :, :, j],
# sigma_color=self.sigma_r / HALIDE_RANGE_GRID * self.color_range_ratio,
# sigma_spatial=(self.sigma_s * self.color_sigma_ratio),
# multichannel=False,
# mode="reflect")
win_sz = max(5, 2 * math.ceil(3 * self.sigma_s * self.color_sigma_ratio) + 1)
chrom_[i, :, :, j] = cv2.bilateralFilter(chrom_[i, :, :, j],
d=win_sz,
sigmaColor=self.sigma_r / HALIDE_RANGE_GRID * self.color_range_ratio,
sigmaSpace=self.sigma_s * self.color_sigma_ratio,
borderType=cv2.BORDER_REFLECT)
# Convert back to PyTorch tensor.
chrom_ = FloatTensor(chrom_).permute(0, 3, 1, 2)
out_chrom = cuda_like(chrom_, chrom)
out_chrom = 2.0 * out_chrom - 1.0
out_yuv[:, 1:3, ...] = out_chrom
return out_yuv
class DenoisingWaveletNonDifferentiable(DenoisingSKImageBilateralNonDifferentiable):
def __init__(self, **kwargs):
""" HACK: this function repurpose input for bilateral filters for
different things.
sigma_s --> Thresholding method. Can be string of numerical flags.
color_sigma_ratio --> String indicating wavelet family (see skimage's documentation for detail).
n_iter --> levels of wavelets.
_bp --> wavelet threshold.
"""
super().__init__(**kwargs)
if is_number(self.sigma_s):
self.method = "BayesShrink" if self.sigma_s < 1 else "VisuShrink"
else:
self.method = self.sigma_s
if is_number(self.color_sigma_ratio):
raise ValueError("Wavelet denoising uses color_sigma_ratio to be"
" string indicating wavelet family to use. "
"{} received.".format(self.color_sigma_ratio))
self.wavelet_family = self.color_sigma_ratio
self.wavelet_levels = self.n_iter
self.n_iter = 1
self.wavelet_threshold = self._bp
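    # Added note (not in the original): given the repurposing above, e.g.
    #     DenoisingWaveletNonDifferentiable(sigma_s="BayesShrink", sigma_r=0.05,
    #                                       color_sigma_ratio="db2", n_iter=3)
    # runs 3-level "db2" wavelet denoising with BayesShrink thresholding and
    # the default _bp as the wavelet threshold.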
def _forward(self, yuv):
lum = yuv[:, 0:1, ...]
out_yuv = yuv.clone()
        # this is used to convert sigma_r so that it is in the same range as Halide's bilateral grid
# HALIDE_RANGE_GRID = 32.0
if self.filter_lum:
if is_number(self.guide_transform):
lum = self._gamma_compression(lum, self.guide_transform)
elif self.guide_transform == 'log':
lum = self._log_compression(lum, self._bp)
lum_ = lum.cpu().permute(0, 2, 3, 1).data.numpy().astype('float64')
lum_ = lum_[:, :, :, 0]
for i in range(lum_.shape[0]):
lum_[i, ...] = sk_denoise_wavelet(lum_[i, ...],
sigma=self.sigma_r,
method=self.method,
wavelet=self.wavelet_family,
wavelet_levels=self.wavelet_levels,
threshold=self.wavelet_threshold,
mode="soft")
lum_ = FloatTensor(lum_).unsqueeze(-1).permute(0, 3, 1, 2)
out_lum = cuda_like(lum_, lum)
if is_number(self.guide_transform):
out_lum = self._undo_gamma_compression(out_lum, self.guide_transform)
elif self.guide_transform == 'log':
out_lum = self._undo_log_compression(out_lum, self._bp)
out_lum = torch.clamp(out_lum, 0.0, 1.0)
out_yuv[:, 0:1, ...] = out_lum
if self.filter_chrom:
chrom = yuv[:, 1:3, ...]
chrom = torch.clamp((chrom + 1) * 0.5, 0.0, 1.0)
chrom_ = chrom.cpu().permute(0, 2, 3, 1).data.numpy().astype('float64')
for i in range(chrom_.shape[0]):
chrom_[i, ...] = sk_denoise_wavelet(chrom_[i, ...],
method=self.method,
wavelet=self.wavelet_family,
wavelet_levels=self.wavelet_levels,
threshold=self.wavelet_threshold,
mode="soft")
chrom_ = FloatTensor(chrom_).permute(0, 3, 1, 2)
out_chrom = cuda_like(chrom_, chrom)
out_chrom = 2.0 * out_chrom - 1.0
out_yuv[:, 1:3, ...] = out_chrom
return out_yuv
class DenoisingMedianNonDifferentiable(nn.Module):
def __init__(self,
neighbor_sz,
color_sigma_ratio=5,
filter_lum=True,
filter_chrom=True,
n_iter=1):
""" Apply Median Filtering
"""
super().__init__()
self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
self.filter_lum = filter_lum
self.filter_chrom = filter_chrom
self.n_iter = n_iter
self.lum_median = MedianFilterNonDifferentiable(neighbor_sz)
if is_number(neighbor_sz):
self.chrom_median = MedianFilterNonDifferentiable(int(neighbor_sz * color_sigma_ratio))
else:
if DEBUG and color_sigma_ratio != 1:
print("Warning: ignoring color_sigma_ratio because neighbor_sz is not a number.")
self.chrom_median = self.lum_median
def forward(self, image):
if DEBUG and check_nan_tensor(image):
print("Denoising input has NAN!")
yuv = self.rgb2yuv(image)
for i in range(self.n_iter):
yuv = self._forward(yuv)
output = self.yuv2rgb(yuv)
if DEBUG and check_nan_tensor(output):
print("Denoising output has NAN!")
return output
def _forward(self, yuv):
lum = yuv[:, 0:1, ...]
out_yuv = yuv.clone()
if self.filter_lum:
out_lum = self.lum_median(lum)
out_yuv[:, 0:1, ...] = torch.clamp(out_lum, 0.0, 1.0)
if self.filter_chrom:
out_yuv[:, 1:3, ...] = self.chrom_median(yuv[:, 1:3, ...])
return out_yuv
class PytorchResizing(nn.Module):
def __init__(self,
resizing_factor=None,
new_size=None,
mode='bilinear'):
""" Bilinear interpolation for resizing.
*** No Pre-filtering is applied!
Args:
resizing_factor: factors to resize image with. This or new_size
must be specified.
new_size: new image size (width, height) to resize to.
mode: "bilinear", "area", "nearest". See nn.functional.interpolate
for more detail.
"""
super().__init__()
if (new_size is None) == (resizing_factor is None):
raise ValueError("Must specified exactly one of new_size ({})"
" or resizing_factor ({}).".format(new_size,
resizing_factor)
)
self.resizing_factor = resizing_factor
self.new_size = new_size
self.mode = mode
def forward(self, image):
return nn.functional.interpolate(image,
self.new_size,
self.resizing_factor,
mode=self.mode)
class MedianFilterNonDifferentiable(nn.Module):
def __init__(self, filter_sz):
super().__init__()
if is_number(filter_sz):
self.filter_sz = filter_sz
self.footprint = None
else:
self.filter_sz = None
self.footprint = filter_sz
def forward(self, image):
image_ = image.cpu().data.numpy()
for i in range(image.shape[0]):
for j in range(image.shape[1]):
image_[i, j, ...] = scipy.ndimage.filters.median_filter(image_[i, j, ...], size=self.filter_sz,
footprint=self.footprint)
image_ = FloatTensor(image_)
return cuda_like(image_, image)
class BicubicResizing(nn.Module):
def __init__(self,
resizing_factor=None,
new_size=None,
B=1.0, C=0.0):
""" Bicubic interpolation for resizing.
*** No Pre-filtering is applied!
Args:
resizing_factor: factors to resize image with. This or new_size
must be specified.
new_size: new image size (width, height) to resize to.
B, C: parameters of the spline (refer to Mitchell's SIGGRAPH'88 paper).
Default is (1, 0) which makes this a B-spline.
"""
super().__init__()
if (new_size is None) == (resizing_factor is None):
raise ValueError("Must specified exactly one of new_size ({})"
" or resizing_factor ({}).".format(new_size,
resizing_factor)
)
self.resizing_factor = resizing_factor
self.new_size = new_size
self.B, self.C = B, C
        # The Halide backend still needs debugging.
raise NotImplementedError
def forward(self, image):
if self.resizing_factor is not None:
sz = list(image.size())
new_W = int(self.resizing_factor * sz[-1])
new_H = int(self.resizing_factor * sz[-2])
if new_W < 1 or new_H < 1:
raise ValueError("Image to small that new size is zeros "
"(w, h = {}, {})".format(new_W, new_H))
else:
new_W, new_H = int(self.new_size[0]), int(self.new_size[1])
output = halide_funcs.BicubicResizing.apply(image,
new_W, new_H,
self.B, self.C)
return output
class Unsharpen(nn.Module):
def __init__(self, amount, radius, threshold, blur_filter_sz=None):
"""Unsharp an image.
This doesn't support batching because GaussianBlur doesn't.
Args:
amount: (float) amount of sharpening to apply.
radius: (float) radius of blur for the mask in pixel.
threshold: (float) minimum brightness diff to operate on (on 0-255 scale)
"""
super().__init__()
self.amount = amount
self.radius = radius
self.threshold = threshold
# if not specified, set it to twice the radius.
if blur_filter_sz is None:
self.filter_size = radius * 2
else:
self.filter_size = blur_filter_sz
self.blur = GaussianBlur(self.radius,
sz_x=self.filter_size,
sz_y=self.filter_size)
def forward(self, image):
# Create unsharp mask
unsharp_mask = image - self.blur(image)
# Apply threshold
unsharp_mask = unsharp_mask * (torch.abs(unsharp_mask) > (self.threshold / 255)).float()
return image + unsharp_mask * self.amount
# Demosaicking
class NaiveDemosaicking(nn.Module):
# TODO: Support GPU. Having host_dirty() exception now.
def __init__(self, use_median_filter=True, n_iter=3, **kwargs):
"""
Args:
use_median_filter: whether or not to apply median filter on chrominance/luminance
n_iter: number of times to apply median filters.
"""
super().__init__()
if use_median_filter:
# Same footprint as in the original AHD algorithm.
RB_footprint = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
G_footprint = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
self.median_RB = MedianFilterNonDifferentiable(RB_footprint)
self.median_G = MedianFilterNonDifferentiable(G_footprint)
self.use_median_filter = use_median_filter
self.n_iter = n_iter
if _has_halide():
self.demosaicker = halide_funcs.NaiveDemosaick.apply
def forward(self, image):
demosaicked = self.demosaicker(image)
if self.use_median_filter:
demosaicked_ = demosaicked.cpu()
            # repeat the median-filter cleanup n_iter times (3 by default)
for i in range(self.n_iter):
# follow AHD paper:
# https://www.photoactivity.com/Pagine/Articoli/006NewDCRaw/hirakawa03adaptive.pdf
R = demosaicked_[:, 0:1, ...].clone()
G = demosaicked_[:, 1:2, ...].clone()
B = demosaicked_[:, 2:3, ...].clone()
R = self.median_RB(R - G) + G
B = self.median_RB(B - G) + G
G = 0.5 * (self.median_G(G - R) + \
self.median_G(G - B) + \
R + B)
demosaicked_[:, 0:1, ...] = R
demosaicked_[:, 1:2, ...] = G
demosaicked_[:, 2:3, ...] = B
demosaicked = cuda_like(demosaicked_, demosaicked)
return demosaicked
class AHDDemosaickingNonDifferentiable(NaiveDemosaicking):
# TODO: Convert Numpy to Pytorch
def __init__(self, use_median_filter=True, n_iter=3, delta=2, sobel_sz=3, avg_sz=3):
super().__init__(use_median_filter, n_iter)
# print("Using AHD Non-differentiable")
def ahd_demosaicker(image):
image_ = image.cpu().permute(0, 2, 3, 1).squeeze(-1).data.numpy()
output = []
for i in range(image_.shape[0]):
output.append(FloatTensor(ahd_demosaicking(image_[i, ...], delta, sobel_sz, avg_sz)).unsqueeze(0))
output = cuda_like(torch.cat(output, dim=0).permute(0, 3, 1, 2), image)
return output
self.demosaicker = ahd_demosaicker
class BayerMosaicking(nn.Module):
""" Turn 3-channel image into GRGB Bayer.
"""
def forward(self, image):
# Compute Meshgrid.
# Tensors are batch x channels x height x width
h, w = image.size(2), image.size(3)
x = torch.arange(w).unsqueeze(0).expand(h, -1)
y = torch.arange(h).unsqueeze(-1).expand(-1, w)
x = x.unsqueeze(0).unsqueeze(0)
y = y.unsqueeze(0).unsqueeze(0)
if image.is_cuda:
x = x.cuda()
y = y.cuda()
odd_x = torch.fmod(x, 2)
odd_y = torch.fmod(y, 2)
is_green = odd_x == odd_y
is_red = odd_x * (1.0 - odd_y)
is_blue = (1.0 - odd_x) * odd_y
return image[:, 0:1, :, :] * is_red.float() + \
image[:, 1:2, :, :] * is_green.float() + \
image[:, 2:3, :, :] * is_blue.float()
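# Added illustration (not part of the original): for the top-left 2x2 block
# the resulting single-channel mosaic is
#     G R
#     B G
# i.e. a pixel keeps G where x and y have the same parity, R where x is odd
# and y is even, and B where x is even and y is odd.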
# Color
class WhiteBalance(nn.Module):
def __init__(self, scaling, mosaick_pattern=None):
""" Perform white balance with a scaling factor.
Args:
scaling: Tensor of size [channels] for scaling each channel
of the image. Batch dimension is optional.
mosaick_pattern: mosaick pattern of the input image.
"""
super().__init__()
self.scaling = scaling
self.mosaick_pattern = mosaick_pattern
def forward(self, image):
# need to check the type.
self.scaling = cuda_like(self.scaling, image)
return mosaick_multiply(self.scaling,
image,
self.mosaick_pattern)
class WhiteBalanceTemperature(nn.Module):
def __init__(self,
new_temp,
new_tint=0.0,
orig_temp=6504,
orig_tint=0.0,
mosaick_pattern=None):
""" WhiteBalancing with temperature parameterization.
Args:
new_temp: temperature to correct to. Can be scalar or 1D Tensor.
new_tint: tint to correct to. Can be scalar or 1D Tensor.
orig_temp: original temperature (default to D65)
orig_tint: original tint (default to D65)
mosaick_pattern: whether if the input has Bayer pattern.
"""
super().__init__()
# Make sure any scalars are converted to FloatTensor properly.
self.new_temp = python_to_tensor(new_temp)
self.new_tint = python_to_tensor(new_tint)
self.orig_temp = python_to_tensor(orig_temp)
self.orig_tint = python_to_tensor(orig_tint)
self.mosaick_pattern = mosaick_pattern
@staticmethod
def _planckian_locus(T, tint):
"""Calculate Planckian Locus and its derivative in CIExyY.
Args:
T: Correlated Color Temp (in K) (Scalar or 1D tensor)
tint: (to be implemented) (Scalar or 1D tensor)
Returns:
The white point in CIEXYZ space as a tensor of shape [batch x 3]
"""
# formula from wikipedia
def _blackbody_spectrum(l, T):
""" Blackbody radiation spectrum
See https://en.wikipedia.org/wiki/Planckian_locus.
Args:
l: wavelength in nanometer.
T: temperature in Kelvin.
"""
# See https://en.wikipedia.org/wiki/Planckian_locus.
c2 = 1.4387773683E7
l = l.unsqueeze(0)
lT = l * T.unsqueeze(-1)
return 1.0 / (torch.pow(l, 5) * (torch.exp(c2 / lT) - 1))
def _diff_blackbody_spectrum(l, T):
""" Temperature-derivative for blackbody spectrum function. This
is used for tint where we find the perpendicular direction to
the Planckian locus.
"""
c2 = 1.4387773683E7
l = l.unsqueeze(0)
T = T.unsqueeze(-1)
lT = l * T
exp = torch.exp(c2 / (lT))
return c2 * exp / (torch.pow(l, 6) * torch.pow(T * (exp - 1), 2))
# Convert Scalar T into a 1D tensor
if len(T.size()) < 1:
T = T.unsqueeze(0)
# Shape [batch x wavelength]
M = _blackbody_spectrum(xyz_color_matching['lambda'], T)
M_ = _diff_blackbody_spectrum(xyz_color_matching['lambda'], T)
X = torch.sum(M.unsqueeze(1) * xyz_color_matching['xyz'].unsqueeze(0),
dim=-1)
X_ = torch.sum(M_.unsqueeze(1) * xyz_color_matching['xyz'].unsqueeze(0),
dim=-1)
Y = X[:, 1:2]
Y_ = X_[:, 1:2]
X_ = (X_ / Y) - (X / (Y * Y) * Y_)
# switch X and Z so this is orthogonal
        # Tuple assignment on tensor views aliases storage; use advanced
        # indexing to get an actual swapped copy of the X and Z components.
        X_ = X_[:, [2, 1, 0]]
        X_[:, 1] = 0
        X_ = X_ / torch.sqrt(torch.sum(X_ ** 2, dim=1, keepdim=True))
# normalize Y to 1.
X = X / X[:, 1:2] + tint.unsqueeze(-1) * X_
return X
def forward(self, image):
X_orig = self._planckian_locus(self.orig_temp, self.orig_tint)
X_new = self._planckian_locus(self.new_temp, self.new_tint)
# The numerator is the original correction factor that makes D65
# into [1, 1, 1] in sRGB. The XYZ2sRGB matrix encodes this, so as
        # a sanity check, XYZ2sRGB * X_D65 should equal [1, 1, 1].
scaling = torch.matmul(XYZ2sRGB, X_new.t()) / \
torch.matmul(XYZ2sRGB, X_orig.t())
# Transpose to [batch, 3]
scaling = scaling.t()
self._wb = WhiteBalance(scaling, self.mosaick_pattern)
return self._wb(image)
class ColorSpaceConversionMatrix(nn.Module):
def __init__(self, matrix):
""" Linear color space conversion.
Useful for converting between sRGB and YUV.
Args:
matrix: matrix to convert color space (should be 2-D Tensor).
The conversion works as c_new = A * c_old, where c's are
column vectors in each color space.
"""
super().__init__()
self.matrix = matrix
def forward(self, image):
self.matrix = cuda_like(self.matrix, image)
return torch.einsum('ij,kjlm->kilm',
(self.matrix,
image)
)
class Saturation(nn.Module):
def __init__(self, value):
""" Adjust Saturation in YUV space
Args:
value: multiplier to the chrominance.
"""
super().__init__()
self.value = value
self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
def forward(self, image):
image = self.rgb2yuv(image)
image[:, 1:, ...] *= self.value
image[:, 1:, ...] = torch.clamp(image[:, 1:, ...], -1.0, 1.0)
image = self.yuv2rgb(image)
return image
# Tone
class sRGBLikeGamma(nn.Module):
def __init__(self, threshold, a, mult, gamma):
"""sRGB-like Gamma compression.
Linear at low range then power gamma.
Args:
threshold: threshold under which the conversion becomes linear.
a: constant factor to ensure continuity.
mult: slope for the linear part.
gamma: Gamma value.
"""
super().__init__()
self.threshold = threshold
self.a = a
self.mult = mult
self.gamma = gamma
def forward(self, image):
mask = (image > self.threshold).float()
image_lo = image * self.mult
        # The 0.001 offset avoids numerical issues at 0.
image_hi = (1 + self.a) * torch.pow(image + 0.001, 1.0 / self.gamma) - self.a
return mask * image_hi + (1 - mask) * image_lo
class UndosRGBLikeGamma(nn.Module):
""" Linear at low range then power gamma.
This is inverse of sRGBLikeGamma. See sRGBLikeGamma for detail.
"""
def __init__(self, threshold, a, mult, gamma):
super().__init__()
self.threshold = threshold
self.a = a
self.mult = mult
self.gamma = gamma
def forward(self, image):
mask = (image > self.threshold).float()
image_lo = image / self.mult
image_hi = torch.pow(image + self.a, self.gamma) / (1 + self.a)
return mask * image_hi + (1 - mask) * image_lo
class sRGBGamma(sRGBLikeGamma):
# See https://en.wikipedia.org/wiki/SRGB#Specification_of_the_transformation
def __init__(self):
super().__init__(threshold=0.0031308,
a=0.055,
mult=12.92,
gamma=2.4)
class UndosRGBGamma(UndosRGBLikeGamma):
# See https://en.wikipedia.org/wiki/SRGB#Specification_of_the_transformation
def __init__(self):
super().__init__(threshold=0.04045,
a=0.055,
mult=12.92,
gamma=2.4)
class ProPhotoRGBGamma(sRGBLikeGamma):
# See https://en.wikipedia.org/wiki/SRGB#Specification_of_the_transformation
def __init__(self):
super().__init__(threshold=1.0 / 512.0,
a=0.0,
mult=16.0,
gamma=1.8)
class UndoProPhotoRGBGamma(UndosRGBLikeGamma):
# See https://en.wikipedia.org/wiki/SRGB#Specification_of_the_transformation
def __init__(self):
super().__init__(threshold=1.0 / 32.0,
a=0.0,
mult=16.0,
gamma=1.8)
class GammaCompression(nn.Module):
def __init__(self, gamma):
""" Pure power-law gamma compression.
"""
super().__init__()
gamma = python_to_tensor(gamma)
self.gamma = expand_to_4d_batch(gamma)
def forward(self, image):
self.gamma = cuda_like(self.gamma, image)
return (image + 0.0001).pow(self.gamma)
class UndoGammaCompression(nn.Module):
def __init__(self, gamma):
""" Inverse of GammaCompression.
"""
super().__init__()
gamma = python_to_tensor(gamma)
self._gamma = GammaCompression(1.0 / gamma)
def forward(self, image):
return self._gamma(image)
class Gray18Gamma(nn.Module):
def __init__(self, gamma):
""" Applying gamma while keeping 18% gray constant.
"""
super().__init__()
gamma = python_to_tensor(gamma)
self.gamma = expand_to_4d_batch(gamma)
def forward(self, image):
# mult x (0.18)^gamma = 0.18; 0.18 = 18% gray
self.mult = FloatTensor([0.18]).pow(1.0 - self.gamma)
self.gamma = cuda_like(self.gamma, image)
self.mult = cuda_like(self.mult, image)
return self.mult * torch.pow(image + 0.001, self.gamma)
class ToneCurve(nn.Module):
def __init__(self, amount):
""" Tone curve using cubic curve.
        The curve is assumed to pass through 0, 0.25-a, 0.5, 0.75+a, 1, where
a is a parameter controlling the curve. For usability, the parameter
amount of 0 and 1 is mapped to a of 0 and 0.2.
"""
super().__init__()
self.amount = amount
self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
def forward(self, image):
a = self.amount * 0.2
self._A = -64.0 * a / 3.0
self._B = 32.0 * a
self._C = 1.0 - 32.0 * a / 3.0
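        # Added note (not in the original): with these coefficients the cubic
        # f(y) = A*y^3 + B*y^2 + C*y satisfies f(0)=0, f(0.25)=0.25-a,
        # f(0.5)=0.5, f(0.75)=0.75+a and f(1)=1, matching the docstring.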
yuv = self.rgb2yuv(image)
y = yuv[:, 0, ...]
y_sqr = y * y
y_cub = y_sqr * y
y = self._A * y_cub + self._B * y_sqr + self._C * y
yuv = yuv.clone()
yuv[:, 0, ...] = y
image = self.yuv2rgb(yuv)
return image
class ToneCurveNZones(nn.Module):
def __init__(self, ctrl_val):
""" Tone curve using linear curve with N zone.
Args:
ctrl_val: list of values that specify control points. These
are assumed to be equally spaced between 0 and 1.
"""
super().__init__()
self.ctrl_val = ctrl_val
self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
def forward(self, image):
yuv = self.rgb2yuv(image)
y = yuv[:, 0, ...]
n_zones = len(self.ctrl_val) + 1
val_scaling = 1.0 / n_zones
in_val = torch.linspace(0, 1, n_zones + 1)
out_val = [0] + [val_scaling * (i + 1 + self.ctrl_val[i]) for i in range(len(self.ctrl_val))] + [1]
y_ = 0
for i in range(len(in_val) - 1):
# if statement for the boundary case, in case we have something negatives
mask_lo = (y >= in_val[i]).float() if i > 0 else 1
mask_hi = (y < in_val[i + 1]).float() if i < len(in_val) - 2 else 1
mask = mask_lo * mask_hi
slope = (out_val[i + 1] - out_val[i]) / (in_val[i + 1] - in_val[i])
y_ += ((y - in_val[i]) * slope + out_val[i]) * mask
yuv = yuv.clone()
        # Write back the remapped luminance (y_), not the original y.
        yuv[:, 0, ...] = y_
image = self.yuv2rgb(yuv)
return image
class ToneCurveThreeZones(nn.Module):
def __init__(self, highlight, midtone, shadow):
""" Same as ToneCurveNZones but have different signature so that
it is more explicit.
"""
super().__init__()
self.tc = ToneCurveNZones([shadow, midtone, highlight])
def forward(self, image):
return self.tc.forward(image)
class Quantize(nn.Module):
def __init__(self, nbits=8):
""" Quantize image to number of bits.
"""
super().__init__()
self.nbits = nbits
def forward(self, image):
self.mult = FloatTensor([2]).pow(self.nbits)
self.mult = cuda_like(self.mult, image)
return torch.floor(image * self.mult) / self.mult
class ExposureAdjustment(nn.Module):
def __init__(self, nstops):
""" Exposure adjustment by the stops.
Args:
nstops: number of stops to adjust exposure. Can be scalar or
1D Tensor.
"""
super().__init__()
nstops = python_to_tensor(nstops)
self.nstops = expand_to_4d_batch(nstops)
def forward(self, image):
self._multiplier = FloatTensor([2]).pow(self.nstops)
self._multiplier = cuda_like(self._multiplier, image)
return self._multiplier * image
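# Added example (not in the original): ExposureAdjustment(nstops=1.0) doubles
# pixel values (one stop up) and ExposureAdjustment(nstops=-1.0) halves them.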
class AffineExposure(nn.Module):
def __init__(self, mult, add):
""" Exposure adjustment with affine transform.
            This calculates the adjusted exposure as mult*L + add, where L is
the current pixel value.
Args:
mult: Multiplier. Can be scalar or 1D Tensor.
add: Additive constant. Can be scalar or 1D Tensor.
"""
super().__init__()
mult = python_to_tensor(mult)
add = python_to_tensor(add)
self._mult = expand_to_4d_batch(mult)
self._add = expand_to_4d_batch(add)
def forward(self, image):
self._mult = cuda_like(self._mult, image)
self._add = cuda_like(self._add, image)
return self._mult * image + self._add
class AutoLevelNonDifferentiable(nn.Module):
def __init__(self, blkpt=1, whtpt=99, max_mult=1.5):
""" AutoLevel
Non-differentiable because it uses percentile function.
Args:
blkpt: percentile used as black point.
whtpt: percentile used as white point.
max_mult: max multiplication factor to avoid over brightening
image.
"""
super().__init__()
self.blkpt = blkpt
self.whtpt = whtpt
self.max_mult = max_mult
self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
def forward(self, image):
yuv = self.rgb2yuv(image)
y = yuv[:, 0, ...].cpu().numpy()
y = np.reshape(y, (y.shape[0], -1))
blkpt = np.percentile(y, self.blkpt, axis=1)
whtpt = np.percentile(y, self.whtpt, axis=1)
mult = 1.0 / (whtpt - blkpt)
if self.max_mult is not None:
# if self.max_mult == "auto":
            # HACK: a negative max_mult switches to this adaptive mode without an additional flag.
if self.max_mult < 0:
mm = 4.0 * np.power(whtpt, -self.max_mult)
mm = np.minimum(mm, 4.0)
mm = np.maximum(mm, 1.0)
else:
mm = self.max_mult
mult = np.minimum(mult, mm)
mult = FloatTensor(mult).unsqueeze(-1).unsqueeze(-1)
mult = cuda_like(mult, yuv)
blkpt = FloatTensor(blkpt).unsqueeze(-1).unsqueeze(-1)
blkpt = cuda_like(blkpt, yuv)
# yuv[:, 0, ...] = (yuv[:, 0, ...] - blkpt) * mult
image = (image - blkpt) * mult
image = torch.clamp(image, 0.0, 1.0)
return image
# Noises
class NoiseModule(nn.Module):
"""Base class for noise modules"""
def get_noise_image(self, image):
""" Return additive noise to the image.
This function should return noise image with the standard deviation
and the mosaick pattern baked in.
"""
raise RuntimeError("This is a base class for noise modules. "
"Use one of its subclasses instead.")
def forward(self, image):
return image + self.get_noise_image(image)
class PoissonNoise(NoiseModule):
def __init__(self, sigma, mosaick_pattern=None):
""" Poisson noise
Args:
sigma: multiplier to the noise strength.
"""
super().__init__()
self.sigma = python_to_tensor(sigma)
self.mosaick_pattern = mosaick_pattern
def get_noise_image(self, image):
noise_image = torch.randn_like(image)
noise_image *= torch.sqrt(torch.clamp(image, min=0.0))
self.sigma = cuda_like(self.sigma, image)
return mosaick_multiply(self.sigma, noise_image, self.mosaick_pattern)
class GaussianNoise(NoiseModule):
def __init__(self, sigma, mosaick_pattern=None):
""" Gaussian noise
Args:
sigma: noise STD.
"""
super().__init__()
self.sigma = python_to_tensor(sigma)
self.mosaick_pattern = mosaick_pattern
def get_noise_image(self, image):
noise_image = torch.randn_like(image)
self.sigma = cuda_like(self.sigma, image)
return mosaick_multiply(self.sigma, noise_image, self.mosaick_pattern)
class GaussPoissonMixtureNoise(NoiseModule):
def __init__(self, sigma_p, sigma_g, mosaick_pattern=None):
""" Gaussian and poisson noise mixture.
Args:
            sigma_p: Poisson noise multiplier.
sigma_g: noise gaussian STD.
"""
super().__init__()
self.mosaick_pattern = mosaick_pattern
self.sigma_p = sigma_p
self.sigma_g = sigma_g
self._poisson = PoissonNoise(self.sigma_p, self.mosaick_pattern)
self._gauss = GaussianNoise(self.sigma_g, self.mosaick_pattern)
def get_noise_image(self, image):
return self._poisson.get_noise_image(image) + \
self._gauss.get_noise_image(image)
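# Added note (not in the original): since the two components are independent
# zero-mean Gaussians, the mixture above has per-pixel standard deviation
# sqrt(sigma_p**2 * I + sigma_g**2) for a clean intensity I (clamped at 0).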
# Other artifacts.
class JPEGCompression(nn.Module):
DCT_BLOCK_SIZE = 8
# TODO: Support batch for different quality.
def __init__(self, quality):
""" JPEGCompression with integer quality.
Args:
quality: integer between 0 and 12 (highest quality).
This selects quantization table to use. See constant.py
for detail.
"""
# Quality must be integer between 0 and 12.
super().__init__()
quality = int(quality)
# Add batch and channel dimension
self.DCT_coeff_block = DCT_coeff.clone().unsqueeze(0).unsqueeze(0)
self.quantization_lum = photoshop_jpeg_quantization_lum[quality]
self.quantization_chrom = photoshop_jpeg_quantization_chrom[quality]
self.quantization_lum = self.quantization_lum \
.unsqueeze(0).unsqueeze(0) \
.unsqueeze(-1).unsqueeze(-1)
self.quantization_chrom = self.quantization_chrom \
.unsqueeze(0).unsqueeze(0) \
.unsqueeze(-1).unsqueeze(-1)
self.downsample_chrom = photoshop_chroma_subsampling[quality]
self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
@staticmethod
def _tile_sum(arr, dct_block):
"""Do the cumulative sum in tiles over the last two dimensions.
input should be shaped (batch, ch, blk_sz, blk_sz, im_h, im_w)
output will be (batch, ch, blk_sz, blk_sz, n_blk_h, n_blk_w)
"""
verbose = False
dct_block_size = dct_block.size(-1)
# allocating a temp array seems helpful, maybe because it doesn't
# have to write to the original array, which would result in more
# cache misses.
res = torch.zeros((arr.size(0),
arr.size(1),
dct_block_size, dct_block_size,
int(arr.size(4) / dct_block_size),
arr.size(5)))
# also multiply DCT coefficient here because actually repeating
# in two dim and multiply is very slow.
dct_block = dct_block.repeat(1, 1, 1, 1, 1, int(arr.size(5) / dct_block_size))
# Sum in height and multiply.
for i in range(dct_block_size):
res += arr[..., i::dct_block_size, :] * dct_block[..., i:(i + 1), :]
# Sum in width
for i in range(dct_block_size - 1):
res[..., :, (i + 1)::dct_block_size] += res[..., :, i::dct_block_size]
# Slice the array
# now DCT should have dimension (batch, ch, 8, 8, n_blk_h, n_blk_w)
res = res[..., :, (dct_block_size - 1)::dct_block_size]
return res
@staticmethod
def _tile_to_image(arr):
"""Takes arr of shape (batch, ch, blk_sz, blk_sz, n_blk_h, n_blk_w),
and reshape it so that it is (batch, ch, im_h, im_w)
"""
# For readability
dct_block_size = JPEGCompression.DCT_BLOCK_SIZE
n_blk_h = int(arr.size(-2))
n_blk_w = int(arr.size(-1))
# reshape it, assume reshape does it in C-order, last element changing fastest.
# Rearrange it so that it is
# (batch, ch, n_blk_h, v, n_blk_w, u)
arr = arr.permute(0, 1, 4, 2, 5, 3)
# dct is now (batch, ch, y, x, v, u)
arr = arr.contiguous()
arr = arr.view(arr.size(0),
arr.size(1),
n_blk_h * dct_block_size,
n_blk_w * dct_block_size)
return arr
def _compress(self, image, quantization_matrix):
# convert to -128 - 127 range
image = (image * 255.0) - 128.0
# For readability
dct_block_size = JPEGCompression.DCT_BLOCK_SIZE
# pad image
im_h = int(image.size(-2))
im_w = int(image.size(-1))
n_blk_h = int(np.ceil(im_h / dct_block_size))
n_blk_w = int(np.ceil(im_w / dct_block_size))
n_pad_h = n_blk_h * dct_block_size - image.size(-2)
n_pad_w = n_blk_w * dct_block_size - image.size(-1)
# pad image
image = torch.nn.functional.pad(image, (0, n_pad_w, 0, n_pad_h))
# Add u, v dimension
image = image.unsqueeze(-3).unsqueeze(-3)
# Compute DCT
# Sum within each tile.
dct = self._tile_sum(image, self.DCT_coeff_block)
# Quantize
dct = torch.round(dct / quantization_matrix) * quantization_matrix
# reshape it so that this becomes a u-v image.
dct = self._tile_to_image(dct).unsqueeze(-3).unsqueeze(-3)
# DCT should be (batch, ch, 8, 8, im_h, im_w)
# do the sum in u, v
dct = self._tile_sum(dct, self.DCT_coeff_block.permute(0, 1, 4, 5, 2, 3))
dct = self._tile_to_image(dct)
# Undo padding.
dct = dct[..., :im_h, :im_w]
# convert back to 0-1 range
dct = (dct + 128.0) / 255.0
return dct
def forward(self, image):
self.quantization_lum = cuda_like(self.quantization_lum, image)
self.DCT_coeff_block = cuda_like(self.DCT_coeff_block, image)
image_yuv = self.rgb2yuv(image)
image_y = image_yuv[:, 0:1, ...]
image_uv = image_yuv[:, 1:, ...]
# Compress luminance.
image_y = self._compress(image_y, self.quantization_lum)
# Compress the chrominance.
if self.downsample_chrom:
uv_size = image_uv.size()
image_uv = nn.functional.interpolate(image_uv, scale_factor=0.5)
image_uv = self._compress(image_uv, self.quantization_chrom)
if self.downsample_chrom:
image_uv = nn.functional.interpolate(image_uv, size=uv_size[-2:])
image_yuv = torch.cat((image_y, image_uv), dim=1)
image = self.yuv2rgb(image_yuv)
return image
class ChromaticAberration(nn.Module):
def __init__(self, scaling):
"""Chromatic Aberration
Args:
            scaling: the R and B channels are scaled by factors of scaling
                and 1/scaling, respectively.
"""
super().__init__()
self.scaling = expand_to_4d_batch(python_to_tensor(scaling))
@staticmethod
def _scale(image, scaling):
# create the affine matrix.
theta = torch.zeros((image.size(0), 2, 3))
# diagonal entry
theta[:, 0, 0] = scaling
theta[:, 1, 1] = scaling
theta = cuda_like(theta, image)
grid = nn.functional.affine_grid(theta, image.size())
return nn.functional.grid_sample(image, grid, padding_mode="border")
def forward(self, image):
# R
output_img = image.clone()
output_img[:, 0:1, ...] = self._scale(image[:, 0:1, ...],
self.scaling)
# B
output_img[:, 2:3, ...] = self._scale(image[:, 2:3, ...],
1.0 / self.scaling)
return output_img
class PixelClip(nn.Module):
""" Module for clipping pixel value.
"""
def forward(self, image):
return torch.clamp(image, 0.0, 1.0)
class RepairHotDeadPixel(nn.Module):
    # Adapted from https://github.com/letmaik/rawpy/blob/291afa870727f759a7bb68d756e4603806a466a4/rawpy/enhance.py
def __init__(self, threshold=0.2, median_class="MedianFilterNonDifferentiable"):
""" Repair hot pixel with median filter.
Args:
threshold: Difference to be considered as hot/dead pixels.
"""
super().__init__()
median_classes = {"MedianFilterNonDifferentiable": MedianFilterNonDifferentiable,
}
self.median = median_classes[median_class](3)
self.threshold = threshold
def _repair_one_channel(self, rawslice):
med = self.median(rawslice.clone())
# detect possible bad pixels
candidates = torch.abs(rawslice - med) > self.threshold
candidates = candidates.float()
candidates = cuda_like(candidates, rawslice)
return (1.0 - candidates) * rawslice + candidates * med
def forward(self, image):
# we have bayer
if image.size(1) == 1:
# we have 4 colors (two greens are always seen as two colors)
for offset_y in [0, 1]:
for offset_x in [0, 1]:
rawslice = image[..., offset_y::2, offset_x::2]
rawslice = self._repair_one_channel(rawslice)
image[..., offset_y::2, offset_x::2] = rawslice
else:
# do it per channel
for i in range(image.size(1)):
rawslice = image[:, i:(i + 1), ...]
rawslice = self._repair_one_channel(rawslice)
image[:, i:(i + 1), ...] = rawslice
return image
class PerChannelBlur(nn.Module):
def __init__(self, kern):
""" Blur applied to each channel individually.
Args:
kern: 2D tensors representing the blur kernel.
"""
super().__init__()
self.kern = kern
def forward(self, image):
self.kern = FloatTensor(self.kern).unsqueeze(0).unsqueeze(0)
self.kern = cuda_like(self.kern, image)
n_channel = image.size(1)
padding = []
for i in range(2):
# See https://stackoverflow.com/questions/51131821/even-sized-kernels-with-same-padding-in-tensorflow
sz = self.kern.size(-1 - i)
total_pad = int(sz - 1)
p0 = int(total_pad / 2)
p1 = total_pad - p0
padding += [p0, p1]
# Manually pad.
image = nn.functional.pad(image, padding, mode='reflect')
return nn.functional.conv2d(image,
self.kern.expand(n_channel,
-1, -1, -1),
groups=n_channel)
class SeparablePerChannelBlur(nn.Module):
def __init__(self, kern_x, kern_y=None):
"""Same as PerChannelBlur, but separable kernel.
This is much faster. Useful for when we have separable kernel such as
Gaussian.
Args:
kern_x: 1D tensor representing x-direction kernel.
kern_y: 1D tensor representing y-direction kernel. If None, use
the same thing as kern_x.
"""
super().__init__()
if kern_y is None:
kern_y = kern_x
self.kern_x = kern_x
self.kern_y = kern_y
def forward(self, image):
self.kern_x = FloatTensor(self.kern_x).unsqueeze(0).unsqueeze(0)
self.kern_y = FloatTensor(self.kern_y).unsqueeze(0).unsqueeze(0)
self.kern_x = cuda_like(self.kern_x, image)
self.kern_y = cuda_like(self.kern_y, image)
n_channel = image.size(1)
padding = []
kern_sz = (self.kern_y.size(-1), self.kern_x.size(-1))
for i in range(len(kern_sz)):
# See https://stackoverflow.com/questions/51131821/even-sized-kernels-with-same-padding-in-tensorflow
sz = kern_sz[-1 - i]
total_pad = int(sz - 1)
p0 = int(total_pad / 2)
p1 = total_pad - p0
padding += [p0, p1]
# Manually pad.
image_sz = image.size()
image = nn.functional.pad(image, padding, mode='reflect')
image = image.contiguous().view(-1,
image.size(-2),
image.size(-1))
# Do convolution in each direction
# width, b, height
image = image.permute(2, 0, 1)
image = nn.functional.conv1d(image,
self.kern_y.expand(image.size(1),
-1, -1),
groups=image.size(1))
# height, b, width
image = image.permute(2, 1, 0)
image = nn.functional.conv1d(image,
self.kern_x.expand(image.size(1),
-1, -1),
groups=image.size(1))
# b, height, width
image = image.permute(1, 0, 2)
return image.view(image_sz)
class MotionBlur(PerChannelBlur):
# TODO: Think about how to generate big blur without a giant kernel.
# Seems like this might not be possible
def __init__(self, amt, direction,
kernel_sz=10,
dynrange_th=None,
dynrange_boost=100
):
"""Motion Blur
Args:
            amt: (list or number) amount(s) of motion blur in pixels.
direction: (list or number) direction of motion in degrees.
kernel_sz: max size of kernel for performance consideration.
dynrange_th: threshold above which will get boosted to simulate
overexposed pixels. (See Burst Image Deblurring Using
Permutation Invariant Convolutional Neural Networks by Aittala
et al. 2018).
dynrange_boost: Multiplicative factor used to boost dynamic range.
"""
# normalize input into a good format.
amt = number_to_list(amt)
direction = number_to_list(direction)
assert len(amt) == len(direction)
# Create the blur kernel.
origin = np.array([0.0, 0.0]).astype('float')
pts = [origin]
min_x = max_x = min_y = max_y = 0.0
for idx in range(len(amt)):
d = direction[idx] * np.pi / 180.0
vec = np.array((np.cos(d), np.sin(d))) * amt[idx]
pt = pts[-1] + vec
x, y = pt[0], pt[1]
if x < min_x:
min_x = x
if x > max_x:
max_x = x
if y < min_y:
min_y = y
if y > max_y:
max_y = y
pts.append(pt)
cv_bit_shift = 8
mult = np.power(2, cv_bit_shift)
if kernel_sz is None:
# figure out kernel_sz
ksz_x = max(max_x - min_x + 2, 8)
ksz_y = max(max_y - min_y + 2, 8)
else:
ksz_x = ksz_y = kernel_sz
ksz_x = int(ksz_x)
ksz_y = int(ksz_y)
kern = np.zeros((ksz_y, ksz_x)).astype('uint8')
pts = np.array(pts)
pts[:, 0] -= min_x
pts[:, 1] -= min_y
pts *= mult
# TODO: Remove cv2 dependencies and use skimage instead.
# LINE_AA only works with uint8 kernel, but there is a bug that it
# only draws the first segment in this mode
cv2.polylines(kern, np.int32([pts]), isClosed=False,
color=1.0, lineType=8,
thickness=1, shift=cv_bit_shift)
kern = kern.astype('float32')
kern = kern / kern.sum()
super().__init__(kern)
self.dynrange_th = dynrange_th
self.dynrange_boost = dynrange_boost
if dynrange_th is not None:
self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
def forward(self, image):
if self.dynrange_th is not None:
y = self.rgb2yuv(image)[:, 0:1, ...]
mask = y > self.dynrange_th
mask = cuda_like(mask.float(), image)
image = image * (1.0 + mask * self.dynrange_boost)
image = super().forward(image)
if self.dynrange_th is not None:
image = torch.clamp(image, 0.0, 1.0)
return image
class GaussianBlur(SeparablePerChannelBlur):
def __init__(self, sigma_x, sigma_y=None,
sz_x=None, sz_y=None):
"""Channel-wise Gaussian Blur.
Args:
sigma_x: stdev in x-direction.
sigma_y: stdev in y-direction. (default: sigma_x)
sz_x = kernel size in x (default: twice sigma_x)
sz_y = kernel size in y (default: twice sigma_y)
"""
if sigma_y is None:
sigma_y = sigma_x
if sz_x is None:
sz_x = max(int(2.0 * sigma_x), 1)
if sz_y is None:
sz_y = max(int(2.0 * sigma_y), 1)
super().__init__(None, None)
self.sz_x = sz_x
self.sz_y = sz_y
self.sigma_x = sigma_x
self.sigma_y = sigma_y
def forward(self, image):
self.kern_x = gausskern1d(self.sigma_x, self.sz_x)
self.kern_y = gausskern1d(self.sigma_y, self.sz_y)
return super().forward(image)
class Rotation90Mult(nn.Module):
def __init__(self, angle):
""" Rotate image in multiples of 90.
"""
super().__init__()
self.angle = int(angle) % 360
if self.angle not in [0, 90, 180, 270]:
raise ValueError("Angle must be multiple of 90 degrees")
def forward(self, image):
if self.angle == 0:
return image
elif self.angle == 90:
return image.transpose(2, 3).flip(2)
elif self.angle == 270:
return image.transpose(2, 3).flip(3)
elif self.angle == 180:
return image.flip(2).flip(3)
else:
raise ValueError("Angle must be multiple of 90 degrees")
| 60,184 | 36.615625 | 130 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/data_generation/__init__.py
|
import sys
import inspect, os
# Need this to import halide. See:
# https://stackoverflow.com/questions/6323860/sibling-package-imports
sys.path.insert(0, os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe())),
'..'))
sys.path.insert(0, os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe()))))
| 389 | 34.454545 | 79 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/data_generation/pipeline.py
|
import torch.nn as nn
import torch
from . import image_processing
class ImageDegradationPipeline(nn.Module):
def __init__(self, configs):
""" Image Degradation Pipeline.
Args:
            configs: list of modules to be instantiated and their parameters.
                The list should contain tuples of the form (str, dict),
                where str is the module class name (see
                image_processing.py) and dict holds the keyword arguments
                of that module.
"""
super().__init__()
self.initialize_pipeline(configs)
def initialize_pipeline(self, configs):
pipeline = []
# initialize module.
for c in configs:
class_ = getattr(image_processing, c[0])
module = class_(**c[1])
pipeline.append(module)
self._pipeline = nn.Sequential(*pipeline)
# self._pipeline = tuple(pipeline)
def forward(self, image):
# import torchvision.transforms as transforms
# trans = transforms.ToPILImage()
# for index, func in enumerate(self._pipeline):
# image = func(image)
# # save images
# # image_trans = trans((torch.clamp(image, 0.0, 1.0)).squeeze())
# # image_trans.save('./train_images/tmp_{:02d}.png'.format(index), quality=100)
# return image
return self._pipeline(image)
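# Added usage sketch (not part of the original file); the function name and
# parameter values are illustrative assumptions. It assumes `image` is an
# N x 3 x H x W float tensor in [0, 1] and only uses classes that exist in
# image_processing.py.
def _example_degradation(image):
    configs = [
        ('GaussianNoise', {'sigma': 0.01}),
        ('GaussianBlur', {'sigma_x': 1.5}),
        ('PixelClip', {}),
    ]
    return ImageDegradationPipeline(configs)(image)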
| 1,439 | 34.121951 | 92 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/data_generation/kernel.py
|
import torch
def gausskern1d(sig, sz=None):
""" 1D Gaussian kernel.
Args:
        sig: stdev of the kernel.
        sz: kernel size (default: 2*int(sig) + 1; always at least 3).
"""
if sz is None:
sz = int(2*int(sig) + 1)
sz = max(sz, 3)
half_sz = int(sz / 2)
neg_half_sz = half_sz - sz + 1
neg_half_sz = float(neg_half_sz)
half_sz = float(half_sz)
x = torch.linspace(neg_half_sz, half_sz, int(sz)) / sig
x = x ** 2
kern = torch.exp(-x/2.0)
kern = kern / kern.sum()
return kern
def gausskern2d(sz_x, sig_x, sz_y=None, sig_y=None):
"""Returns a 2D Gaussian kernel array.
Modified from https://stackoverflow.com/questions/29731726/how-to-calculate-a-gaussian-kernel-matrix-efficiently-in-numpy
Args:
sz_{x,y}: kernel size.
sig_{x,y}: stdev of kernel in each direction
"""
if sz_y is None:
sz_y = sz_x
if sig_y is None:
sig_y = sig_x
    # gausskern1d takes (sig, sz); pass the arguments in that order.
    kern1d_x = gausskern1d(sig_x, sz_x)
    kern1d_y = gausskern1d(sig_y, sz_y)
kernel_raw = torch.einsum('i,j->ij', kern1d_x, kern1d_y)
# This einsum is equivalent to outer product (no repeated indices).
# For future reference
# kernel_raw = np.sqrt(np.einsum('ij,k', kernel_raw, kern_r))
kernel = kernel_raw/kernel_raw.sum()
return kernel
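# Added usage sketch (not part of the original file); the function name is
# hypothetical. It builds a 5x5 normalized Gaussian kernel and checks that it
# sums to one.
def _example_gauss_kernel():
    kern = gausskern2d(sz_x=5, sig_x=1.0)
    assert abs(float(kern.sum()) - 1.0) < 1e-5
    return kern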
| 1,285 | 26.956522 | 129 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/utils/image_utils.py
|
import numpy as np
import torch
def center_crop_tensor(tensor, w, h):
tw = tensor.size(-1)
th = tensor.size(-2)
if tw < w or th < h:
raise RuntimeError("Crop size is larger than image size.")
h0 = int((th - h) / 2)
w0 = int((tw - w) / 2)
h1 = h0 + h
w1 = w0 + w
return tensor[..., h0:h1, w0:w1]
def bayer_crop_tensor(tensor, w, h, mode="random"):
"""Crop that preserves Bayer phase"""
tw = tensor.size(-1)
th = tensor.size(-2)
if tw < w or th < h:
raise RuntimeError("Crop size ({}) is larger than image size ({})." \
.format((w, h), (tw, th)))
if mode == "random":
h0 = np.random.choice(th + 1 - h)
w0 = np.random.choice(tw + 1 - w)
elif mode == "center":
h0 = int((th - h) / 2)
w0 = int((tw - w) / 2)
else:
raise ValueError("Bayer crop: unrecognized mode ({}). Must be 'random' or 'center'.".format(mode))
# make sure start index is divisible by 2
h0 = h0 - (h0 % 2)
w0 = w0 - (w0 % 2)
h1 = h0 + h
w1 = w0 + w
return tensor[..., h0:h1, w0:w1]
def random_crop_tensor(tensor, w, h):
tw = tensor.size(-1)
th = tensor.size(-2)
if tw < w or th < h:
raise RuntimeError("Crop size is larger than image size.")
    # +1 so that a crop equal to the image size is still valid.
    h0 = np.random.randint(th - h + 1)
    w0 = np.random.randint(tw - w + 1)
h1 = h0 + h
w1 = w0 + w
return tensor[..., h0:h1, w0:w1]
def check_nan_tensor(x):
return torch.isnan(x).any()
| 1,497 | 26.740741 | 106 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/utils/__init__.py
|
import sys
import inspect, os
sys.path.insert(0, os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe())),
'..'))
sys.path.insert(0, os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe()))))
| 284 | 30.666667 | 79 |
py
|
kernel-prediction-networks-PyTorch
|
kernel-prediction-networks-PyTorch-master/utils/training_util.py
|
import numpy as np
import glob
import torch
import shutil
import os
import cv2
import numbers
import skimage
from collections import OrderedDict
from configobj import ConfigObj
from validate import Validator
from data_generation.pipeline import ImageDegradationPipeline
class MovingAverage(object):
def __init__(self, n):
self.n = n
self._cache = []
self.mean = 0
def update(self, val):
self._cache.append(val)
if len(self._cache) > self.n:
del self._cache[0]
self.mean = sum(self._cache) / len(self._cache)
def get_value(self):
return self.mean
def save_checkpoint(state, is_best, checkpoint_dir, n_iter, max_keep=10):
filename = os.path.join(checkpoint_dir, "{:06d}.pth.tar".format(n_iter))
torch.save(state, filename)
if is_best:
shutil.copyfile(filename,
os.path.join(checkpoint_dir,
'model_best.pth.tar'))
files = sorted(os.listdir(checkpoint_dir))
rm_files = files[0:max(0, len(files) - max_keep)]
for f in rm_files:
os.remove(os.path.join(checkpoint_dir, f))
def _represent_int(s):
try:
int(s)
return True
except ValueError:
return False
def load_checkpoint(checkpoint_dir, best_or_latest='best'):
if best_or_latest == 'best':
checkpoint_file = os.path.join(checkpoint_dir, 'model_best.pth.tar')
elif isinstance(best_or_latest, numbers.Number):
checkpoint_file = os.path.join(checkpoint_dir,
'{:06d}.pth.tar'.format(best_or_latest))
if not os.path.exists(checkpoint_file):
files = glob.glob(os.path.join(checkpoint_dir, '*.pth.tar'))
basenames = [os.path.basename(f).split('.')[0] for f in files]
iters = sorted([int(b) for b in basenames if _represent_int(b)])
raise ValueError('Available iterations are ({} requested): {}'.format(best_or_latest, iters))
else:
files = glob.glob(os.path.join(checkpoint_dir, '*.pth.tar'))
basenames = [os.path.basename(f).split('.')[0] for f in files]
iters = sorted([int(b) for b in basenames if _represent_int(b)])
checkpoint_file = os.path.join(checkpoint_dir,
'{:06d}.pth.tar'.format(iters[-1]))
return torch.load(checkpoint_file)
def load_statedict_runtime(checkpoint_dir, best_or_latest='best'):
# This function grabs state_dict from checkpoint, and do modification
# to the weight name so that it can be load at runtime.
# During training nn.DataParallel adds 'module.' to the name,
# which doesn't exist at test time.
ckpt = load_checkpoint(checkpoint_dir, best_or_latest)
state_dict = ckpt['state_dict']
global_iter = ckpt['global_iter']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
# remove `module.`
name = k[7:]
new_state_dict[name] = v
return new_state_dict, global_iter
def prep_and_vis_flow(flow, flow_visualizer, max_flow=None):
flow = flow_visualizer(flow[0, :, :, :], max_flow=max_flow)
flow = flow.cpu().data.numpy()
return flow
def put_text_on_img(image, text, loc=(20, 100), color=(1, 0, 0)):
""" Put text on flow
Args:
image: numpy array of dimension (3, h, w)
text: text to put on.
        loc: bottom-left location of the text in (x, y) from the top-left of the image.
color: color of the text.
Returns:
image with text written on it.
"""
image = np.array(np.moveaxis(image, 0, -1)).copy()
cv2.putText(image, text, loc, cv2.FONT_HERSHEY_SIMPLEX, 1, color)
return np.moveaxis(image, -1, 0)
def read_config(config_file, config_spec):
configspec = ConfigObj(config_spec, raise_errors=True)
config = ConfigObj(config_file,
configspec=configspec,
raise_errors=True,
file_error=True)
config.validate(Validator())
return config
def torch2numpy(tensor, gamma=None):
tensor = torch.clamp(tensor, 0.0, 1.0)
# Convert to 0 - 255
if gamma is not None:
tensor = torch.pow(tensor, gamma)
tensor *= 255.0
return tensor.permute(0, 2, 3, 1).cpu().data.numpy()
def prep_for_vis(degraded_img, target_img, output_img, exposure=None):
if exposure is not None:
def adjust_exp(img, exp):
configs = [
('PixelClip', {}),
('ExposureAdjustment', {'nstops': exp}),
('PixelClip', {}),
]
return ImageDegradationPipeline(configs)(img)
degraded_img = adjust_exp(degraded_img, exposure)
target_img = adjust_exp(target_img, exposure)
output_img = adjust_exp(output_img, exposure)
degraded_tf = torch2numpy(degraded_img, 1.0 / 2.2).astype('uint8')
# Gamma encode output for illustration purpose
target_tf = torch2numpy(target_img, 1.0 / 2.2).astype('uint8')
output_tf = torch2numpy(output_img, 1.0 / 2.2).astype('uint8')
return degraded_tf, target_tf, output_tf
def prep_for_vis_arr(img_arr, exposure=None):
if exposure is not None:
configs = [
('PixelClip', {}),
('ExposureAdjustment', {'nstops': exposure}),
('PixelClip', {}),
]
exp_adj = ImageDegradationPipeline(configs)
img_arr = [exp_adj(im) for im in img_arr]
img_arr = [torch2numpy(im, 1.0 / 2.2).astype('uint8') for im in img_arr]
return img_arr
def create_vis_arr(img_arr, exposure=None):
img_arr = prep_for_vis_arr(img_arr, exposure)
return np.concatenate(img_arr, axis=-2)
def create_vis(degraded_img, target_img, output_img, exposure=None):
degraded_tf, target_tf, output_tf = prep_for_vis(degraded_img,
target_img,
output_img)
img = np.concatenate((degraded_tf,
target_tf,
output_tf),
axis=-2)
return img
def calculate_psnr(output_img, target_img):
target_tf = torch2numpy(target_img)
output_tf = torch2numpy(output_img)
psnr = 0.0
n = 0.0
for im_idx in range(output_tf.shape[0]):
psnr += skimage.measure.compare_psnr(target_tf[im_idx, ...],
output_tf[im_idx, ...],
data_range=255)
n += 1.0
return psnr / n
def calculate_ssim(output_img, target_img):
target_tf = torch2numpy(target_img)
output_tf = torch2numpy(output_img)
ssim = 0.0
n = 0.0
for im_idx in range(output_tf.shape[0]):
ssim += skimage.measure.compare_ssim(target_tf[im_idx, ...],
output_tf[im_idx, ...],
multichannel=True,
data_range=255)
n += 1.0
return ssim / n
| 7,118 | 34.41791 | 105 |
py
|
Geo-FPT
|
Geo-FPT-main/interface.py
|
import numpy as np
import ctypes
from ctypes import *
from numpy.ctypeslib import ndpointer
"define a pointer for 1D arrays"
_doublep = ndpointer(ctypes.c_double, flags='C_CONTIGUOUS')
"define a pointer for 1D arrays INT "
_intp = ndpointer(ctypes.c_int, flags='C_CONTIGUOUS')
"define a pointer for 2D arrays"
_doublepp = ndpointer(dtype=np.uintp, ndim=1, flags='C')
"function to convert 2D array into a proper format for C"
def c_2d_inp(x):
return (x.__array_interface__['data'][0]
+ np.arange(x.shape[0]) * x.strides[0]).astype(np.uintp)
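# Added note (not part of the original file): the row-pointer trick above
# assumes `x` is a C-contiguous float64 array; if in doubt, pass
# np.ascontiguousarray(x, dtype='float64') before handing it to the C library.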
path_lib = './C_lib/'
clib = CDLL(path_lib + 'clib.so')
""" BK MULTIPOLES"""
clib.ext_bk_mp.restype = None
clib.ext_bk_mp.argtypes = [_doublepp, _doublepp,_doublepp,_doublepp,_doublep, _doublep,_doublep, c_double,c_int,c_int,c_int,c_int,c_int,c_int, _doublep]
def bk_multip(tr, tr2,tr3,tr4, kp, pk, cosm_par, redshift,fit_full=1):
bk_out = np.zeros(tr.shape[0]+tr2.shape[0]+tr3.shape[0]+tr4.shape[0], dtype='float')
clib.ext_bk_mp(c_2d_inp(tr), c_2d_inp(tr2),c_2d_inp(tr3),c_2d_inp(tr4),np.log10(kp), np.log10(pk),
cosm_par, redshift, fit_full, kp.size, tr.shape[0],tr2.shape[0],
tr3.shape[0],tr4.shape[0], bk_out)
ind,ind2,ind3 = tr.shape[0],tr2.shape[0],tr3.shape[0]
bk0 = bk_out[:ind]
bk200 = bk_out[ind:(ind+ind2)]
bk020 = bk_out[(ind+ind2):(ind+ind2+ind3)]
bk002 = bk_out[(ind+ind2+ind3):]
return bk0, bk200, bk020, bk002
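# Added usage sketch (not part of the original file); shapes are assumptions
# inferred from the code above: tr..tr4 are (N_i, M) float64 triangle arrays,
# kp and pk are 1D arrays of equal length, and cosm_par is a 1D float64 array.
#     bk0, bk200, bk020, bk002 = bk_multip(tr, tr2, tr3, tr4, kp, pk,
#                                          cosm_par, redshift=0.5)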
#clib.ext_Preal.restype = None
#clib.ext_Preal.argtypes = [_doublepp,_doublep,_doublep,c_double,c_int]
#def Pdd(theory,kin,cosm_par):
# Pout = np.zeros(kin.size)
# sigma8_sc = (cosm_par[0]/theory[-1,-1])**2
# Num = kin.size
#print(kin,sigma8_sc,Num)
# clib.ext_Preal(c_2d_inp(theory),kin,Pout,sigma8_sc,Num)
# return Pout
clib.ext_Preal.restype = None
clib.ext_Preal.argtypes = [_doublepp,_doublep,_doublep,c_double,c_double,c_double,c_int]
def Pdd(theory,kin,cosm_par):
Pout = np.zeros(kin.size)
alpa,alpe = 1.,1.
sigma8_sc = (cosm_par[0]/theory[-1,-1])**2
Num = kin.size
clib.ext_Preal(c_2d_inp(theory),kin,Pout,alpa,alpe,sigma8_sc,Num)
return Pout
clib.ext_Prsd.restype = None
clib.ext_Prsd.argtypes = [_doublepp,_doublep,c_int,_doublep,c_double,c_double,c_double,c_int,c_double,c_double,c_double ,c_double,c_double,c_int,c_double]
def P024(theory,kin,cosm_par,mup):
bs2 = -4./7.*(cosm_par[4]-1.)
b3nl= 32./315.*(cosm_par[4]-1.)
b1,b2,f,sig_P = cosm_par[4],cosm_par[5],cosm_par[1],cosm_par[7]
alpa,alpe = cosm_par[2],cosm_par[3]
sigma8_sc = (cosm_par[0]/theory[-1,-1])**2
Pout = np.zeros(kin.size)
Num = Pout.size
kin_dim = kin.size
clib.ext_Prsd(c_2d_inp(theory),kin,kin_dim,Pout,alpa,alpe,sigma8_sc,Num,
b1,b2,bs2,b3nl,f,mup,sig_P)
return Pout
| 2,862 | 31.908046 | 154 |
py
|
prospector
|
prospector-master/setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
import glob
#import subprocess
try:
from setuptools import setup
setup
except ImportError:
from distutils.core import setup
setup
#githash = subprocess.check_output(["git", "log", "--format=%h"], universal_newlines=True).split('\n')[0]
vers = "0.4.0"
githash = ""
with open('prospect/_version.py', "w") as f:
f.write('__version__ = "{}"\n'.format(vers))
f.write('__githash__ = "{}"\n'.format(githash))
setup(
name="prospect",
url="https://github.com/bd-j/prospect",
version=vers,
author="Ben Johnson",
author_email="[email protected]",
packages=["prospect",
"prospect.models",
"prospect.likelihood",
"prospect.fitting",
"prospect.sources",
"prospect.io",
"prospect.utils"],
license="LICENSE",
description="Stellar Population Inference",
long_description=open("README.md").read(),
package_data={"": ["README.md", "LICENSE"]},
scripts=glob.glob("scripts/*.py"),
include_package_data=True,
install_requires=["numpy"],
)
| 1,173 | 25.088889 | 105 |
py
|
prospector
|
prospector-master/prospect/__init__.py
|
try:
from ._version import __version__, __githash__
except(ImportError):
pass
from . import models
from . import fitting
from . import io
from . import sources
from . import utils
from . import likelihood
from .utils import prospect_args
| 247 | 18.076923 | 50 |
py
|
prospector
|
prospector-master/prospect/fitting/nested.py
|
import sys, time
import numpy as np
from numpy.random import normal, multivariate_normal
from six.moves import range
try:
import nestle
except(ImportError):
pass
try:
import dynesty
from dynesty.utils import *
from dynesty.dynamicsampler import _kld_error
except(ImportError):
pass
__all__ = ["run_nestle_sampler", "run_dynesty_sampler"]
def run_nestle_sampler(lnprobfn, model, verbose=True,
callback=None,
nestle_method='multi', nestle_npoints=200,
nestle_maxcall=int(1e6), nestle_update_interval=None,
**kwargs):
result = nestle.sample(lnprobfn, model.prior_transform, model.ndim,
method=nestle_method, npoints=nestle_npoints,
callback=callback, maxcall=nestle_maxcall,
update_interval=nestle_update_interval)
return result
def run_dynesty_sampler(lnprobfn, prior_transform, ndim, verbose=True,
nested_bound='multi', nested_sample='unif',
nested_nlive_init=100, nested_nlive_batch=100,
nested_update_interval=0.6, nested_walks=25,
nested_maxcall=None, nested_maxiter_init=None,
pool=None, queue_size=1, nested_use_stop=True,
nested_maxbatch=None, nested_weight_kwargs={'pfrac': 1.0},
nested_bootstrap=0, nested_dlogz_init=0.02,
use_pool={}, nested_first_update={},
nested_maxcall_init=None, nested_live_points=None,
nested_maxcall_batch=None, nested_maxiter=None,
stop_function=None, wt_function=None,
nested_maxiter_batch=None, nested_stop_kwargs={},
nested_save_bounds=False,**kwargs):
# instantiate sampler
dsampler = dynesty.DynamicNestedSampler(lnprobfn, prior_transform, ndim,
bound=nested_bound, sample=nested_sample,
update_interval=nested_update_interval,
pool=pool, queue_size=queue_size,
walks=nested_walks, bootstrap=nested_bootstrap,
use_pool=use_pool)
# generator for initial nested sampling
ncall = dsampler.ncall
niter = dsampler.it - 1
tstart = time.time()
for results in dsampler.sample_initial(nlive=nested_nlive_init,
dlogz=nested_dlogz_init,
maxcall=nested_maxcall_init,
maxiter=nested_maxiter_init,
live_points=nested_live_points):
(worst, ustar, vstar, loglstar, logvol,
logwt, logz, logzvar, h, nc, worst_it,
propidx, propiter, eff, delta_logz) = results
if delta_logz > 1e6:
delta_logz = np.inf
ncall += nc
niter += 1
with np.errstate(invalid='ignore'):
logzerr = np.sqrt(logzvar)
sys.stderr.write("\riter: {:d} | batch: {:d} | nc: {:d} | "
"ncall: {:d} | eff(%): {:6.3f} | "
"logz: {:6.3f} +/- {:6.3f} | "
"dlogz: {:6.3f} > {:6.3f} "
.format(niter, 0, nc, ncall, eff, logz,
logzerr, delta_logz, nested_dlogz_init))
sys.stderr.flush()
ndur = time.time() - tstart
if verbose:
print('\ndone dynesty (initial) in {0}s'.format(ndur))
if nested_maxcall is None:
nested_maxcall = sys.maxsize
if nested_maxbatch is None:
nested_maxbatch = sys.maxsize
if nested_maxcall_batch is None:
nested_maxcall_batch = sys.maxsize
if nested_maxiter is None:
nested_maxiter = sys.maxsize
if nested_maxiter_batch is None:
nested_maxiter_batch = sys.maxsize
# generator for dynamic sampling
tstart = time.time()
for n in range(dsampler.batch, nested_maxbatch):
# Update stopping criteria.
dsampler.sampler.save_bounds = False
res = dsampler.results
mcall = min(nested_maxcall - ncall, nested_maxcall_batch)
miter = min(nested_maxiter - niter, nested_maxiter_batch)
if nested_use_stop:
if dsampler.use_pool_stopfn:
M = dsampler.M
else:
M = map
stop, stop_vals = stop_function(res, nested_stop_kwargs,
rstate=dsampler.rstate, M=M,
return_vals=True)
stop_post, stop_evid, stop_val = stop_vals
else:
stop = False
stop_val = np.NaN
# If we have either likelihood calls or iterations remaining,
# run our batch.
if mcall > 0 and miter > 0 and not stop:
# Compute our sampling bounds using the provided
# weight function.
logl_bounds = wt_function(res, nested_weight_kwargs)
lnz, lnzerr = res.logz[-1], res.logzerr[-1]
for results in dsampler.sample_batch(nlive_new=nested_nlive_batch,
logl_bounds=logl_bounds,
maxiter=miter,
maxcall=mcall,
save_bounds=nested_save_bounds):
(worst, ustar, vstar, loglstar, nc,
worst_it, propidx, propiter, eff) = results
ncall += nc
niter += 1
sys.stderr.write("\riter: {:d} | batch: {:d} | "
"nc: {:d} | ncall: {:d} | "
"eff(%): {:6.3f} | "
"loglstar: {:6.3f} < {:6.3f} "
"< {:6.3f} | "
"logz: {:6.3f} +/- {:6.3f} | "
"stop: {:6.3f} "
.format(niter, n+1, nc, ncall,
eff, logl_bounds[0], loglstar,
logl_bounds[1], lnz, lnzerr,
stop_val))
sys.stderr.flush()
dsampler.combine_runs()
else:
# We're done!
break
ndur = time.time() - tstart
if verbose:
print('done dynesty (dynamic) in {0}s'.format(ndur))
return dsampler.results
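# Minimal usage sketch (illustrative, assumes dynesty is installed): sample a
# toy isotropic Gaussian likelihood with uniform priors on [-10, 10] using the
# wrapper above. The stopping and weight functions are taken from dynesty
# itself, as done by prospect.fitting.run_dynesty.
def _example_run_dynesty_sampler(ndim=2):
    from dynesty.dynamicsampler import stopping_function, weight_function
    def loglike(theta):
        return -0.5 * np.sum(theta**2)
    def prior_transform(u):
        return 20.0 * u - 10.0
    return run_dynesty_sampler(loglike, prior_transform, ndim,
                               nested_nlive_init=100,
                               stop_function=stopping_function,
                               wt_function=weight_function)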
| 6,784 | 41.142857 | 91 |
py
|
prospector
|
prospector-master/prospect/fitting/fitting.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""fitting.py -- Default posterior probability function and high-level fitting
methods for prospector
"""
import time
from functools import partial as argfix
import numpy as np
from scipy.optimize import minimize, least_squares
from .minimizer import minimize_wrapper, minimizer_ball
from .ensemble import run_emcee_sampler
from .nested import run_dynesty_sampler
from ..likelihood import lnlike_spec, lnlike_phot, chi_spec, chi_phot, write_log
from ..utils.obsutils import fix_obs
__all__ = ["lnprobfn", "fit_model",
"run_minimize", "run_emcee", "run_dynesty"
]
def lnprobfn(theta, model=None, obs=None, sps=None, noise=(None, None),
residuals=False, nested=False, verbose=False):
"""Given a parameter vector and optionally a dictionary of observational
    data and a model object, return the natural log of the posterior. This
requires that an sps object (and if using spectra and gaussian processes, a
NoiseModel) be instantiated.
:param theta:
Input parameter vector, ndarray of shape (ndim,)
:param model:
SedModel model object, with attributes including ``params``, a
dictionary of model parameter state. It must also have
:py:func:`prior_product`, and :py:func:`predict` methods
defined.
:param obs:
A dictionary of observational data. The keys should be
+ ``"wavelength"`` (angstroms)
+ ``"spectrum"`` (maggies)
+ ``"unc"`` (maggies)
+ ``"maggies"`` (photometry in maggies)
+ ``"maggies_unc"`` (photometry uncertainty in maggies)
+ ``"filters"`` (iterable of :py:class:`sedpy.observate.Filter`)
+ and optional spectroscopic ``"mask"`` and ``"phot_mask"``
(same length as ``spectrum`` and ``maggies`` respectively,
True means use the data points)
:param sps:
A :py:class:`prospect.sources.SSPBasis` object or subclass thereof, or
any object with a ``get_spectrum`` method that will take a dictionary
of model parameters and return a spectrum, photometry, and ancillary
information.
:param noise: (optional, default: (None, None))
A 2-element tuple of :py:class:`prospect.likelihood.NoiseModel` objects.
:param residuals: (optional, default: False)
A switch to allow vectors of :math:`\chi` values to be returned instead
of a scalar posterior probability. This can be useful for
least-squares optimization methods. Note that prior probabilities are
not included in this calculation.
:param nested: (optional, default: False)
If ``True``, do not add the ln-prior probability to the ln-likelihood
when computing the ln-posterior. For nested sampling algorithms the
prior probability is incorporated in the way samples are drawn, so
should not be included here.
:returns lnp:
Ln posterior probability, unless ``residuals=True`` in which case a
vector of :math:`\chi` values is returned.
"""
if residuals:
lnnull = np.zeros(obs["ndof"]) - 1e18 # np.infty
#lnnull = -np.infty
else:
lnnull = -np.infty
# --- Calculate prior probability and exit if not within prior ---
lnp_prior = model.prior_product(theta, nested=nested)
if not np.isfinite(lnp_prior):
return lnnull
# --- Update Noise Model ---
spec_noise, phot_noise = noise
vectors, sigma_spec = {}, None
model.set_parameters(theta)
if spec_noise is not None:
spec_noise.update(**model.params)
vectors.update({"unc": obs.get('unc', None)})
sigma_spec = spec_noise.construct_covariance(**vectors)
if phot_noise is not None:
phot_noise.update(**model.params)
vectors.update({'phot_unc': obs.get('maggies_unc', None),
'phot': obs.get('maggies', None)})
# --- Generate mean model ---
try:
t1 = time.time()
spec, phot, x = model.predict(theta, obs, sps=sps, sigma_spec=sigma_spec)
d1 = time.time() - t1
except(ValueError):
return lnnull
except:
print("There was an error during the likelihood call at parameters {}".format(theta))
raise
# --- Optionally return chi vectors for least-squares ---
# note this does not include priors!
if residuals:
chispec = chi_spec(spec, obs)
chiphot = chi_phot(phot, obs)
return np.concatenate([chispec, chiphot])
# --- Mixture Model ---
f_outlier_spec = model.params.get('f_outlier_spec', 0.0)
if (f_outlier_spec != 0.0):
sigma_outlier_spec = model.params.get('nsigma_outlier_spec', 10)
vectors.update({'nsigma_outlier_spec': sigma_outlier_spec})
f_outlier_phot = model.params.get('f_outlier_phot', 0.0)
if (f_outlier_phot != 0.0):
sigma_outlier_phot = model.params.get('nsigma_outlier_phot', 10)
vectors.update({'nsigma_outlier_phot': sigma_outlier_phot})
# --- Emission Lines ---
# --- Calculate likelihoods ---
t1 = time.time()
lnp_spec = lnlike_spec(spec, obs=obs,
f_outlier_spec=f_outlier_spec,
spec_noise=spec_noise,
**vectors)
lnp_phot = lnlike_phot(phot, obs=obs,
f_outlier_phot=f_outlier_phot,
phot_noise=phot_noise, **vectors)
lnp_eline = getattr(model, '_ln_eline_penalty', 0.0)
d2 = time.time() - t1
if verbose:
write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)
return lnp_prior + lnp_phot + lnp_spec + lnp_eline
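# Minimal sketch (illustrative): evaluate the chi vector at the current model
# parameters, as used by the least-squares path in run_minimize. The `obs`,
# `model`, and `sps` objects are assumed to be built elsewhere (e.g. by a
# parameter file).
def _example_chi_vector(obs, model, sps):
    chivec = lnprobfn(model.theta.copy(), model=model, obs=obs, sps=sps,
                      residuals=True)
    # chi-square of the current parameter vector (priors are not included)
    return np.sum(chivec**2)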
def wrap_lnp(lnpfn, obs, model, sps, **lnp_kwargs):
return argfix(lnpfn, obs=obs, model=model, sps=sps,
**lnp_kwargs)
def fit_model(obs, model, sps, noise=(None, None), lnprobfn=lnprobfn,
optimize=False, emcee=False, dynesty=True, **kwargs):
"""Fit a model to observations using a number of different methods
:param obs:
The ``obs`` dictionary containing the data to fit to, which will be
passed to ``lnprobfn``.
:param model:
An instance of the :py:class:`prospect.models.SedModel` class
containing the model parameterization and parameter state. It will be
passed to ``lnprobfn``.
:param sps:
An instance of a :py:class:`prospect.sources.SSPBasis` (sub-)class.
Alternatively, anything with a compatible :py:func:`get_spectrum` can
be used here. It will be passed to ``lnprobfn``
:param noise: (optional, default: (None, None))
A tuple of NoiseModel objects for the spectroscopy and photometry
respectively. Can also be (None, None) in which case simple chi-square
will be used.
:param lnprobfn: (optional, default: lnprobfn)
A posterior probability function that can take ``obs``, ``model``,
``sps``, and ``noise`` as keywords. By default use the
:py:func:`lnprobfn` defined above.
:param optimize: (optional, default: False)
If ``True``, conduct a round of optimization before sampling from the
posterior. The model state will be set to the best value at the end of
optimization before continuing on to sampling or returning. Parameters
controlling the optimization can be passed via ``kwargs``, including
+ ``min_method``: 'lm' | 'powell'
+ ``nmin``: number of minimizations to do. Beyond the first, minimizations
will be started from draws from the prior.
+ ``min_opts``: dictionary of minimization options passed to the
scipy.optimize.minimize method.
See :py:func:`run_minimize` for details.
:param emcee: (optional, default: False)
        If ``True``, sample from the posterior using emcee. Additional
parameters controlling emcee can be passed via ``**kwargs``. These include
+ ``initial_positions``: A set of initial positions for the walkers
+ ``hfile``: an open h5py.File file handle for writing result incrementally
Many additional emcee parameters can be provided here, see
:py:func:`run_emcee` for details.
:param dynesty:
        If ``True``, sample from the posterior using dynesty. Additional
parameters controlling dynesty can be passed via ``**kwargs``. See
:py:func:`run_dynesty` for details.
:returns output:
A dictionary with two keys, 'optimization' and 'sampling'. The value
of each of these is a 2-tuple with results in the first element and
durations (in seconds) in the second element.
"""
# Make sure obs has required keys
obs = fix_obs(obs)
if emcee & dynesty:
msg = ("Cannot run both emcee and dynesty fits "
"in a single call to fit_model")
        raise ValueError(msg)
output = {"optimization": (None, 0.),
"sampling": (None, 0.)}
if optimize:
optres, topt, best = run_minimize(obs, model, sps, noise,
lnprobfn=lnprobfn, **kwargs)
# set to the best
model.set_parameters(optres[best].x)
output["optimization"] = (optres, topt)
if emcee:
run_sampler = run_emcee
elif dynesty:
run_sampler = run_dynesty
else:
return output
output["sampling"] = run_sampler(obs, model, sps, noise,
lnprobfn=lnprobfn, **kwargs)
return output
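# Minimal usage sketch (illustrative): a short optimization round followed by
# emcee sampling via fit_model. The `obs`, `model`, and `sps` objects are
# assumed to be built elsewhere, the keyword values are placeholders, and
# `hfile` may be an open h5py.File handle for incremental output.
def _example_fit_model(obs, model, sps, hfile=None):
    output = fit_model(obs, model, sps, lnprobfn=lnprobfn,
                       optimize=True, min_method="lm", nmin=2,
                       emcee=True, dynesty=False, hfile=hfile,
                       nwalkers=64, nburn=[32, 64], niter=512)
    result, duration = output["sampling"]
    return result, duration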
def run_minimize(obs=None, model=None, sps=None, noise=None, lnprobfn=lnprobfn,
min_method='lm', min_opts={}, nmin=1, pool=None, **extras):
"""Run a minimization. This wraps the lnprobfn fixing the ``obs``,
``model``, ``noise``, and ``sps`` objects, and then runs a minimization of
-lnP using scipy.optimize methods.
:param obs:
The ``obs`` dictionary containing the data to fit to, which will be
passed to ``lnprobfn``.
:param model:
An instance of the :py:class:`prospect.models.SedModel` class
containing the model parameterization and parameter state. It will be
passed to ``lnprobfn``.
:param sps:
An instance of a :py:class:`prospect.sources.SSPBasis` (sub-)class.
Alternatively, anything with a compatible :py:func:`get_spectrum` can
be used here. It will be passed to ``lnprobfn``
:param noise: (optional)
If given, a tuple of :py:class:`NoiseModel` objects passed to
``lnprobfn``.
:param lnprobfn: (optional, default: lnprobfn)
A posterior probability function that can take ``obs``, ``model``,
``sps``, and ``noise`` as keywords. By default use the
:py:func:`lnprobfn` defined above.
:param min_method: (optional, default: 'lm')
Method to use for minimization
* 'lm': Levenberg-Marquardt
* 'powell': Powell line search method
:param nmin: (optional, default: 1)
Number of minimizations to do. Beyond the first, minimizations will be
started from draws from the prior.
:param min_opts: (optional, default: {})
Dictionary of minimization options passed to the scipy.optimize method.
These include things like 'xtol', 'ftol', etc..
:param pool: (optional, default: None)
A pool to use for parallel optimization from multiple initial positions.
:returns results:
A list of `scipy.optimize.OptimizeResult` objects.
:returns tm:
Wall time used for the minimization, in seconds.
:returns best:
The index of the results list containing the lowest chi-square result.
"""
initial = model.theta.copy()
lsq = ["lm"]
scalar = ["powell"]
# --- Set some options based on minimization method ---
if min_method in lsq:
algorithm = least_squares
residuals = True
min_opts["x_scale"] = "jac"
elif min_method in scalar:
algorithm = minimize
residuals = False
args = []
loss = argfix(lnprobfn, obs=obs, model=model, sps=sps,
noise=noise, residuals=residuals)
minimizer = minimize_wrapper(algorithm, loss, [], min_method, min_opts)
qinit = minimizer_ball(initial, nmin, model)
if pool is not None:
M = pool.map
else:
M = map
t = time.time()
results = list(M(minimizer, [np.array(q) for q in qinit]))
tm = time.time() - t
if min_method in lsq:
chisq = [np.sum(r.fun**2) for r in results]
best = np.argmin(chisq)
elif min_method in scalar:
best = np.argmin([p.fun for p in results])
return results, tm, best
def run_emcee(obs, model, sps, noise, lnprobfn=lnprobfn,
hfile=None, initial_positions=None,
**kwargs):
"""Run emcee, optionally including burn-in and convergence checking. Thin
wrapper on :py:class:`prospect.fitting.ensemble.run_emcee_sampler`
:param obs:
The ``obs`` dictionary containing the data to fit to, which will be
passed to ``lnprobfn``.
:param model:
An instance of the :py:class:`prospect.models.SedModel` class
containing the model parameterization and parameter state. It will be
passed to ``lnprobfn``.
:param sps:
An instance of a :py:class:`prospect.sources.SSPBasis` (sub-)class.
Alternatively, anything with a compatible :py:func:`get_spectrum` can
be used here. It will be passed to ``lnprobfn``
:param noise:
A tuple of :py:class:`NoiseModel` objects passed to ``lnprobfn``.
:param lnprobfn: (optional, default: lnprobfn)
A posterior probability function that can take ``obs``, ``model``,
``sps``, and ``noise`` as keywords. By default use the
:py:func:`lnprobfn` defined above.
:param hfile: (optional, default: None)
A file handle for a :py:class:`h5py.File` object that will be written
        to incrementally during sampling.
:param initial_positions: (optional, default: None)
If given, a set of initial positions for the emcee walkers. Must have
shape (nwalkers, ndim). Rounds of burn-in will be skipped if this
parameter is present.
Extra Parameters
--------
:param nwalkers:
The number of walkers to use. If None, use the nearest power of two to
``ndim * walker_factor``.
:param niter:
Number of iterations for the production run
:param nburn:
List of the number of iterations to run in each round of burn-in (for
removing stuck walkers.) E.g. `nburn=[32, 64]` will run the sampler for
32 iterations before reinitializing and then run the sampler for
another 64 iterations before starting the production run.
:param storechain: (default: True)
If using HDF5 output, setting this to False will keep the chain from
being held in memory by the sampler object.
:param pool: (optional)
A ``Pool`` object, either from ``multiprocessing`` or from
``emcee.mpi_pool``.
:param interval:
Fraction of the full run at which to flush to disk, if using hdf5 for
output.
:param convergence_check_interval:
How often to assess convergence, in number of iterations. If this is
not `None`, then the KL convergence test is run.
:param convergence_chunks:
The number of iterations to combine when creating the marginalized
parameter probability functions.
:param convergence_stable_points_criteria:
The number of stable convergence checks that the chain must pass before
being declared stable.
Returns
--------
:returns sampler:
An instance of :py:class:`emcee.EnsembleSampler`.
:returns ts:
Duration of sampling (including burn-in) in seconds of wall time.
"""
q = model.theta.copy()
postkwargs = {"obs": obs,
"model": model,
"sps": sps,
"noise": noise,
"nested": False,
}
# Could try to make signatures for these two methods the same....
if initial_positions is not None:
meth = restart_emcee_sampler
t = time.time()
out = meth(lnprobfn, initial_positions, hdf5=hfile,
postkwargs=postkwargs, **kwargs)
sampler = out
ts = time.time() - t
else:
meth = run_emcee_sampler
t = time.time()
out = meth(lnprobfn, q, model, hdf5=hfile,
postkwargs=postkwargs, **kwargs)
sampler, burn_p0, burn_prob0 = out
ts = time.time() - t
return sampler, ts
def run_dynesty(obs, model, sps, noise, lnprobfn=lnprobfn,
pool=None, nested_posterior_thresh=0.05, **kwargs):
"""Thin wrapper on :py:class:`prospect.fitting.nested.run_dynesty_sampler`
:param obs:
The ``obs`` dictionary containing the data to fit to, which will be
passed to ``lnprobfn``.
:param model:
An instance of the :py:class:`prospect.models.SedModel` class
containing the model parameterization and parameter state. It will be
passed to ``lnprobfn``.
:param sps:
An instance of a :py:class:`prospect.sources.SSPBasis` (sub-)class.
Alternatively, anything with a compatible :py:func:`get_spectrum` can
be used here. It will be passed to ``lnprobfn``
:param noise:
A tuple of :py:class:`prospect.likelihood.NoiseModel` objects passed to
``lnprobfn``.
:param lnprobfn: (optional, default: :py:func:`lnprobfn`)
A posterior probability function that can take ``obs``, ``model``,
``sps``, and ``noise`` as keywords. This function must also take a
``nested`` keyword.
Extra Parameters
--------
:param nested_bound: (optional, default: 'multi')
:param nested_sample: (optional, default: 'unif')
:param nested_nlive_init: (optional, default: 100)
:param nested_nlive_batch: (optional, default: 100)
:param nested_dlogz_init: (optional, default: 0.02)
:param nested_maxcall: (optional, default: None)
:param nested_walks: (optional, default: 25)
Returns
--------
:returns result:
An instance of :py:class:`dynesty.results.Results`.
:returns ts:
Duration of sampling in seconds of wall time.
"""
from dynesty.dynamicsampler import stopping_function, weight_function
nested_stop_kwargs = {"post_thresh": nested_posterior_thresh}
lnp = wrap_lnp(lnprobfn, obs, model, sps, noise=noise,
nested=True)
# Need to deal with postkwargs...
t = time.time()
dynestyout = run_dynesty_sampler(lnp, model.prior_transform, model.ndim,
stop_function=stopping_function,
wt_function=weight_function,
nested_stop_kwargs=nested_stop_kwargs,
pool=pool, **kwargs)
ts = time.time() - t
return dynestyout, ts
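# Minimal usage sketch (illustrative): a dynesty fit with a slightly larger
# live-point set. The `obs`, `model`, and `sps` objects are assumed to be
# built elsewhere; the keyword values are placeholders.
def _example_run_dynesty(obs, model, sps):
    out, ts = run_dynesty(obs, model, sps, (None, None),
                          nested_nlive_init=400, nested_dlogz_init=0.05)
    print("dynesty finished in {:.1f}s, logz = {:.2f}".format(ts, out.logz[-1]))
    return out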
| 19,201 | 35.575238 | 93 |
py
|
prospector
|
prospector-master/prospect/fitting/minimizer.py
|
import warnings
import numpy as np
from numpy.random import normal, multivariate_normal
__all__ = ["minimize_wrapper", "minimizer_ball", "reinitialize"]
class minimize_wrapper(object):
"""This is a hack to make the minimization function pickleable (for MPI)
even though it requires many arguments. Ripped off from emcee.
"""
def __init__(self, algorithm, function, args, method, options):
self.f = algorithm
self.func = function
self.args = tuple(args)
self.meth = method
self.opts = options
def __call__(self, x):
try:
return self.f(self.func, x, args=self.args,
method=self.meth, **self.opts)
except:
import traceback
print("minimizer: Exception while trying to minimize the function:")
print(" params:", x)
print(" args:", self.args)
print(" exception:")
traceback.print_exc()
raise
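# Minimal sketch (illustrative): the wrapper above lets a scipy optimizer be
# mapped over several starting positions; here a 1-d quadratic is minimized
# from two different starts with the Powell method.
def _example_minimize_wrapper():
    from scipy.optimize import minimize
    def loss(x):
        return np.sum((np.asarray(x) - 3.0)**2)
    wrapped = minimize_wrapper(minimize, loss, [], "powell", {})
    results = list(map(wrapped, [np.array([0.0]), np.array([10.0])]))
    return [r.x for r in results]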
def minimizer_ball(center, nminimizers, model, seed=None):
"""Draw initial values from the (1d, separable, independent) priors for
each parameter. Requires that priors have the `sample` method available.
If priors are old-style, draw randomly between min and max.
"""
rand = np.random.RandomState(seed)
size = nminimizers
pinitial = [center]
if size > 1:
ginitial = np.zeros([size - 1, model.ndim])
for p, inds in list(model.theta_index.items()):
for j in range(size-1):
ginitial[j, inds] = model._config_dict[p]['prior'].sample()
pinitial += ginitial.tolist()
return pinitial
def reinitialize(best_guess, model, edge_trunc=0.1, reinit_params=[],
**extras):
"""Check if the Powell minimization found a minimum close to the edge of
the prior for any parameter. If so, reinitialize to the center of the
prior.
This is only done for parameters where ``'reinit':True`` in the model
configuration dictionary, or for parameters in the supplied
``reinit_params`` list.
    :param best_guess:
The result of some sort of optimization step, iterable of length
model.ndim.
:param model:
A ..models.parameters.ProspectorParams() object.
:param edge_trunc: (optional, default 0.1)
The fractional distance from the edge of the priors that triggers
reinitialization.
:param reinit_params: optional
A list of model parameter names to reinitialize, overrides the value or
presence of the ``reinit`` key in the model configuration dictionary.
:returns output:
The best_guess with parameters near the edge reset to be at the center
of the prior. ndarray of shape (ndim,)
"""
warnings.warn("minimizer.reintialize is deprecated", DeprecationWarning)
edge = edge_trunc
bounds = model.theta_bounds()
output = np.array(best_guess)
reinit = np.zeros(model.ndim, dtype=bool)
for p, inds in list(model.theta_index.items()):
reinit[inds] = (model._config_dict[p].get('reinit', False) or
(p in reinit_params))
for k, (guess, bound) in enumerate(zip(best_guess, bounds)):
# Normalize the guess and the bounds
prange = bound[1] - bound[0]
g, b = guess/prange, bound/prange
if ((g - b[0] < edge) or (b[1] - g < edge)) and (reinit[k]):
output[k] = b[0] + prange/2
return output
| 3,482 | 34.907216 | 80 |
py
|
prospector
|
prospector-master/prospect/fitting/__init__.py
|
from .ensemble import run_emcee_sampler, restart_emcee_sampler
from .minimizer import reinitialize
from .nested import run_dynesty_sampler
from .fitting import fit_model, lnprobfn, run_minimize
__all__ = ["fit_model", "lnprobfn",
# below should all be removed/deprecated
"run_emcee_sampler", "restart_emcee_sampler",
"run_dynesty_sampler",
"run_minimize", "reinitialize"]
| 417 | 37 | 62 |
py
|
prospector
|
prospector-master/prospect/fitting/ensemble.py
|
import sys
import numpy as np
from numpy.random import normal, multivariate_normal
try:
import emcee
EMCEE_VERSION = emcee.__version__.split('.')[0]
except(ImportError):
pass
from .convergence import convergence_check
__all__ = ["run_emcee_sampler", "restart_emcee_sampler",
"reinitialize_ball", "sampler_ball",
"emcee_burn"]
def run_emcee_sampler(lnprobfn, initial_center, model,
verbose=True, postargs=[], postkwargs={},
nwalkers=None, nburn=[16], niter=32,
walker_factor=4, prob0=None, storechain=True,
pool=None, hdf5=None, interval=1,
convergence_check_interval=None,
**kwargs):
"""Run an emcee sampler, including iterations of burn-in and re -
initialization. Returns the production sampler.
Parameters
----------
:param lnprobfn:
The posterior probability function.
:param initial_center:
The initial center for the sampler ball
:param model:
An instance of a models.ProspectorParams object.
Optional Parameters
-------------------
:param postargs:
Positional arguments for ``lnprobfn``.
:param postkwargs:
Keyword arguments for ``lnprobfn``.
:param nwalkers:
The number of walkers to use. If None, use the nearest power of two to
``ndim * walker_factor``.
:param niter:
Number of iterations for the production run
:param nburn:
List of the number of iterations to run in each round of burn-in (for
removing stuck walkers.) E.g. `nburn=[32, 64]` will run the sampler for
32 iterations before reinitializing and then run the sampler for
another 64 iterations before starting the production run.
:param storechain: (default: True)
If using HDF5 output, setting this to False will keep the chain from
being held in memory by the sampler object.
:param pool: (optional)
A ``Pool`` object, either from ``multiprocessing`` or from
``emcee.mpi_pool``.
:param hdf5: (optional)
H5py.File object that will be used to store the chain in the datasets
``"chain"`` and ``"lnprobability"``. If not set, the chain will instead
be stored as a numpy array in the returned sampler object
:param interval: (optional, default: 1)
Fraction of the full run at which to flush to disk, if using hdf5 for
output.
:param convergence_check_interval:
How often to assess convergence, in number of iterations. If this is
not `None`, then the KL convergence test is run.
Extra Parameters
----------------
:param convergence_chunks:
The number of iterations to combine when creating the marginalized
parameter probability functions.
:param convergence_stable_points_criteria:
The number of stable convergence checks that the chain must pass before
being declared stable.
"""
# Get dimensions
ndim = model.ndim
if nwalkers is None:
nwalkers = int(2 ** np.round(np.log2(ndim * walker_factor)))
if verbose:
print('number of walkers={}'.format(nwalkers))
# Initialize + burn-in sampler
bsampler = emcee.EnsembleSampler(nwalkers, ndim, lnprobfn,
args=postargs, kwargs=postkwargs,
pool=pool)
initial, in_cent, in_prob = emcee_burn(bsampler, initial_center, nburn, model,
verbose=verbose, prob0=prob0, **kwargs)
# Production run.
    # The esampler returned by this method is a different instance from the one
# used for burn-in
esampler = restart_emcee_sampler(lnprobfn, initial, niter=niter, verbose=verbose,
postargs=postargs, postkwargs=postkwargs,
pool=pool, hdf5=hdf5, interval=interval,
convergence_check_interval=convergence_check_interval,
storechain=storechain, **kwargs)
return esampler, in_cent, in_prob
def restart_emcee_sampler(lnprobfn, initial, niter=32,
verbose=True, postargs=[], postkwargs={},
storechain=True, pool=None, hdf5=None, interval=1,
convergence_check_interval=None,
**kwargs):
"""Run a sampler from from a specified set of walker positions and run it
for a specified number of iterations.
"""
# Get dimensions
nwalkers, ndim = initial.shape
if verbose:
print('number of walkers={}'.format(nwalkers))
# Initialize sampler
esampler = emcee.EnsembleSampler(nwalkers, ndim, lnprobfn, pool=pool,
args=postargs, kwargs=postkwargs)
# Run
if verbose:
print('starting production')
if convergence_check_interval is None:
esampler = emcee_production(esampler, initial, niter, pool=pool,
hdf5=hdf5, interval=interval, storechain=storechain)
else:
cnvrg_production = emcee_production_convergence
esampler = cnvrg_production(esampler, initial, niter, pool=pool, verbose=verbose,
hdf5=hdf5, interval=interval, storechain=storechain,
convergence_check_interval=convergence_check_interval,
**kwargs)
if verbose:
print('done production')
return esampler
def emcee_production(esampler, initial, niter, pool=None,
hdf5=None, interval=1, storechain=True,
**extras):
"""
"""
# Production run
esampler.reset()
# Do some emcee version specific choices
if EMCEE_VERSION == '3':
ndim = esampler.ndim
nwalkers = esampler.nwalkers
mc_args = {"store": storechain,
"iterations": niter}
else:
ndim = esampler.dim
nwalkers = esampler.k
mc_args = {"storechain": storechain,
"iterations": niter}
if hdf5 is not None:
# Set up hdf5 backend
sdat = hdf5.create_group('sampling')
# static dataset
chain = sdat.create_dataset("chain", (nwalkers, niter, ndim))
lnpout = sdat.create_dataset("lnprobability", (nwalkers, niter))
else:
storechain = True
for i, result in enumerate(esampler.sample(initial, **mc_args)):
if hdf5 is not None:
chain[:, i, :] = result[0]
lnpout[:, i] = result[1]
if (np.mod(i+1, int(interval*niter)) == 0) or (i+1 == niter):
# do stuff every once in awhile
# this would be the place to put some callback functions
# e.g. [do(result, i, esampler) for do in things_to_do]
# like, should probably store the random state too.
hdf5.flush()
return esampler
def emcee_production_convergence(esampler, initial, niter, pool=None, verbose=True,
hdf5=None, interval=1, storechain=True,
convergence_check_interval=None,
convergence_chunks=325,
convergence_stable_points_criteria=3,
**kwargs):
"""
"""
if hdf5 is None:
print("Online convergence checking requires HDF5 backend")
esampler.reset()
# Do some emcee version specific choices
if EMCEE_VERSION == '3':
ndim = esampler.ndim
nwalkers = esampler.nwalkers
mc_args = {"store": storechain,
"iterations": niter}
else:
ndim = esampler.dim
nwalkers = esampler.k
mc_args = {"storechain": storechain,
"iterations": niter}
# Set up hdf5 backend
sdat = hdf5.create_group('sampling')
# dynamic dataset
conv_int = convergence_check_interval
conv_crit = convergence_stable_points_criteria
nfirstcheck = (2 * convergence_chunks + conv_int * (conv_crit - 1))
chain = sdat.create_dataset('chain', (nwalkers, nfirstcheck, ndim),
maxshape=(nwalkers, None, ndim))
lnpout = sdat.create_dataset('lnprobability', (nwalkers, nfirstcheck),
maxshape=(nwalkers, None))
kl = sdat.create_dataset('kl_divergence', (conv_crit, ndim),
maxshape=(None, ndim))
kl_iter = sdat.create_dataset('kl_iteration', (conv_crit,),
maxshape=(None,))
# Main loop over iterations of the MCMC sampler
for i, result in enumerate(esampler.sample(initial, **mc_args)):
chain[:, i, :] = result[0]
lnpout[:, i] = result[1]
do_convergence_check = ((convergence_check_interval is not None) and
(i+1 >= nfirstcheck) and
((i+1 - nfirstcheck) % convergence_check_interval == 0))
if do_convergence_check:
if verbose:
                print('checking convergence after iteration {0}'.format(i+1))
converged, info = convergence_check(chain,
convergence_check_interval=conv_int,
convergence_stable_points_criteria=conv_crit,
convergence_chunks=convergence_chunks, **kwargs)
kl[:, :] = info['kl_test']
kl_iter[:] = info['iteration']
hdf5.flush()
if converged:
if verbose:
print('converged, ending emcee.')
break
else:
if verbose:
print('not converged, continuing.')
if (i+1 > (niter-convergence_check_interval)):
# if we're going to exit soon, do something fancy
ngrow = niter - (i + 1)
chain.resize(chain.shape[1] + ngrow, axis=1)
lnpout.resize(lnpout.shape[1] + ngrow, axis=1)
else:
# else extend by convergence_check_interval
chain.resize(chain.shape[1] + convergence_check_interval, axis=1)
lnpout.resize(lnpout.shape[1] + convergence_check_interval, axis=1)
kl.resize(kl.shape[0] + 1, axis=0)
kl_iter.resize(kl_iter.shape[0] + 1, axis=0)
if (np.mod(i+1, int(interval*niter)) == 0) or (i+1 == niter):
# do stuff every once in awhile
# stuff
hdf5.flush()
return esampler
def emcee_burn(sampler, initial_center, nburn, model=None, prob0=None,
initial_disp=0.1, verbose=True, **kwargs):
"""Run the emcee sampler for nburn iterations, reinitializing after each
round.
:param nburn:
List giving the number of iterations in each round of burn-in.
E.g. nburn=[32, 64] will run the sampler for 32 iterations before
reinitializing and then run the sampler for another 64 iterations
"""
# Do some emcee version specific choices
if EMCEE_VERSION == '3':
nwalkers = sampler.nwalkers
mc_args = {"store": True}
else:
nwalkers = sampler.k
mc_args = {"storechain": True}
# Set up initial positions
model.set_parameters(initial_center)
disps = model.theta_disps(default_disp=initial_disp)
limits = np.array(model.theta_bounds()).T
if hasattr(model, 'theta_disp_floor'):
disp_floor = model.theta_disp_floor()
else:
disp_floor = 0.0
disps = np.sqrt(disps**2 + disp_floor**2)
initial = resample_until_valid(sampler_ball, initial_center, disps, nwalkers,
limits=limits, prior_check=model)
# Start the burn-in
epos = initial
for k, iburn in enumerate(nburn):
epos, eprob, state = sampler.run_mcmc(initial, iburn, **mc_args)
# Find best walker position
best = sampler.flatlnprobability.argmax()
# Is new position better than old position?
if prob0 is None or sampler.flatlnprobability[best] > prob0:
prob0 = sampler.flatlnprobability[best]
initial_center = sampler.flatchain[best, :]
        if k == len(nburn) - 1:
# Done burning.
if verbose:
print('done all burn-in.')
# Don't construct new sampler ball after last burn-in.
sampler.reset()
continue
if epos.shape[0] < model.ndim*2:
initial = reinitialize_ball(epos, eprob, center=initial_center,
limits=limits, disp_floor=disp_floor,
prior_check=model, **kwargs)
else:
initial = reinitialize_ball_covar(epos, eprob, center=initial_center,
limits=limits, disp_floor=disp_floor,
prior_check=model, **kwargs)
sampler.reset()
if verbose:
print('done burn #{} ({} iterations)'.format(k, iburn))
return epos, initial_center, prob0
def reinitialize_ball_covar(pos, prob, threshold=50.0, center=None,
disp_floor=0.0, **extras):
"""Estimate the parameter covariance matrix from the positions of a
fraction of the current ensemble and sample positions from the multivariate
gaussian corresponding to that covariance matrix. If ``center`` is not
given the center will be the mean of the (fraction of) the ensemble.
:param pos:
The current positions of the ensemble, ndarray of shape (nwalkers, ndim)
:param prob:
The current probabilities of the ensemble, used to reject some fraction
of walkers with lower probability (presumably stuck walkers). ndarray
of shape (nwalkers,)
:param threshold: default 50.0
Float in the range [0,100] giving the fraction of walkers to throw away
based on their ``prob`` before estimating the covariance matrix.
:param center: optional
The center of the multivariate gaussian. If not given or ``None``, then
        the center will be estimated from the mean of the positions of the
acceptable walkers. ndarray of shape (ndim,)
:param limits: optional
An ndarray of shape (2, ndim) giving lower and upper limits for each
parameter. The newly generated values will be clipped to these limits.
If the result consists only of the limit then a vector of small random
numbers will be added to the result.
:returns pnew:
New positions for the sampler, ndarray of shape (nwalker, ndim)
"""
pos = np.atleast_2d(pos)
nwalkers = prob.shape[0]
good = prob > np.percentile(prob, threshold)
if center is None:
center = pos[good, :].mean(axis=0)
Sigma = np.cov(pos[good, :].T)
Sigma[np.diag_indices_from(Sigma)] += disp_floor**2
pnew = resample_until_valid(multivariate_normal, center, Sigma,
nwalkers, **extras)
return pnew
def reinitialize_ball(pos, prob, center=None, ptiles=[25, 50, 75],
disp_floor=0., **extras):
"""Choose the best walker and build a ball around it based on the other
walkers. The scatter in the new ball is based on the interquartile range
for the walkers in their current positions
"""
pos = np.atleast_2d(pos)
nwalkers = pos.shape[0]
if center is None:
center = pos[prob.argmax(), :]
tmp = np.percentile(pos, ptiles, axis=0)
# 1.35 is the ratio between the 25-75% interquartile range and 1
# sigma (for a normal distribution)
scatter = np.abs((tmp[2] - tmp[0]) / 1.35)
scatter = np.sqrt(scatter**2 + disp_floor**2)
    pnew = resample_until_valid(sampler_ball, center, scatter,
                                nwalkers, **extras)
return pnew
def resample_until_valid(sampling_function, center, sigma, nwalkers,
limits=None, maxiter=1e3, prior_check=None, **extras):
"""Sample from the sampling function, with optional clipping to prior
bounds and resampling in the case of parameter positions that are outside
complicated custom priors.
:param sampling_function:
The sampling function to use, it must have the calling sequence
``sampling_function(center, sigma, size=size)``
:param center:
The center of the distribution
:param sigma:
Array describing the scatter of the distribution in each dimension.
Can be two-dimensional, e.g. to describe a covariant multivariate
normal (if the sampling function takes such a thing).
:param nwalkers:
The number of valid samples to produce.
:param limits: (optional)
Simple limits on the parameters, passed to ``clip_ball``.
:param prior_check: (optional)
An object that has a ``prior_product()`` method which returns the prior
ln(probability) for a given parameter position.
:param maxiter:
Maximum number of iterations to try resampling before giving up and
returning a set of parameter positions at least one of which is not
within the prior.
:returns pnew:
New parameter positions, ndarray of shape (nwalkers, ndim)
"""
invalid = np.ones(nwalkers, dtype=bool)
pnew = np.zeros([nwalkers, len(center)])
for i in range(int(maxiter)):
# replace invalid elements with new samples
tmp = sampling_function(center, sigma, size=invalid.sum())
pnew[invalid, :] = tmp
if limits is not None:
# clip to simple limits
if sigma.ndim > 1:
diag = np.diag(sigma)
else:
diag = sigma
pnew = clip_ball(pnew, limits, diag)
if prior_check is not None:
# check the prior
lnp = np.array([prior_check.prior_product(pos) for pos in pnew])
invalid = ~np.isfinite(lnp)
if invalid.sum() == 0:
# everything is valid, return
return pnew
else:
# No prior check, return on first iteration
return pnew
# reached maxiter, return whatever exists so far
print("initial position resampler hit ``maxiter``")
return pnew
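# Minimal sketch (illustrative): draw a valid ball of walker positions inside
# a unit box, resampling any draws rejected by a simple stand-in prior object
# that only requires all parameters to be positive.
def _example_resample_until_valid(ndim=3, nwalkers=16):
    class _BoxPrior(object):
        def prior_product(self, pos):
            return 0.0 if np.all(pos > 0) else -np.inf
    center = np.full(ndim, 0.5)
    sigma = np.full(ndim, 0.2)
    limits = np.array([np.zeros(ndim), np.ones(ndim)])
    return resample_until_valid(sampler_ball, center, sigma, nwalkers,
                                limits=limits, prior_check=_BoxPrior())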
def sampler_ball(center, disp, size=1):
"""Produce a ball around a given position. This should probably be a
one-liner.
"""
ndim = center.shape[0]
if np.size(disp) == 1:
disp = np.zeros(ndim) + disp
pos = normal(size=[size, ndim]) * disp[None, :] + center[None, :]
return pos
def clip_ball(pos, limits, disp):
"""Clip to limits. If all samples below (above) limit, add (subtract) a
uniform random number (scaled by ``disp``) to the limit.
"""
npos = pos.shape[0]
pos = np.clip(pos, limits[0][None, :], limits[1][None, :])
for i, p in enumerate(pos.T):
u = np.unique(p)
if len(u) == 1:
tiny = disp[i] * np.random.uniform(0, disp[i], npos)
if u == limits[0, i]:
pos[:, i] += tiny
if u == limits[1, i]:
pos[:, i] -= tiny
return pos
def restart_sampler(sample_results, lnprobf, sps, niter,
nthreads=1, pool=None):
"""Restart a sampler from its last position and run it for a specified
number of iterations. The sampler chain and the model object should be
given in the sample_results dictionary. Note that lnprobfn and sps must be
defined at the global level in the same way as the sampler originally ran,
or your results will be super weird (and wrong)!
Unimplemented/tested
"""
model = sample_results['model']
initial = sample_results['chain'][:, -1, :]
nwalkers, ndim = initial.shape
esampler = emcee.EnsembleSampler(nwalkers, ndim, lnprobf, args=[model],
threads=nthreads, pool=pool)
    epos, eprob, state = esampler.run_mcmc(initial, niter)
return esampler
| 20,408 | 38.399614 | 96 |
py
|
prospector
|
prospector-master/prospect/fitting/convergence.py
|
import numpy as np
__all__ = ["convergence_check", "make_kl_bins", "kl_divergence",
"find_subsequence"]
def find_subsequence(subseq, seq):
"""If subsequence exists in sequence, return True. otherwise return False.
can be modified to return the appropriate index (useful to test WHERE a
chain is converged)
"""
i, n, m = -1, len(seq), len(subseq)
try:
while True:
i = seq.index(subseq[0], i + 1, n - m + 1)
if subseq == seq[i:i + m]:
#return i+m-1 (could return "converged" index here)
return True
except ValueError:
return False
def kl_divergence(pdf1, pdf2):
"""Calculates Kullback-Leibler (KL) divergence for two discretized PDFs
"""
idx = (pdf1 != 0) # no contribution from bins where there is no density in the target PDF
pdf1 = pdf1 / float(pdf1.sum())
pdf2 = pdf2 / float(pdf2.sum())
dl = pdf1[idx] * np.log(pdf1[idx] / pdf2[idx])
return dl.sum()
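# Minimal sketch (illustrative): the KL divergence of a histogram against
# itself is zero, and grows as the two histograms diverge.
def _example_kl_divergence():
    pdf = np.array([5., 3., 2.])
    assert np.isclose(kl_divergence(pdf, pdf), 0.0)
    shifted = np.array([2., 3., 5.])
    return kl_divergence(pdf, shifted)  # > 0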
def make_kl_bins(chain, nbins=10):
"""Create bins with an ~equal number of data points in each when there are
empty bins, the KL divergence is undefined this adaptive binning scheme
avoids that problem
"""
sorted = np.sort(chain)
nskip = np.floor(chain.shape[0]/float(nbins)).astype(int)-1
bins = sorted[::nskip]
bins[-1] = sorted[-1] # ensure the maximum bin is the maximum of the chain
assert bins.shape[0] == nbins+1
pdf, bins = np.histogram(chain, bins=bins)
return pdf, bins
def convergence_check(chain, convergence_check_interval=None, convergence_chunks=325,
convergence_stable_points_criteria=3, convergence_nhist=50,
convergence_kl_threshold=0.018, **kwargs):
"""Performs a Kullback-Leibler divergence test for convergence.
:param chain:
The chain to perform the test on.
:param convergence_check_interval:
How often to assess convergence, in number of iterations.
:param convergence_chunks:
The number of iterations to combine when creating the marginalized
parameter probability functions.
:param convergence_stable_points_criteria:
The number of stable convergence checks that the chain must pass before
being declared stable.
:param convergence_nhist:
Controls how finely the PDF is subsampled before comparison. This has a
strong effect on the normalization of the KL divergence. Larger -->
more noise but finer distinction between PDF shapes.
:param convergence_kl_threshold:
The convergence criteria for the KL test. Suggest running multiple long
chains and plotting the KL divergence in each parameter to determine
how to set this.
:returns convergence_flag:
True if converged. False if not.
:returns outdict:
Contains the results of the KL test for each parameter (number of
checks, number of parameters) and the iteration where this was
calculated.
"""
nwalkers, niter, npars = chain.shape
# Define some useful quantities
niter_check_start = 2*convergence_chunks # must run for at least 2 intervals before checking!
ncheck = np.floor((niter-niter_check_start)/float(convergence_check_interval)).astype(int)+1
# Calculate the K-L divergence in each chunk for each parameter
kl = np.zeros(shape=(ncheck, npars))
xiter = np.arange(ncheck) * convergence_check_interval + niter_check_start
for n in range(ncheck):
for i in range(npars):
# Define chains and calculate pdf
lo = (xiter[n] - 2*convergence_chunks)
hi = (xiter[n] - convergence_chunks)
early_test_chain = chain[:, lo:hi, i].flatten()
pdf_early, bins = make_kl_bins(early_test_chain, nbins=convergence_nhist)
# clip test chain so that it's all contained in bins
# basically redefining first and last bin to have open edges
late_test_chain = np.clip(chain[:, hi:xiter[n], i].flatten(),
bins[0], bins[-1])
pdf_late, _ = np.histogram(late_test_chain, bins=bins)
kl[n, i] = kl_divergence(pdf_late, pdf_early)
# Check for convergence
converged_idx = np.all(kl < convergence_kl_threshold, axis=1)
convergence_flag = find_subsequence([True]*convergence_stable_points_criteria,
converged_idx.tolist())
outdict = {'iteration': xiter, 'kl_test': kl}
return convergence_flag, outdict
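# Minimal sketch (illustrative): a stationary Gaussian chain of shape
# (nwalkers, niter, npars) should pass the KL convergence test with the
# default thresholds.
def _example_convergence_check():
    chain = np.random.normal(size=(32, 1000, 2))
    flag, info = convergence_check(chain, convergence_check_interval=100,
                                   convergence_chunks=325,
                                   convergence_stable_points_criteria=3)
    return flag, info["kl_test"]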
| 4,582 | 37.512605 | 97 |
py
|
prospector
|
prospector-master/prospect/io/read_results.py
|
import sys, os
from copy import deepcopy
import warnings
import pickle, json
import numpy as np
try:
import h5py
except:
pass
try:
from sedpy.observate import load_filters
except:
pass
"""Convenience functions for reading and reconstructing results from a fitting
run, including reconstruction of the model for making posterior samples
"""
__all__ = ["results_from", "emcee_restarter",
"get_sps", "get_model",
"traceplot", "subcorner",
"compare_paramfile"]
def unpick(pickled):
"""create a serialized object that can go into hdf5 in py2 and py3, and can be read by both
"""
try:
obj = pickle.loads(pickled, encoding='bytes')
except(TypeError):
obj = pickle.loads(pickled)
return obj
def results_from(filename, model_file=None, dangerous=True, **kwargs):
"""Read a results file with stored model and MCMC chains.
:param filename:
Name and path to the file holding the results. If ``filename`` ends in
"h5" then it is assumed that this is an HDF5 file, otherwise it is
assumed to be a pickle.
:param dangerous: (default, True)
If True, use the stored paramfile text to import the parameter file and
reconstitute the model object. This executes code in the stored
paramfile text during import, and is therefore dangerous.
:returns results:
A dictionary of various results including:
+ `"chain"` - Samples from the posterior probability (ndarray).
+ `"lnprobability"` - The posterior probability of each sample.
+ `"weights"` - The weight of each sample, if `dynesty` was used.
+ `"theta_labels"` - List of strings describing free parameters.
+ `"bestfit"` - The prediction of the data for the posterior sample with
the highest `"lnprobability"`, as a dictionary.
+ `"run_params"` - A dictionary of arguments supplied to prospector at
the time of the fit.
+ `"paramfile_text"` - Text of the file used to run prospector, string
:returns obs:
The obs dictionary
:returns model:
The models.SedModel() object, if it could be regenerated from the stored
`"paramfile_text"`. Otherwise, `None`.
"""
# Read the basic chain, parameter, and run_params info
if filename.split('.')[-1] == 'h5':
res = read_hdf5(filename, **kwargs)
if "_mcmc.h5" in filename:
mf_default = filename.replace('_mcmc.h5', '_model')
else:
mf_default = "x"
else:
with open(filename, 'rb') as rf:
res = pickle.load(rf)
mf_default = filename.replace('_mcmc', '_model')
# Now try to read the model object itself from a pickle
if model_file is None:
mname = mf_default
else:
mname = model_file
param_file = (res['run_params'].get('param_file', ''),
res.get("paramfile_text", ''))
model, powell_results = read_model(mname, param_file=param_file,
dangerous=dangerous, **kwargs)
if dangerous:
try:
model = get_model(res)
except:
model = None
res['model'] = model
if powell_results is not None:
res["powell_results"] = powell_results
return res, res["obs"], model
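# Minimal usage sketch (illustrative): read a stored fit back in; the filename
# here is a placeholder for an actual "*_mcmc.h5" output file.
def _example_results_from(resultfile="demo_mcmc.h5"):
    res, obs, model = results_from(resultfile, dangerous=False)
    print(res["theta_labels"])
    print(res["chain"].shape)
    return res, obs, model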
def emcee_restarter(restart_from="", niter=32, **kwargs):
"""Get the obs, model, and sps objects from a previous run, as well as the
run_params and initial positions (which are determined from the end of the
last run, and inserted into the run_params dictionary)
:param restart_from:
Name of the file to restart the sampling from. An error is raised if
this does not include an emcee style chain of shape (nwalker, niter,
ndim)
:param niter: (default: 32)
        Number of additional iterations to do (added to run_params)
:returns obs:
The `obs` dictionary used in the last run.
:returns model:
The model object used in the last run.
:returns sps:
The `sps` object used in the last run.
:returns noise:
A tuple of (None, None), since it is assumed the noise model in the
last run was trivial.
:returns run_params:
A dictionary of parameters controlling the operation. This is the same
as used in the last run, but with the "niter" key changed, and a new
"initial_positions" key that gives the ending positions of the emcee
walkers from the last run. The filename from which the run is
restarted is also stored in the "restart_from" key.
"""
result, obs, model = results_from(restart_from)
noise = (None, None)
# check for emcee style outputs
is_emcee = (len(result["chain"].shape) == 3) & (result["chain"].shape[0] > 1)
msg = "Result file {} does not have a chain of the proper shape."
assert is_emcee, msg.format(restart_from)
sps = get_sps(result)
run_params = deepcopy(result["run_params"])
run_params["niter"] = niter
run_params["restart_from"] = restart_from
initial_positions = result["chain"][:, -1, :]
run_params["initial_positions"] = initial_positions
return obs, model, sps, noise, run_params
def read_model(model_file, param_file=('', ''), dangerous=False, **extras):
"""Read the model pickle. This can be difficult if there are user defined
functions that have to be loaded dynamically. In that case, import the
string version of the paramfile and *then* try to unpickle the model
object.
:param model_file:
String, name and path to the model pickle.
:param dangerous: (default: False)
If True, try to import the given paramfile.
:param param_file:
2-element tuple. The first element is the name of the paramfile, which
will be used to set the name of the imported module. The second
element is the param_file contents as a string. The code in this
string will be imported.
"""
model = powell_results = None
if os.path.exists(model_file):
try:
with open(model_file, 'rb') as mf:
mod = pickle.load(mf)
except(AttributeError):
# Here one can deal with module and class names that changed
with open(model_file, 'rb') as mf:
mod = load(mf)
except(ImportError, KeyError):
# here we load the parameter file as a module using the stored
# source string. Obviously this is dangerous as it will execute
# whatever is in the stored source string. But it can be used to
            # recover functions (especially dependency functions) that are user
# defined
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
if dangerous:
user_module = import_module_from_string(param_file[1], modname)
with open(model_file, 'rb') as mf:
mod = pickle.load(mf)
model = mod['model']
for k, v in list(model.theta_index.items()):
if type(v) is tuple:
model.theta_index[k] = slice(*v)
powell_results = mod['powell']
return model, powell_results
def read_hdf5(filename, **extras):
"""Read an HDF5 file (with a specific format) into a dictionary of results.
This HDF5 file is assumed to have the groups ``sampling`` and ``obs`` which
respectively contain the sampling chain and the observational data used in
the inference.
All attributes of these groups as well as top-level attributes are loaded
into the top-level of the dictionary using ``json.loads``, and therefore
must have been written with ``json.dumps``. This should probably use
JSONDecoders, but who has time to learn that.
:param filename:
Name of the HDF5 file.
"""
groups = {"sampling": {}, "obs": {},
"bestfit": {}, "optimization": {}}
res = {}
with h5py.File(filename, "r") as hf:
# loop over the groups
for group, d in groups.items():
# check the group exists
if group not in hf:
continue
# read the arrays in that group into the dictionary for that group
for k, v in hf[group].items():
d[k] = np.array(v)
# unserialize the attributes and put them in the dictionary
for k, v in hf[group].attrs.items():
try:
d[k] = json.loads(v)
except:
try:
d[k] = unpick(v)
except:
d[k] = v
# do top-level attributes.
for k, v in hf.attrs.items():
try:
res[k] = json.loads(v)
except:
try:
res[k] = unpick(v)
except:
res[k] = v
res.update(groups['sampling'])
res["bestfit"] = groups["bestfit"]
res["optimization"] = groups["optimization"]
res['obs'] = groups['obs']
try:
res['obs']['filters'] = load_filters([str(f) for f in res['obs']['filters']])
except:
pass
try:
res['rstate'] = unpick(res['rstate'])
except:
pass
#try:
# mp = [names_to_functions(p.copy()) for p in res['model_params']]
# res['model_params'] = mp
#except:
# pass
return res
def read_pickles(filename, **kwargs):
"""Alias for backwards compatability. Calls `results_from()`.
"""
return results_from(filename, **kwargs)
def get_sps(res):
"""This gets exactly the SPS object used in the fiting (modulo any
changes to FSPS itself).
It (scarily) imports the paramfile (stored as text in the results
dictionary) as a module and then uses the `load_sps` method defined in the
paramfile module.
:param res:
A results dictionary (the output of `results_from()`)
:returns sps:
An sps object (i.e. from prospect.sources)
"""
import os
param_file = (res['run_params'].get('param_file', ''),
res.get("paramfile_text", ''))
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
user_module = import_module_from_string(param_file[1], modname)
try:
sps = user_module.load_sps(**res['run_params'])
except(AttributeError):
sps = user_module.build_sps(**res['run_params'])
# Now check that the SSP libraries are consistent
flib = res['run_params'].get('sps_libraries', None)
try:
rlib = sps.ssp.libraries
except(AttributeError):
rlib = None
if (flib is None) or (rlib is None):
warnings.warn("Could not check SSP library versions.")
else:
liberr = ("The FSPS libraries used in fitting({}) are not the "
"same as the FSPS libraries that you are using now ({})".format(flib, rlib))
# If fitting and reading in are happening in different python versions,
# ensure string comparison doesn't throw error:
        if isinstance(flib[0], bytes):
            flib = [i.decode() for i in flib]
        if isinstance(rlib[0], bytes):
            rlib = [i.decode() for i in rlib]
assert (flib[0] == rlib[0]) and (flib[1] == rlib[1]), liberr
return sps
def get_model(res):
"""This gets exactly the model object used in the fiting.
It (scarily) imports the paramfile (stored as text in the results
dictionary) as a module and then uses the `load_model` method defined in the
paramfile module, with `run_params` dictionary passed to it.
:param res:
A results dictionary (the output of `results_from()`)
:returns model:
A prospect.models.SedModel object
"""
import os
param_file = (res['run_params'].get('param_file', ''),
res.get("paramfile_text", ''))
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
user_module = import_module_from_string(param_file[1], modname)
try:
model = user_module.load_model(**res['run_params'])
except(AttributeError):
model = user_module.build_model(**res['run_params'])
return model
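# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of regenerating the sps and model objects from a results
# dictionary and re-predicting the best-fit model.  It assumes `res` came from
# `results_from()` and that the file was written with an sps object, so that a
# "bestfit" group (with a "parameter" dataset) is present.
def _example_rebuild(res):
    sps = get_sps(res)
    model = get_model(res)
    theta_best = res["bestfit"]["parameter"]
    spec, phot, mfrac = model.predict(theta_best, obs=res["obs"], sps=sps)
    return spec, phot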
def import_module_from_string(source, name, add_to_sys_modules=True):
"""Well this seems dangerous.
"""
import imp
user_module = imp.new_module(name)
exec(source, user_module.__dict__)
if add_to_sys_modules:
sys.modules[name] = user_module
return user_module
def traceplot(results, showpars=None, start=0, chains=slice(None),
figsize=None, truths=None, **plot_kwargs):
"""Plot the evolution of each parameter value with iteration #, for each
walker in the chain.
:param results:
A Prospector results dictionary, usually the output of
``results_from('resultfile')``.
:param showpars: (optional)
A list of strings of the parameters to show. Defaults to all
parameters in the ``"theta_labels"`` key of the ``sample_results``
dictionary.
:param chains:
        If results are from an ensemble sampler, setting ``chains`` to an
        integer array of walker indices will cause only those walkers to be
        used in generating the plot. Useful for keeping the plot from getting
        too cluttered.
:param start: (optional, default: 0)
Integer giving the iteration number from which to start plotting.
:param **plot_kwargs:
Extra keywords are passed to the
``matplotlib.axes._subplots.AxesSubplot.plot()`` method.
:returns tracefig:
A multipaneled Figure object that shows the evolution of walker
positions in the parameters given by ``showpars``, as well as
ln(posterior probability)
"""
import matplotlib.pyplot as pl
# Get parameter names
try:
parnames = np.array(results['theta_labels'])
except(KeyError):
parnames = np.array(results['model'].theta_labels())
# Restrict to desired parameters
if showpars is not None:
ind_show = np.array([p in showpars for p in parnames], dtype=bool)
parnames = parnames[ind_show]
else:
ind_show = slice(None)
# Get the arrays we need (trace, lnp, wghts)
trace = results['chain'][..., ind_show]
if trace.ndim == 2:
trace = trace[None, :]
trace = trace[chains, start:, :]
lnp = np.atleast_2d(results['lnprobability'])[chains, start:]
wghts = results.get('weights', None)
if wghts is not None:
wghts = wghts[start:]
nwalk = trace.shape[0]
# Set up plot windows
ndim = len(parnames) + 1
nx = int(np.floor(np.sqrt(ndim)))
ny = int(np.ceil(ndim * 1.0 / nx))
sz = np.array([nx, ny])
factor = 3.0 # size of one side of one panel
lbdim = 0.2 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 * factor # w/hspace size
plotdim = factor * sz + factor * (sz - 1) * whspace
dim = lbdim + plotdim + trdim
if figsize is None:
fig, axes = pl.subplots(nx, ny, figsize=(dim[1], dim[0]), sharex=True)
else:
fig, axes = pl.subplots(nx, ny, figsize=figsize, sharex=True)
axes = np.atleast_2d(axes)
#lb = lbdim / dim
#tr = (lbdim + plotdim) / dim
#fig.subplots_adjust(left=lb[1], bottom=lb[0], right=tr[1], top=tr[0],
# wspace=whspace, hspace=whspace)
# Sequentially plot the chains in each parameter
for i in range(ndim - 1):
ax = axes.flat[i]
for j in range(nwalk):
ax.plot(trace[j, :, i], **plot_kwargs)
ax.set_title(parnames[i], y=1.02)
# Plot lnprob
ax = axes.flat[-1]
for j in range(nwalk):
ax.plot(lnp[j, :], **plot_kwargs)
ax.set_title('lnP', y=1.02)
[ax.set_xlabel("iteration") for ax in axes[-1, :]]
#[ax.set_xticklabels('') for ax in axes[:-1, :].flat]
if truths is not None:
for i, t in enumerate(truths[ind_show]):
axes.flat[i].axhline(t, color='k', linestyle=':')
pl.tight_layout()
return fig
def param_evol(results, **kwargs):
"""Backwards compatability
"""
return traceplot(results, **kwargs)
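# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of a trace plot restricted to two (hypothetical) free
# parameters, skipping the first 100 iterations; extra keywords such as
# `alpha` are passed through to matplotlib's plot().
def _example_traceplot(res):
    fig = traceplot(res, showpars=["mass", "logzsol"], start=100, alpha=0.5)
    fig.savefig("demo_trace.png", dpi=150)
    return fig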
def subcorner(results, showpars=None, truths=None,
start=0, thin=1, chains=slice(None),
logify=["mass", "tau"], **kwargs):
"""Make a triangle plot of the (thinned, latter) samples of the posterior
parameter space. Optionally make the plot only for a supplied subset of
the parameters.
:param showpars: (optional)
List of string names of parameters to include in the corner plot.
:param truths: (optional)
List of truth values for the chosen parameters.
:param start: (optional, default: 0)
The iteration number to start with when drawing samples to plot.
:param thin: (optional, default: 1)
The thinning of each chain to perform when drawing samples to plot.
:param chains: (optional)
        If results are from an ensemble sampler, setting ``chains`` to an
        integer array of walker indices will cause only those walkers to be
        used in generating the plot. Useful for removing stuck walkers.
:param kwargs:
Remaining keywords are passed to the ``corner`` plotting package.
:param logify:
A list of parameter names to plot in `log10(parameter)` instead of
`parameter`
"""
    try:
        import corner as triangle
    except(ImportError):
        try:
            import triangle
        except(ImportError):
            raise ImportError("Please install the `corner` package.")
# pull out the parameter names and flatten the thinned chains
# Get parameter names
try:
parnames = np.array(results['theta_labels'], dtype='U20')
except(KeyError):
parnames = np.array(results['model'].theta_labels())
# Restrict to desired parameters
if showpars is not None:
ind_show = np.array([parnames.tolist().index(p) for p in showpars])
parnames = parnames[ind_show]
else:
ind_show = slice(None)
# Get the arrays we need (trace, wghts)
trace = results['chain'][..., ind_show]
if trace.ndim == 2:
trace = trace[None, :]
trace = trace[chains, start::thin, :]
wghts = results.get('weights', None)
if wghts is not None:
wghts = wghts[start::thin]
samples = trace.reshape(trace.shape[0] * trace.shape[1], trace.shape[2])
# logify some parameters
xx = samples.copy()
if truths is not None:
xx_truth = np.array(truths).copy()
else:
xx_truth = None
for p in logify:
if p in parnames:
idx = parnames.tolist().index(p)
xx[:, idx] = np.log10(xx[:, idx])
parnames[idx] = "log({})".format(parnames[idx])
if truths is not None:
xx_truth[idx] = np.log10(xx_truth[idx])
# mess with corner defaults
corner_kwargs = {"plot_datapoints": False, "plot_density": False,
"fill_contours": True, "show_titles": True}
corner_kwargs.update(kwargs)
fig = triangle.corner(xx, labels=parnames, truths=xx_truth,
quantiles=[0.16, 0.5, 0.84], weights=wghts, **corner_kwargs)
return fig
def subtriangle(results, **kwargs):
"""Backwards compatability
"""
return subcorner(results, **kwargs)
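# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of a corner plot for a few (hypothetical) parameters, with
# "mass" shown as log10(mass); remaining keywords are forwarded to the corner
# package.
def _example_corner(res):
    fig = subcorner(res, showpars=["mass", "logzsol", "dust2"],
                    logify=["mass"], start=100, thin=5,
                    color="royalblue")
    fig.savefig("demo_corner.png", dpi=150)
    return fig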
def compare_paramfile(res, filename):
"""Compare the runtime parameter file text stored in the `res` dictionary
to the text of some existing file with fully qualified path `filename`.
"""
from pprint import pprint
from difflib import unified_diff
a = res["paramfile_text"]
aa = a.split('\n')
with open(filename, "r") as f:
b = json.dumps(f.read())
bbl = json.loads(b)
bb = bbl.split('\n')
pprint([l for l in unified_diff(aa, bb)])
def names_to_functions(p):
"""Replace names of functions (or pickles of objects) in a parameter
description with the actual functions (or pickles).
"""
from importlib import import_module
for k, v in list(p.items()):
try:
m = import_module(v[1])
f = m.__dict__[v[0]]
except:
try:
f = pickle.loads(v)
except:
f = v
p[k] = f
return p
| 20,576 | 33.525168 | 95 |
py
|
prospector
|
prospector-master/prospect/io/write_results.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" write_results.py - Methods for writing prospector ingredients and outputs
to HDF5 files as well as to pickles.
"""
import os, time, warnings
import pickle, json, base64
import numpy as np
try:
import h5py
_has_h5py_ = True
except(ImportError):
_has_h5py_ = False
__all__ = ["githash", "write_pickles", "write_hdf5",
"chain_to_struct"]
unserial = json.dumps('Unserializable')
def pick(obj):
"""create a serialized object that can go into hdf5 in py2 and py3, and can be read by both
"""
return np.void(pickle.dumps(obj, 0))
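# --- Illustrative sketch (not part of the original module) ---
# What pick() is for: wrapping the pickle bytes in np.void lets an arbitrary
# python object be stored as an opaque HDF5 attribute and recovered later with
# pickle.loads().  The filename below is hypothetical and assumes h5py is
# installed.
def _example_pick_roundtrip():
    obj = {"a": 1, "b": [1, 2, 3]}
    with h5py.File("demo_pickle.h5", "w") as hf:
        hf.attrs["blob"] = pick(obj)
    with h5py.File("demo_pickle.h5", "r") as hf:
        restored = pickle.loads(hf.attrs["blob"].tobytes())
    assert restored == obj
    return restored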
#def run_command(cmd):
# """Open a child process, and return its exit status and stdout.
# """
# import subprocess
# child = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE,
# stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# out = [s for s in child.stdout]
# w = child.wait()
# return os.WEXITSTATUS(w), out
def githash(**extras):
"""Pull out the git hash history for Prospector here.
"""
try:
from .._version import __version__, __githash__
bgh = __version__, __githash__
except(ImportError):
warnings.warn("Could not obtain prospector version info", RuntimeWarning)
bgh = "Can't get version number."
return bgh
def paramfile_string(param_file=None, **extras):
try:
with open(param_file, "r") as pfile:
pstr = pfile.read()
except:
warnings.warn("Could not store paramfile text", RuntimeWarning)
pstr = ''
return pstr
def write_hdf5(hfile, run_params, model, obs, sampler=None,
optimize_result_list=None, tsample=0.0, toptimize=0.0,
sampling_initial_center=[], sps=None, **extras):
"""Write output and information to an HDF5 file object (or
group).
:param hfile:
File to which results will be written. Can be a string name or an
`h5py.File` object handle.
:param run_params:
The dictionary of arguments used to build and fit a model.
:param model:
The `prospect.models.SedModel` object.
:param obs:
The dictionary of observations that were fit.
:param sampler:
The `emcee` or `dynesty` sampler object used to draw posterior samples.
Can be `None` if only optimization was performed.
:param optimize_result_list:
A list of `scipy.optimize.OptimizationResult` objects generated during
the optimization stage. Can be `None` if no optimization is performed
    :param sps: (optional, default: None)
        If a `prospect.sources.SSPBasis` object is supplied, it will be used to
        generate and store the best-fit model spectrum and photometry.
"""
try:
# If ``hfile`` is not a file object, assume it is a filename and open
hf = h5py.File(hfile, "a")
except(AttributeError, TypeError):
hf = hfile
except(NameError):
warnings.warn("HDF5 file could not be opened, as h5py could not be imported.")
return
# ----------------------
# Sampling info
try:
# emcee
a = sampler.acceptance_fraction
write_emcee_h5(hf, sampler, model, sampling_initial_center, tsample)
except(AttributeError):
# dynesty or nestle
if sampler is None:
sdat = hf.create_group('sampling')
elif 'eff' in sampler:
write_dynesty_h5(hf, sampler, model, tsample)
else:
write_nestle_h5(hf, sampler, model, tsample)
# -----------------
# Optimizer info
if optimize_result_list is not None:
out = optresultlist_to_ndarray(optimize_result_list)
mdat = hf.create_dataset('optimization', data=out)
# ----------------------
# High level parameter and version info
write_h5_header(hf, run_params, model)
hf.attrs['optimizer_duration'] = json.dumps(toptimize)
hf.flush()
# ----------------------
# Observational data
write_obs_to_h5(hf, obs)
hf.flush()
# ---------------
# Best fitting model in space of data
if sps is not None:
if "sampling/chain" in hf:
from ..utils.plotting import get_best
_, pbest = get_best(hf["sampling"])
spec, phot, mfrac = model.predict(pbest, obs=obs, sps=sps)
best = hf.create_group("bestfit")
best.create_dataset("spectrum", data=spec)
best.create_dataset("photometry", data=phot)
best.create_dataset("parameter", data=pbest)
best.attrs["mfrac"] = mfrac
if obs["wavelength"] is None:
best.create_dataset("restframe_wavelengths", data=sps.wavelengths)
# Store the githash last after flushing since getting it might cause an
# uncatchable crash
bgh = githash(**run_params)
hf.attrs['prospector_version'] = json.dumps(bgh)
hf.close()
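# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of calling write_hdf5 after a fit.  The run_params, model,
# obs, sampler and sps objects are assumed to come from the user's fitting
# script; the output filename and timing numbers are hypothetical.
def _example_write(run_params, model, obs, sampler, sps):
    write_hdf5("demo_result.h5", run_params, model, obs,
               sampler=sampler, optimize_result_list=None,
               tsample=123.4, toptimize=0.0,
               sampling_initial_center=model.initial_theta.copy(),
               sps=sps)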
def write_emcee_h5(hf, sampler, model, sampling_initial_center, tsample):
"""Write emcee information to the provided HDF5 file in the `sampling`
group.
"""
try:
sdat = hf['sampling']
except(KeyError):
sdat = hf.create_group('sampling')
if 'chain' not in sdat:
sdat.create_dataset('chain',
data=sampler.chain)
lnp = sampler.lnprobability
if ((lnp.shape[0] != lnp.shape[1]) &
(lnp.T.shape == sampler.chain.shape[:-1])):
# hack to deal with emcee3rc lnprob transposition
lnp = lnp.T
sdat.create_dataset('lnprobability', data=lnp)
sdat.create_dataset('acceptance',
data=sampler.acceptance_fraction)
sdat.create_dataset('sampling_initial_center',
data=sampling_initial_center)
sdat.create_dataset('initial_theta',
data=model.initial_theta.copy())
# JSON Attrs
sdat.attrs['rstate'] = pick(sampler.random_state)
sdat.attrs['sampling_duration'] = json.dumps(tsample)
sdat.attrs['theta_labels'] = json.dumps(list(model.theta_labels()))
hf.flush()
def write_nestle_h5(hf, nestle_out, model, tsample):
"""Write nestle results to the provided HDF5 file in the `sampling` group.
"""
try:
sdat = hf['sampling']
except(KeyError):
sdat = hf.create_group('sampling')
sdat.create_dataset('chain',
data=nestle_out['samples'])
sdat.create_dataset('weights',
data=nestle_out['weights'])
sdat.create_dataset('lnlikelihood',
data=nestle_out['logl'])
sdat.create_dataset('lnprobability',
data=(nestle_out['logl'] +
model.prior_product(nestle_out['samples'])))
sdat.create_dataset('logvol',
data=nestle_out['logvol'])
sdat.create_dataset('logz',
data=np.atleast_1d(nestle_out['logz']))
sdat.create_dataset('logzerr',
data=np.atleast_1d(nestle_out['logzerr']))
sdat.create_dataset('h_information',
data=np.atleast_1d(nestle_out['h']))
# JSON Attrs
for p in ['niter', 'ncall']:
sdat.attrs[p] = json.dumps(nestle_out[p])
sdat.attrs['theta_labels'] = json.dumps(list(model.theta_labels()))
sdat.attrs['sampling_duration'] = json.dumps(tsample)
hf.flush()
def write_dynesty_h5(hf, dynesty_out, model, tsample):
"""Write nestle results to the provided HDF5 file in the `sampling` group.
"""
try:
sdat = hf['sampling']
except(KeyError):
sdat = hf.create_group('sampling')
sdat.create_dataset('chain',
data=dynesty_out['samples'])
sdat.create_dataset('weights',
data=np.exp(dynesty_out['logwt']-dynesty_out['logz'][-1]))
sdat.create_dataset('logvol',
data=dynesty_out['logvol'])
sdat.create_dataset('logz',
data=np.atleast_1d(dynesty_out['logz']))
sdat.create_dataset('logzerr',
data=np.atleast_1d(dynesty_out['logzerr']))
sdat.create_dataset('information',
data=np.atleast_1d(dynesty_out['information']))
sdat.create_dataset('lnlikelihood',
data=dynesty_out['logl'])
sdat.create_dataset('lnprobability',
data=(dynesty_out['logl'] +
model.prior_product(dynesty_out['samples'])))
sdat.create_dataset('efficiency',
data=np.atleast_1d(dynesty_out['eff']))
sdat.create_dataset('niter',
data=np.atleast_1d(dynesty_out['niter']))
sdat.create_dataset('samples_id',
data=np.atleast_1d(dynesty_out['samples_id']))
# JSON Attrs
sdat.attrs['ncall'] = json.dumps(dynesty_out['ncall'].tolist())
sdat.attrs['theta_labels'] = json.dumps(list(model.theta_labels()))
sdat.attrs['sampling_duration'] = json.dumps(tsample)
hf.flush()
def write_h5_header(hf, run_params, model):
"""Write header information about the run.
"""
serialize = {'run_params': run_params,
'model_params': [functions_to_names(p.copy())
for p in model.config_list],
'paramfile_text': paramfile_string(**run_params)}
for k, v in list(serialize.items()):
try:
hf.attrs[k] = json.dumps(v) #, cls=NumpyEncoder)
except(TypeError):
# Should this fall back to pickle.dumps?
hf.attrs[k] = pick(v)
warnings.warn("Could not JSON serialize {}, pickled instead".format(k),
RuntimeWarning)
except:
hf.attrs[k] = unserial
warnings.warn("Could not serialize {}".format(k), RuntimeWarning)
hf.flush()
def write_obs_to_h5(hf, obs):
"""Write observational data to the hdf5 file
"""
try:
odat = hf.create_group('obs')
except(ValueError):
# We already have an 'obs' group
return
for k, v in list(obs.items()):
if k == 'filters':
try:
v = [f.name for f in v]
except:
pass
if isinstance(v, np.ndarray):
odat.create_dataset(k, data=v)
else:
try:
odat.attrs[k] = json.dumps(v) #, cls=NumpyEncoder)
except(TypeError):
# Should this fall back to pickle.dumps?
odat.attrs[k] = pick(v)
warnings.warn("Could not JSON serialize {}, pickled instead".format(k))
except:
odat.attrs[k] = unserial
warnings.warn("Could not serialize {}".format(k))
hf.flush()
def optresultlist_to_ndarray(results):
npar, nout = len(results[0].x), len(results[0].fun)
dt = [("success", np.bool), ("message", "S50"), ("nfev", np.int),
("x", (np.float, npar)), ("fun", (np.float, nout))]
out = np.zeros(len(results), dtype=np.dtype(dt))
for i, r in enumerate(results):
for f in out.dtype.names:
out[i][f] = r[f]
return out
def chain_to_struct(chain, model=None, names=None):
"""Given a (flat)chain (or parameter dictionary) and a model, convert the
chain to a structured array
:param chain:
        A chain, ndarray of shape (nsamples, ndim) or a dictionary of
parameters, values of which are numpy datatypes.
:param model:
A ProspectorParams instance
:returns struct:
A structured ndarray of parameter values.
"""
indict = type(chain) == dict
if indict:
return dict_to_struct(chain)
else:
        n = np.prod(chain.shape[:-1])
        if model is not None:
            assert model.ndim == chain.shape[-1]
model.set_parameters(chain[0])
names = model.free_params
dt = [(p, model.params[p].dtype, model.params[p].shape)
for p in names]
else:
dt = [(str(p), "<f8", (1,)) for p in names]
struct = np.zeros(n, dtype=np.dtype(dt))
for i, p in enumerate(names):
if model is not None:
inds = model.theta_index[p]
else:
inds = slice(i, i+1, None)
            struct[p] = chain[..., inds].reshape(n, -1)
return struct
def dict_to_struct(indict):
dt = [(p, indict[p].dtype, indict[p].shape)
for p in indict.keys()]
struct = np.zeros(1, dtype=np.dtype(dt))
for i, p in enumerate(indict.keys()):
struct[p] = indict[p]
    return struct
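# --- Illustrative sketch (not part of the original module) ---
# A minimal example of chain_to_struct with a plain parameter dictionary
# (which routes through dict_to_struct).  The parameter names and values are
# made up.
def _example_chain_to_struct():
    params = {"mass": np.array([1e10]), "zred": np.array([0.1])}
    struct = chain_to_struct(params)
    # a length-1 structured array with one field per parameter
    return struct["mass"], struct["zred"]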
def write_pickles(run_params, model, obs, sampler, powell_results,
outroot=None, tsample=None, toptimize=None,
post_burnin_center=None, post_burnin_prob=None,
sampling_initial_center=None, simpleout=False, **extras):
"""Write results to two different pickle files. One (``*_mcmc``) contains
only lists, dictionaries, and numpy arrays and is therefore robust to
changes in object definitions. The other (``*_model``) contains the actual
model object (and minimization result objects) and is therefore more
fragile.
"""
if outroot is None:
tt = int(time.time())
outroot = '{1}_{0}'.format(tt, run_params['outfile'])
bgh = githash(**run_params)
paramfile_text = paramfile_string(**run_params)
write_model_pickle(outroot + '_model', model, bgh=bgh, powell=powell_results,
paramfile_text=paramfile_text)
if simpleout and _has_h5py_:
return
# write out a simple chain as a pickle. This isn't really necessary since
    # the hdf5 usually works
results = {}
# Useful global info and parameters
results['run_params'] = run_params
results['obs'] = obs
results['model_params'] = [functions_to_names(p.copy()) for p in model.config_list]
results['theta_labels'] = list(model.theta_labels())
    # Parameter values at various phases
results['initial_theta'] = model.initial_theta
results['sampling_initial_center'] = sampling_initial_center
results['post_burnin_center'] = post_burnin_center
results['post_burnin_prob'] = post_burnin_prob
# Chain and ancillary sampling info
results['chain'] = sampler.chain
results['lnprobability'] = sampler.lnprobability
results['acceptance'] = sampler.acceptance_fraction
results['rstate'] = sampler.random_state
results['sampling_duration'] = tsample
results['optimizer_duration'] = toptimize
results['prospector_version'] = bgh
results['paramfile_text'] = paramfile_text
with open(outroot + '_mcmc', "wb") as out:
pickle.dump(results, out)
def write_model_pickle(outname, model, bgh=None, powell=None, **kwargs):
model_store = {}
model_store['powell'] = powell
model_store['model'] = model
model_store['prospector_version'] = bgh
for k, v in kwargs.items():
try:
model_store[k] = v
except:
pass
with open(outname, "wb") as out:
pickle.dump(model_store, out)
def functions_to_names(p):
"""Replace prior and dust functions (or objects) with the names of those
functions (or pickles).
"""
for k, v in list(p.items()):
if callable(v):
try:
p[k] = [v.__name__, v.__module__]
except(AttributeError):
p[k] = pickle.dumps(v, protocol=2)
return p
| 15,472 | 33.006593 | 95 |
py
|
prospector
|
prospector-master/prospect/io/__init__.py
|
from . import write_results
from . import read_results
__all__ = ["write_results", "read_results"]
| 100 | 19.2 | 43 |
py
|
prospector
|
prospector-master/prospect/models/sedmodel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""sedmodel.py - classes and methods for storing parameters and predicting
observed spectra and photometry from them, given a Source object.
"""
import numpy as np
import os
from numpy.polynomial.chebyshev import chebval, chebvander
from .parameters import ProspectorParams
from scipy.stats import multivariate_normal as mvn
from sedpy.observate import getSED
from ..sources.constants import to_cgs_at_10pc as to_cgs
from ..sources.constants import cosmo, lightspeed, ckms, jansky_cgs
from ..utils.smoothing import smoothspec
__all__ = ["SpecModel", "PolySpecModel",
"SedModel", "PolySedModel", "PolyFitModel"]
class SedModel(ProspectorParams):
"""A subclass of :py:class:`ProspectorParams` that passes the models
through to an ``sps`` object and returns spectra and photometry, including
optional spectroscopic calibration and sky emission.
"""
def predict(self, theta, obs=None, sps=None, **extras):
"""Given a ``theta`` vector, generate a spectrum, photometry, and any
extras (e.g. stellar mass), including any calibration effects.
:param theta:
ndarray of parameter values, of shape ``(ndim,)``
:param obs:
An observation dictionary, containing the output wavelength array,
the photometric filter lists, and the observed fluxes and
uncertainties thereon. Assumed to be the result of
:py:func:`utils.obsutils.rectify_obs`
:param sps:
An `sps` object to be used in the model generation. It must have
the :py:func:`get_spectrum` method defined.
:param sigma_spec: (optional, unused)
The covariance matrix for the spectral noise. It is only used for
emission line marginalization.
:returns spec:
The model spectrum for these parameters, at the wavelengths
specified by ``obs['wavelength']``, including multiplication by the
calibration vector. Units of maggies
:returns phot:
The model photometry for these parameters, for the filters
specified in ``obs['filters']``. Units of maggies.
:returns extras:
Any extra aspects of the model that are returned. Typically this
will be `mfrac` the ratio of the surviving stellar mass to the
stellar mass formed.
"""
s, p, x = self.sed(theta, obs, sps=sps, **extras)
self._speccal = self.spec_calibration(obs=obs, **extras)
if obs.get('logify_spectrum', False):
s = np.log(s) + np.log(self._speccal)
else:
s *= self._speccal
return s, p, x
def sed(self, theta, obs=None, sps=None, **kwargs):
"""Given a vector of parameters ``theta``, generate a spectrum, photometry,
        and any extras (e.g. surviving mass fraction), **not** including any
instrument calibration effects. The intrinsic spectrum thus produced is
cached in `_spec` attribute
:param theta:
ndarray of parameter values.
:param obs:
An observation dictionary, containing the output wavelength array,
the photometric filter lists, and the observed fluxes and
uncertainties thereon. Assumed to be the result of
:py:func:`utils.obsutils.rectify_obs`
:param sps:
An `sps` object to be used in the model generation. It must have
the :py:func:`get_spectrum` method defined.
:returns spec:
The model spectrum for these parameters, at the wavelengths
specified by ``obs['wavelength']``. Default units are maggies, and
the calibration vector is **not** applied.
:returns phot:
The model photometry for these parameters, for the filters
specified in ``obs['filters']``. Units are maggies.
:returns extras:
Any extra aspects of the model that are returned. Typically this
will be `mfrac` the ratio of the surviving stellar mass to the
            stellar mass formed.
"""
self.set_parameters(theta)
spec, phot, extras = sps.get_spectrum(outwave=obs['wavelength'],
filters=obs['filters'],
component=obs.get('component', -1),
lnwavegrid=obs.get('lnwavegrid', None),
**self.params)
spec *= obs.get('normalization_guess', 1.0)
# Remove negative fluxes.
try:
tiny = 1.0 / len(spec) * spec[spec > 0].min()
spec[spec < tiny] = tiny
except:
pass
spec = (spec + self.sky(obs))
self._spec = spec.copy()
return spec, phot, extras
def sky(self, obs):
"""Model for the *additive* sky emission/absorption"""
return 0.
def spec_calibration(self, theta=None, obs=None, **kwargs):
"""Implements an overall scaling of the spectrum, given by the
parameter ``'spec_norm'``
:returns cal: (float)
A scalar multiplicative factor that gives the ratio between the true
spectrum and the observed spectrum
"""
if theta is not None:
self.set_parameters(theta)
return 1.0 * self.params.get('spec_norm', 1.0)
def wave_to_x(self, wavelength=None, mask=slice(None), **extras):
"""Map unmasked wavelengths to the interval (-1, 1). Masked wavelengths may have x>1, x<-1
:param wavelength:
The input wavelengths. ndarray of shape ``(nwave,)``
:param mask: optional
The mask. slice or boolean array with ``True`` for unmasked elements.
The interval (-1, 1) will be defined only by unmasked wavelength points
:returns x:
The wavelength vector, remapped to the interval (-1, 1).
ndarray of same shape as ``wavelength``
"""
x = wavelength - (wavelength[mask]).min()
x = 2.0 * (x / (x[mask]).max()) - 1.0
return x
def mean_model(self, theta, obs, sps=None, sigma_spec=None, **extras):
"""Legacy wrapper around predict()
"""
return self.predict(theta, obs, sps=sps, sigma=sigma_spec, **extras)
class SpecModel(ProspectorParams):
"""A subclass of :py:class:`ProspectorParams` that passes the models
through to an ``sps`` object and returns spectra and photometry, including
optional spectroscopic calibration, and sky emission.
This class performs most of the conversion from intrinsic model spectrum to
observed quantities, and additionally can compute MAP emission line values
and penalties for marginalization over emission line amplitudes.
"""
def predict(self, theta, obs=None, sps=None, sigma_spec=None, **extras):
"""Given a ``theta`` vector, generate a spectrum, photometry, and any
extras (e.g. stellar mass), including any calibration effects.
:param theta:
ndarray of parameter values, of shape ``(ndim,)``
:param obs:
An observation dictionary, containing the output wavelength array,
the photometric filter lists, and the observed fluxes and
uncertainties thereon. Assumed to be the result of
:py:func:`utils.obsutils.rectify_obs`
:param sps:
An `sps` object to be used in the model generation. It must have
the :py:func:`get_galaxy_spectrum` method defined.
:param sigma_spec: (optional)
The covariance matrix for the spectral noise. It is only used for
emission line marginalization.
:returns spec:
The model spectrum for these parameters, at the wavelengths
specified by ``obs['wavelength']``, including multiplication by the
calibration vector. Units of maggies
:returns phot:
The model photometry for these parameters, for the filters
specified in ``obs['filters']``. Units of maggies.
:returns extras:
Any extra aspects of the model that are returned. Typically this
will be `mfrac` the ratio of the surviving stellar mass to the
stellar mass formed.
"""
# generate and cache model spectrum and info
self.set_parameters(theta)
self._wave, self._spec, self._mfrac = sps.get_galaxy_spectrum(**self.params)
self._zred = self.params.get('zred', 0)
self._eline_wave, self._eline_lum = sps.get_galaxy_elines()
# Flux normalize
self._norm_spec = self._spec * self.flux_norm()
# generate spectrum and photometry for likelihood
# predict_spec should be called before predict_phot
spec = self.predict_spec(obs, sigma_spec)
phot = self.predict_phot(obs['filters'])
return spec, phot, self._mfrac
def predict_spec(self, obs, sigma_spec, **extras):
"""Generate a prediction for the observed spectrum. This method assumes
that the parameters have been set and that the following attributes are
present and correct:
+ ``_wave`` - The SPS restframe wavelength array
+ ``_zred`` - Redshift
+ ``_norm_spec`` - Observed frame spectral fluxes, in units of maggies
+ ``_eline_wave`` and ``_eline_lum`` - emission line parameters from the SPS model
It generates the following attributes
+ ``_outwave``
+ ``_speccal``
+ ``_elinespec``
And if emission line marginalization is being performed, numerous
quantities related to the emission lines are also cached
(see ``get_el()`` for details.)
:param obs:
An observation dictionary, containing the output wavelength array,
the photometric filter lists, and the observed fluxes and
uncertainties thereon. Assumed to be the result of
:py:meth:`utils.obsutils.rectify_obs`
:param sigma_spec: (optional)
The covariance matrix for the spectral noise. It is only used for
emission line marginalization.
:returns spec:
The prediction for the observed frame spectral flux these
parameters, at the wavelengths specified by ``obs['wavelength']``,
including multiplication by the calibration vector.
ndarray of shape ``(nwave,)`` in units of maggies.
"""
# redshift wavelength
obs_wave = self.observed_wave(self._wave, do_wavecal=False)
self._outwave = obs.get('wavelength', obs_wave)
# cache eline parameters
self.cache_eline_parameters(obs)
# smooth and put on output wavelength grid
smooth_spec = self.smoothspec(obs_wave, self._norm_spec)
# calibration
self._speccal = self.spec_calibration(obs=obs, spec=smooth_spec, **extras)
calibrated_spec = smooth_spec * self._speccal
# generate (after fitting) the emission line spectrum
emask = self._eline_wavelength_mask
# If we're marginalizing over emission lines, and at least one pixel
# has an emission line in it
if self.params.get('marginalize_elines', False) & (emask.any()):
self._elinespec = self.get_el(obs, calibrated_spec, sigma_spec)
calibrated_spec[emask] += self._elinespec.sum(axis=1)
# Otherwise, if FSPS is not adding emission lines to the spectrum, we
# add emission lines to valid pixels here.
elif (self.params.get("nebemlineinspec", True) == False) & (emask.any()):
self._elinespec = self.get_eline_spec(wave=self._wave[emask])
if emask.any():
calibrated_spec[emask] += self._elinespec.sum(axis=1)
self._sed = calibrated_spec / self._speccal
return calibrated_spec
def predict_phot(self, filters):
"""Generate a prediction for the observed photometry. This method assumes
that the parameters have been set and that the following attributes are
present and correct:
+ ``_wave`` - The SPS restframe wavelength array
+ ``_zred`` - Redshift
+ ``_norm_spec`` - Observed frame spectral fluxes, in units of maggies.
+ ``_eline_wave`` and ``_eline_lum`` - emission line parameters from the SPS model
:param filters:
List of :py:class:`sedpy.observate.Filter` objects.
If there is no photometry, ``None`` should be supplied
:returns phot:
Observed frame photometry of the model SED through the given filters.
ndarray of shape ``(len(filters),)``, in units of maggies.
If ``filters`` is None, this returns 0.0
"""
if filters is None:
return 0.0
# generate photometry w/o emission lines
obs_wave = self.observed_wave(self._wave, do_wavecal=False)
flambda = self._norm_spec * lightspeed / obs_wave**2 * (3631*jansky_cgs)
mags = getSED(obs_wave, flambda, filters)
phot = np.atleast_1d(10**(-0.4 * mags))
# generate emission-line photometry
if self.params.get('nebemlineinspec', False) == False:
phot += self.nebline_photometry(filters)
return phot
def nebline_photometry(self, filters):
"""Compute the emission line contribution to photometry. This requires
several cached attributes:
+ ``_ewave_obs``
+ ``_eline_lum``
:param filters:
List of :py:class:`sedpy.observate.Filter` objects
:returns nebflux:
The flux of the emission line through the filters, in units of
maggies. ndarray of shape ``(len(filters),)``
"""
elams = self._ewave_obs
# We have to remove the extra (1+z) since this is flux, not a flux density
# Also we convert to cgs
elums = self._eline_lum * self.flux_norm() / (1 + self._zred) * (3631*jansky_cgs)
# loop over filters
flux = np.zeros(len(filters))
for i, filt in enumerate(filters):
# calculate transmission at line wavelengths
trans = np.interp(elams, filt.wavelength, filt.transmission,
left=0., right=0.)
# include all lines where transmission is non-zero
idx = (trans > 0)
            if idx.any():
flux[i] = (trans[idx]*elams[idx]*elums[idx]).sum() / filt.ab_zero_counts
return flux
def flux_norm(self):
"""Compute the scaling required to go from Lsun/Hz/Msun to maggies.
Note this includes the (1+z) factor required for flux densities.
:returns norm: (float)
The normalization factor, scalar float.
"""
# distance factor
if (self._zred == 0) | ('lumdist' in self.params):
lumdist = self.params.get('lumdist', 1e-5)
else:
lumdist = cosmo.luminosity_distance(self._zred).to('Mpc').value
dfactor = (lumdist * 1e5)**2
# Mass normalization
mass = np.sum(self.params.get('mass', 1.0))
# units
unit_conversion = to_cgs / (3631*jansky_cgs) * (1 + self._zred)
return mass * unit_conversion / dfactor
def cache_eline_parameters(self, obs, nsigma=5):
""" This computes and caches a number of quantities that are relevant
for predicting the emission lines, and computing the MAP values thereof,
including
+ ``_ewave_obs`` - Observed frame wavelengths (AA) of all emission lines.
+ ``_eline_sigma_kms`` - Dispersion (in km/s) of all the emission lines
+ ``_elines_to_fit`` - If fitting and marginalizing over emission lines,
this stores indices of the lines to actually fit, as a boolean
array. Only lines that are within ``nsigma`` of an observed
wavelength points are included.
+ ``_eline_wavelength_mask`` - A mask of the `_outwave` vector that
indicates which pixels to use in the emission line fitting.
Only pixels within ``nsigma`` of an emission line are used.
Can be subclassed to add more sophistication
redshift - first looks for ``eline_delta_zred``, and defaults to ``zred``
sigma - first looks for ``eline_sigma``, defaults to 100 km/s
:param nsigma: (float, optional, default: 5.)
Number of sigma from a line center to use for defining which lines
to fit and useful spectral elements for the fitting. float.
"""
# observed wavelengths
eline_z = self.params.get("eline_delta_zred", 0.0)
self._ewave_obs = (1 + eline_z + self._zred) * self._eline_wave
# observed linewidths
nline = self._ewave_obs.shape[0]
self._eline_sigma_kms = np.atleast_1d(self.params.get('eline_sigma', 100.0))
self._eline_sigma_kms = (self._eline_sigma_kms[None] * np.ones(nline)).squeeze()
#self._eline_sigma_lambda = eline_sigma_kms * self._ewave_obs / ckms
# exit gracefully if not fitting lines
if (obs.get('spectrum', None) is None):
self._elines_to_fit = None
self._eline_wavelength_mask = np.array([], dtype=bool)
return
# --- lines to fit ---
# lines specified by user, but remove any lines which do not
# have an observed pixel within 5sigma of their center
eline_names = self.params.get('lines_to_fit', [])
# FIXME: this should be moved to instantiation and only done once
SPS_HOME = os.getenv('SPS_HOME')
emline_info = np.genfromtxt(os.path.join(SPS_HOME, 'data', 'emlines_info.dat'),
dtype=[('wave', 'f8'), ('name', 'S20')],
delimiter=',')
# restrict to specific emission lines?
if (len(eline_names) == 0):
elines_index = np.ones(emline_info.shape, dtype=bool)
else:
elines_index = np.array([True if name in eline_names else False
for name in emline_info['name']], dtype=bool)
eline_sigma_lambda = self._ewave_obs / ckms * self._eline_sigma_kms
new_mask = np.abs(self._outwave-self._ewave_obs[:, None]) < nsigma*eline_sigma_lambda[:, None]
self._elines_to_fit = elines_index & new_mask.any(axis=1)
# --- wavelengths corresponding to those lines ---
# within N sigma of the central wavelength
self._eline_wavelength_mask = new_mask[self._elines_to_fit, :].any(axis=0)
def get_el(self, obs, calibrated_spec, sigma_spec=None):
"""Compute the maximum likelihood and, optionally, MAP emission line
amplitudes for lines that fall within the observed spectral range. Also
compute and cache the analytic penalty to log-likelihood from
marginalizing over the emission line amplitudes. This is cached as
``_ln_eline_penalty``. The emission line amplitudes (in maggies) at
`_eline_lums` are updated to the ML values for the fitted lines.
:param obs:
A dictionary containing the ``'spectrum'`` and ``'unc'`` keys that
are observed fluxes and uncertainties, both ndarrays of shape
``(n_wave,)``
:param calibrated_spec:
The predicted observer-frame spectrum in the same units as the
observed spectrum, ndarray of shape ``(n_wave,)``
:param sigma_spec:
Spectral covariance matrix, if using a non-trivial noise model.
:returns el:
The maximum likelihood emission line flux densities.
ndarray of shape ``(n_wave_neb, n_fitted_lines)`` where
``n_wave_neb`` is the number of wavelength elements within
``nsigma`` of a line, and ``n_fitted_lines`` is the number of lines
that fall within ``nsigma`` of a wavelength pixel. Units are same
as ``calibrated_spec``
"""
# ensure we have no emission lines in spectrum
# and we definitely want them.
assert self.params['nebemlineinspec'] == False
assert self.params['add_neb_emission'] == True
        # generate Gaussians on the appropriate wavelength grid
idx = self._elines_to_fit
emask = self._eline_wavelength_mask
nebwave = self._outwave[emask]
eline_gaussians = self.get_eline_gaussians(lineidx=idx, wave=nebwave)
# generate residuals
delta = obs['spectrum'][emask] - calibrated_spec[emask]
# generate line amplitudes in observed flux units
units_factor = self.flux_norm() / (1 + self._zred)
calib_factor = np.interp(self._ewave_obs[idx], nebwave, self._speccal[emask])
linecal = units_factor * calib_factor
alpha_breve = self._eline_lum[idx] * linecal
# generate inverse of sigma_spec
if sigma_spec is None:
sigma_spec = obs["unc"]**2
sigma_spec = sigma_spec[emask]
if sigma_spec.ndim == 2:
sigma_inv = np.linalg.pinv(sigma_spec)
else:
sigma_inv = np.diag(1. / sigma_spec)
# calculate ML emission line amplitudes and covariance matrix
sigma_alpha_hat = np.linalg.pinv(np.dot(eline_gaussians.T, np.dot(sigma_inv, eline_gaussians)))
alpha_hat = np.dot(sigma_alpha_hat, np.dot(eline_gaussians.T, np.dot(sigma_inv, delta)))
# generate likelihood penalty term (and MAP amplitudes)
# FIXME: Cache line amplitude covariance matrices?
if self.params.get('use_eline_prior', False):
# Incorporate gaussian priors on the amplitudes
sigma_alpha_breve = np.diag((self.params['eline_prior_width'] * np.abs(alpha_breve)))**2
M = np.linalg.pinv(sigma_alpha_hat + sigma_alpha_breve)
alpha_bar = (np.dot(sigma_alpha_breve, np.dot(M, alpha_hat)) +
np.dot(sigma_alpha_hat, np.dot(M, alpha_breve)))
sigma_alpha_bar = np.dot(sigma_alpha_hat, np.dot(M, sigma_alpha_breve))
K = ln_mvn(alpha_hat, mean=alpha_breve, cov=sigma_alpha_breve+sigma_alpha_hat) - \
ln_mvn(alpha_hat, mean=alpha_hat, cov=sigma_alpha_hat)
else:
            # simply use the ML values and associated marginalization penalty
alpha_bar = alpha_hat
K = ln_mvn(alpha_hat, mean=alpha_hat, cov=sigma_alpha_hat)
# Cache the ln-penalty
self._ln_eline_penalty = K
# Store fitted emission line luminosities in physical units
self._eline_lum[idx] = alpha_bar / linecal
# return the maximum-likelihood line spectrum in observed units
return alpha_hat * eline_gaussians
def get_eline_spec(self, wave=None):
"""Compute a complete model emission line spectrum. This should only
be run after calling predict(), as it accesses cached information.
Relatively slow, useful for display purposes
:param wave: (optional, default: ``None``)
The wavelength ndarray on which to compute the emission line spectrum.
If not supplied, the ``_outwave`` vector is used.
:returns eline_spec:
An (n_line, n_wave) ndarray
"""
gaussians = self.get_eline_gaussians(wave=wave)
elums = self._eline_lum * self.flux_norm() / (1 + self._zred)
return elums * gaussians
def get_eline_gaussians(self, lineidx=slice(None), wave=None):
"""Generate a set of unit normals with centers and widths given by the
previously cached emission line observed-frame wavelengths and emission
line widths.
:param lineidx: (optional)
A boolean array or integer array used to subscript the cached
lines. Gaussian vectors will only be constructed for the lines
thus subscripted.
:param wave: (optional)
The wavelength array (in Angstroms) used to construct the gaussian
vectors. If not given, the cached `_outwave` array will be used.
:returns gaussians:
The unit gaussians for each line, in units Lsun/Hz.
ndarray of shape (n_wave, n_line)
"""
if wave is None:
warr = self._outwave
else:
warr = wave
# generate gaussians
mu = np.atleast_2d(self._ewave_obs[lineidx])
sigma = np.atleast_2d(self._eline_sigma_kms[lineidx])
dv = ckms * (warr[:, None]/mu - 1)
dv_dnu = ckms * warr[:, None]**2 / (lightspeed * mu)
eline_gaussians = 1. / (sigma * np.sqrt(np.pi * 2)) * np.exp(-dv**2 / (2 * sigma**2))
eline_gaussians *= dv_dnu
# outside of the wavelengths defined by the spectrum? (why this dependence?)
# FIXME what is this?
eline_gaussians /= -np.trapz(eline_gaussians, 3e18/warr[:, None], axis=0)
return eline_gaussians
def smoothspec(self, wave, spec):
"""Smooth the spectrum. See :py:func:`prospect.utils.smoothing.smoothspec`
for details.
"""
sigma = self.params.get("sigma_smooth", 100)
outspec = smoothspec(wave, spec, sigma, outwave=self._outwave, **self.params)
return outspec
def observed_wave(self, wave, do_wavecal=False):
"""Convert the restframe wavelngth grid to the observed frame wavelength
grid, optionally including wavelength calibration adjustments. Requires
that the ``_zred`` attribute is already set.
:param wave:
The wavelength array
"""
# FIXME: missing wavelength calibration code
if do_wavecal:
raise NotImplementedError
a = 1 + self._zred
return wave * a
def wave_to_x(self, wavelength=None, mask=slice(None), **extras):
"""Map unmasked wavelengths to the interval -1, 1
masked wavelengths may have x>1, x<-1
"""
x = wavelength - (wavelength[mask]).min()
x = 2.0 * (x / (x[mask]).max()) - 1.0
return x
def spec_calibration(self, **kwargs):
return np.ones_like(self._outwave)
def mean_model(self, theta, obs, sps=None, sigma=None, **extras):
"""Legacy wrapper around predict()
"""
return self.predict(theta, obs, sps=sps, sigma_spec=sigma, **extras)
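# --- Illustrative sketch (not part of the original module) ---
# The linear algebra at the heart of SpecModel.get_el(), written out for a toy
# problem: given unit line profiles G with shape (n_pix, n_line), an inverse
# data covariance Sigma^-1, and residuals delta = data - continuum model, the
# maximum-likelihood line amplitudes are
#     alpha_hat = (G^T Sigma^-1 G)^-1 G^T Sigma^-1 delta.
# All numbers below are synthetic.
def _example_line_amplitudes():
    rng = np.random.default_rng(0)
    npix, nline = 50, 2
    G = np.abs(rng.normal(size=(npix, nline)))    # toy stand-in for the line gaussians
    delta = rng.normal(size=npix)                 # toy residual spectrum
    sigma_inv = np.eye(npix)                      # inverse covariance (uncorrelated, unit)
    sigma_alpha_hat = np.linalg.pinv(G.T @ sigma_inv @ G)
    alpha_hat = sigma_alpha_hat @ (G.T @ (sigma_inv @ delta))
    return alpha_hat, sigma_alpha_hat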
class PolySpecModel(SpecModel):
"""This is a subclass of *SpecModel* that generates the multiplicative
calibration vector at each model `predict` call as the maximum likelihood
chebyshev polynomial describing the ratio between the observed and the model
spectrum.
"""
def spec_calibration(self, theta=None, obs=None, spec=None, **kwargs):
"""Implements a Chebyshev polynomial calibration model. This uses
least-squares to find the maximum-likelihood Chebyshev polynomial of a
certain order describing the ratio of the observed spectrum to the model
spectrum, conditional on all other parameters, using least squares. If
emission lines are being marginalized out, they are excluded from the
least-squares fit.
:returns cal:
            A polynomial given by :math:`\sum_{m=0}^{M} a_{m} T_m(x)`.
"""
if theta is not None:
self.set_parameters(theta)
# norm = self.params.get('spec_norm', 1.0)
polyopt = ((self.params.get('polyorder', 0) > 0) &
(obs.get('spectrum', None) is not None))
if polyopt:
order = self.params['polyorder']
# generate mask
# remove region around emission lines if doing analytical marginalization
mask = obs.get('mask', np.ones_like(obs['wavelength'], dtype=bool)).copy()
if self.params.get('marginalize_elines', False):
mask[self._eline_wavelength_mask] = 0
# map unmasked wavelengths to the interval -1, 1
# masked wavelengths may have x>1, x<-1
x = self.wave_to_x(obs["wavelength"], mask)
y = (obs['spectrum'] / spec)[mask] - 1.0
yerr = (obs['unc'] / spec)[mask]
yvar = yerr**2
A = chebvander(x[mask], order)
ATA = np.dot(A.T, A / yvar[:, None])
reg = self.params.get('poly_regularization', 0.)
if np.any(reg > 0):
ATA += reg**2 * np.eye(order)
ATAinv = np.linalg.inv(ATA)
c = np.dot(ATAinv, np.dot(A.T, y / yvar))
Afull = chebvander(x, order)
poly = np.dot(Afull, c)
self._poly_coeffs = c
else:
poly = np.zeros_like(self._outwave)
return (1.0 + poly)
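# --- Illustrative sketch (not part of the original module) ---
# A standalone toy version of the weighted least-squares Chebyshev fit done in
# PolySpecModel.spec_calibration(): map wavelengths to x in (-1, 1), build the
# Chebyshev design matrix, and solve for the polynomial that multiplies the
# model to best match the data.  All arrays here are synthetic.
def _example_cheb_calibration(order=3):
    wave = np.linspace(4000., 7000., 200)
    model_spec = np.ones_like(wave)
    data = model_spec * (1.0 + 0.05 * (wave - 5500.) / 1500.)  # fake calibration tilt
    unc = 0.01 * np.ones_like(wave)
    x = 2.0 * (wave - wave.min()) / (wave.max() - wave.min()) - 1.0
    y = data / model_spec - 1.0
    yvar = (unc / model_spec)**2
    A = chebvander(x, order)
    ATA = np.dot(A.T, A / yvar[:, None])
    c = np.dot(np.linalg.inv(ATA), np.dot(A.T, y / yvar))
    cal = 1.0 + np.dot(A, c)
    return cal, c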
class PolySedModel(SedModel):
"""This is a subclass of SedModel that replaces the calibration vector with
    the maximum likelihood chebyshev polynomial describing the ratio between
    the observed and the model spectrum.
"""
def spec_calibration(self, theta=None, obs=None, **kwargs):
"""Implements a Chebyshev polynomial calibration model. This uses
least-squares to find the maximum-likelihood Chebyshev polynomial of a
certain order describing the ratio of the observed spectrum to the
model spectrum, conditional on all other parameters, using least
squares. The first coefficient is always set to 1, as the overall
normalization is controlled by ``spec_norm``.
:returns cal:
            A polynomial given by ``spec_norm`` :math:`\times (1 + \sum_{m=1}^{M} a_{m} T_m(x))`.
"""
if theta is not None:
self.set_parameters(theta)
norm = self.params.get('spec_norm', 1.0)
polyopt = ((self.params.get('polyorder', 0) > 0) &
(obs.get('spectrum', None) is not None))
if polyopt:
order = self.params['polyorder']
mask = obs.get('mask', slice(None))
# map unmasked wavelengths to the interval -1, 1
# masked wavelengths may have x>1, x<-1
x = self.wave_to_x(obs["wavelength"], mask)
y = (obs['spectrum'] / self._spec)[mask] / norm - 1.0
yerr = (obs['unc'] / self._spec)[mask] / norm
yvar = yerr**2
A = chebvander(x[mask], order)[:, 1:]
ATA = np.dot(A.T, A / yvar[:, None])
reg = self.params.get('poly_regularization', 0.)
if np.any(reg > 0):
ATA += reg**2 * np.eye(order)
ATAinv = np.linalg.inv(ATA)
c = np.dot(ATAinv, np.dot(A.T, y / yvar))
Afull = chebvander(x, order)[:, 1:]
poly = np.dot(Afull, c)
self._poly_coeffs = c
else:
poly = 0.0
return (1.0 + poly) * norm
class PolyFitModel(SedModel):
"""This is a subclass of *SedModel* that generates the multiplicative
calibration vector as a Chebyshev polynomial described by the
``'poly_coeffs'`` parameter of the model, which may be free (fittable)
"""
def spec_calibration(self, theta=None, obs=None, **kwargs):
"""Implements a Chebyshev polynomial calibration model. This only
occurs if ``"poly_coeffs"`` is present in the :py:attr:`params`
dictionary, otherwise the value of ``params["spec_norm"]`` is returned.
:param theta: (optional)
If given, set :py:attr:`params` using this vector before
calculating the calibration polynomial. ndarray of shape
``(ndim,)``
:param obs:
A dictionary of observational data, must contain the key
``"wavelength"``
:returns cal:
If ``params["cal_type"]`` is ``"poly"``, a polynomial given by
            ``spec_norm`` :math:`\times (1 + \sum_{m=1}^{M} c_{m} T_m(x))`,
            where :math:`c_{m}` corresponds to ``poly_coeffs[m-1]``.
Otherwise, the exponential of a Chebyshev polynomial.
"""
if theta is not None:
self.set_parameters(theta)
if ('poly_coeffs' in self.params):
mask = obs.get('mask', slice(None))
# map unmasked wavelengths to the interval -1, 1
# masked wavelengths may have x>1, x<-1
x = self.wave_to_x(obs["wavelength"], mask)
# get coefficients. Here we are setting the first term to 0 so we
# can deal with it separately for the exponential and regular
# multiplicative cases
c = np.insert(self.params['poly_coeffs'], 0, 0)
poly = chebval(x, c)
# switch to have spec_norm be multiplicative or additive depending
# on whether the calibration model is multiplicative in exp^poly or
# just poly
if self.params.get('cal_type', 'exp_poly') == 'poly':
return (1.0 + poly) * self.params.get('spec_norm', 1.0)
else:
return np.exp(self.params.get('spec_norm', 0) + poly)
else:
return 1.0 * self.params.get('spec_norm', 1.0)
def ln_mvn(x, mean=None, cov=None):
"""Calculates the natural logarithm of the multivariate normal PDF
evaluated at `x`
:param x:
locations where samples are desired.
:param mean:
Center(s) of the gaussians.
:param cov:
Covariances of the gaussians.
"""
ndim = mean.shape[-1]
dev = x - mean
log_2pi = np.log(2 * np.pi)
sign, log_det = np.linalg.slogdet(cov)
exp = np.dot(dev.T, np.dot(np.linalg.pinv(cov, rcond=1e-12), dev))
return -0.5 * (ndim * log_2pi + log_det + exp)
def gauss(x, mu, A, sigma):
"""Sample multiple gaussians at positions x.
:param x:
locations where samples are desired.
:param mu:
Center(s) of the gaussians.
:param A:
Amplitude(s) of the gaussians, defined in terms of total area.
:param sigma:
        Dispersion(s) of the gaussians, in units of x.
:returns val:
The values of the sum of gaussians at x.
"""
mu, A, sigma = np.atleast_2d(mu), np.atleast_2d(A), np.atleast_2d(sigma)
val = A / (sigma * np.sqrt(np.pi * 2)) * np.exp(-(x[:, None] - mu)**2 / (2 * sigma**2))
return val.sum(axis=-1)
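# --- Illustrative sketch (not part of the original module) ---
# A quick numerical check of ln_mvn against scipy's multivariate normal
# log-pdf (scipy.stats.multivariate_normal is already imported above as mvn).
# The numbers are arbitrary.
def _example_check_ln_mvn():
    mean = np.array([0.0, 1.0])
    cov = np.array([[1.0, 0.3], [0.3, 2.0]])
    x = np.array([0.5, 0.5])
    ours = ln_mvn(x, mean=mean, cov=cov)
    scipys = mvn(mean=mean, cov=cov).logpdf(x)
    assert np.allclose(ours, scipys)
    return ours, scipys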
| 34,356 | 40.89878 | 105 |
py
|
prospector
|
prospector-master/prospect/models/priors.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""priors.py -- This module contains various objects to be used as priors.
When called these return the ln-prior-probability, and they can also be used to
construct prior transforms (for nested sampling) and can be sampled from.
"""
import numpy as np
import scipy.stats
__all__ = ["Prior", "TopHat", "Normal", "ClippedNormal",
"LogNormal", "LogUniform", "Beta",
"StudentT", "SkewNormal"]
class Prior(object):
"""Encapsulate the priors in an object. Each prior should have a
distribution name and optional parameters specifying scale and location
(e.g. min/max or mean/sigma). These can be aliased at instantiation using
the ``parnames`` keyword. When called, the argument should be a variable
and the object should return the ln-prior-probability of that value.
.. code-block:: python
ln_prior_prob = Prior()(value)
Should be able to sample from the prior, and to get the gradient of the
    prior at any variable value. Methods should also be available to give a
useful plotting range and, if there are bounds, to return them.
:param parnames:
A list of names of the parameters, used to alias the intrinsic
parameter names. This way different instances of the same Prior can
have different parameter names, in case they are being fit for....
"""
def __init__(self, parnames=[], name='', **kwargs):
"""Constructor.
:param parnames:
A list of names of the parameters, used to alias the intrinsic
parameter names. This way different instances of the same Prior
can have different parameter names, in case they are being fit for....
"""
if len(parnames) == 0:
parnames = self.prior_params
assert len(parnames) == len(self.prior_params)
self.alias = dict(zip(self.prior_params, parnames))
self.params = {}
self.name = name
self.update(**kwargs)
def __repr__(self):
argstring = ['{}={}'.format(k, v) for k, v in list(self.params.items())]
return '{}({})'.format(self.__class__, ",".join(argstring))
def update(self, **kwargs):
"""Update `params` values using alias.
"""
for k in self.prior_params:
try:
self.params[k] = kwargs[self.alias[k]]
except(KeyError):
pass
# FIXME: Should add a check for unexpected kwargs.
def __len__(self):
"""The length is set by the maximum size of any of the prior_params.
        Note that the prior params must therefore each be a scalar or have the
        same length as the largest prior param. This is not checked.
"""
return max([np.size(self.params.get(k, 1)) for k in self.prior_params])
def __call__(self, x, **kwargs):
"""Compute the value of the probability desnity function at x and
return the ln of that.
:param x:
Value of the parameter, scalar or iterable of same length as the
Prior object.
:param kwargs: optional
            All extra keyword arguments are used to update the `prior_params`.
:returns lnp:
The natural log of the prior probability at x, scalar or ndarray of
same length as the prior object.
"""
if len(kwargs) > 0:
self.update(**kwargs)
pdf = self.distribution.pdf
try:
p = pdf(x, *self.args, loc=self.loc, scale=self.scale)
except(ValueError):
# Deal with `x` vectors of shape (nsamples, len(prior))
# for pdfs that don't broadcast nicely.
p = [pdf(_x, *self.args, loc=self.loc, scale=self.scale)
for _x in x]
p = np.array(p)
with np.errstate(invalid='ignore'):
lnp = np.log(p)
return lnp
def sample(self, nsample=None, **kwargs):
"""Draw a sample from the prior distribution.
:param nsample: (optional)
Unused
"""
if len(kwargs) > 0:
self.update(**kwargs)
return self.distribution.rvs(*self.args, size=len(self),
loc=self.loc, scale=self.scale)
def unit_transform(self, x, **kwargs):
"""Go from a value of the CDF (between 0 and 1) to the corresponding
parameter value.
:param x:
A scalar or vector of same length as the Prior with values between
zero and one corresponding to the value of the CDF.
:returns theta:
The parameter value corresponding to the value of the CDF given by
`x`.
"""
if len(kwargs) > 0:
self.update(**kwargs)
return self.distribution.ppf(x, *self.args,
loc=self.loc, scale=self.scale)
def inverse_unit_transform(self, x, **kwargs):
"""Go from the parameter value to the unit coordinate using the cdf.
"""
if len(kwargs) > 0:
self.update(**kwargs)
return self.distribution.cdf(x, *self.args,
loc=self.loc, scale=self.scale)
def gradient(self, theta):
raise(NotImplementedError)
@property
def loc(self):
"""This should be overridden.
"""
return 0
@property
def scale(self):
"""This should be overridden.
"""
return 1
@property
def args(self):
return []
@property
def range(self):
raise(NotImplementedError)
@property
def bounds(self):
raise(NotImplementedError)
def serialize(self):
raise(NotImplementedError)
class TopHat(Prior):
"""A simple uniform prior, described by two parameters
:param mini:
Minimum of the distribution
:param maxi:
Maximum of the distribution
"""
prior_params = ['mini', 'maxi']
distribution = scipy.stats.uniform
@property
def scale(self):
return self.params['maxi'] - self.params['mini']
@property
def loc(self):
return self.params['mini']
@property
def range(self):
return (self.params['mini'], self.params['maxi'])
def bounds(self, **kwargs):
if len(kwargs) > 0:
self.update(**kwargs)
return self.range
class Normal(Prior):
"""A simple gaussian prior.
:param mean:
Mean of the distribution
:param sigma:
Standard deviation of the distribution
"""
prior_params = ['mean', 'sigma']
distribution = scipy.stats.norm
@property
def scale(self):
return self.params['sigma']
@property
def loc(self):
return self.params['mean']
@property
def range(self):
nsig = 4
return (self.params['mean'] - nsig * self.params['sigma'],
self.params['mean'] + nsig * self.params['sigma'])
def bounds(self, **kwargs):
#if len(kwargs) > 0:
# self.update(**kwargs)
return (-np.inf, np.inf)
class ClippedNormal(Prior):
"""A Gaussian prior clipped to some range.
:param mean:
Mean of the normal distribution
:param sigma:
Standard deviation of the normal distribution
:param mini:
Minimum of the distribution
:param maxi:
Maximum of the distribution
"""
prior_params = ['mean', 'sigma', 'mini', 'maxi']
distribution = scipy.stats.truncnorm
@property
def scale(self):
return self.params['sigma']
@property
def loc(self):
return self.params['mean']
@property
def range(self):
return (self.params['mini'], self.params['maxi'])
@property
def args(self):
a = (self.params['mini'] - self.params['mean']) / self.params['sigma']
b = (self.params['maxi'] - self.params['mean']) / self.params['sigma']
return [a, b]
def bounds(self, **kwargs):
if len(kwargs) > 0:
self.update(**kwargs)
return self.range
class LogUniform(Prior):
"""Like log-normal, but the distribution of natural log of the variable is
distributed uniformly instead of normally.
:param mini:
Minimum of the distribution
:param maxi:
Maximum of the distribution
"""
prior_params = ['mini', 'maxi']
distribution = scipy.stats.reciprocal
@property
def args(self):
a = self.params['mini']
b = self.params['maxi']
return [a, b]
@property
def range(self):
return (self.params['mini'], self.params['maxi'])
def bounds(self, **kwargs):
if len(kwargs) > 0:
self.update(**kwargs)
return self.range
class Beta(Prior):
"""A Beta distribution.
:param mini:
Minimum of the distribution
:param maxi:
Maximum of the distribution
:param alpha:
:param beta:
"""
prior_params = ['mini', 'maxi', 'alpha', 'beta']
distribution = scipy.stats.beta
@property
def scale(self):
return self.params.get('maxi', 1) - self.params.get('mini', 0)
@property
def loc(self):
return self.params.get('mini', 0)
@property
def args(self):
a = self.params['alpha']
b = self.params['beta']
return [a, b]
@property
def range(self):
return (self.params.get('mini',0), self.params.get('maxi',1))
def bounds(self, **kwargs):
if len(kwargs) > 0:
self.update(**kwargs)
return self.range
class LogNormal(Prior):
"""A log-normal prior, where the natural log of the variable is distributed
normally. Useful for parameters that cannot be less than zero.
Note that ``LogNormal(np.exp(mode) / f) == LogNormal(np.exp(mode) * f)``
and ``f = np.exp(sigma)`` corresponds to "one sigma" from the peak.
:param mode:
Natural log of the variable value at which the probability density is
highest.
:param sigma:
Standard deviation of the distribution of the natural log of the
variable.
"""
prior_params = ['mode', 'sigma']
distribution = scipy.stats.lognorm
@property
def args(self):
return [self.params["sigma"]]
@property
def scale(self):
return np.exp(self.params["mode"] + self.params["sigma"]**2)
@property
def loc(self):
return 0
@property
def range(self):
nsig = 4
return (np.exp(self.params['mode'] + (nsig * self.params['sigma'])),
np.exp(self.params['mode'] - (nsig * self.params['sigma'])))
def bounds(self, **kwargs):
return (0, np.inf)
class LogNormalLinpar(Prior):
"""A log-normal prior, where the natural log of the variable is distributed
normally. Useful for parameters that cannot be less than zero.
LogNormal(mode=x, sigma=y) is equivalent to
LogNormalLinpar(mode=np.exp(x), sigma_factor=np.exp(y))
:param mode:
The (linear) value of the variable where the probability density is
highest. Must be > 0.
:param sigma_factor:
The (linear) factor describing the dispersion of the log of the
variable. Must be > 0
"""
prior_params = ['mode', 'sigma_factor']
distribution = scipy.stats.lognorm
@property
def args(self):
return [np.log(self.params["sigma_factor"])]
@property
def scale(self):
k = self.params["sigma_factor"]**np.log(self.params["sigma_factor"])
return self.params["mode"] * k
@property
def loc(self):
return 0
@property
def range(self):
nsig = 4
return (self.params['mode'] * (nsig * self.params['sigma_factor']),
self.params['mode'] / (nsig * self.params['sigma_factor']))
def bounds(self, **kwargs):
return (0, np.inf)
class SkewNormal(Prior):
"""A normal distribution including a skew parameter
:param location:
Center (*not* mean, mode, or median) of the distribution.
The center will approach the mean as skew approaches zero.
:param sigma:
Standard deviation of the distribution
:param skew:
Skewness of the distribution
"""
prior_params = ['location', 'sigma', 'skew']
distribution = scipy.stats.skewnorm
@property
def args(self):
return [self.params['skew']]
@property
def scale(self):
return self.params['sigma']
@property
def loc(self):
return self.params['location']
@property
def range(self):
nsig = 4
return (self.params['location'] - nsig * self.params['sigma'],
self.params['location'] + nsig * self.params['sigma'])
def bounds(self, **kwargs):
return (-np.inf, np.inf)
class StudentT(Prior):
"""A Student's T distribution
:param mean:
Mean of the distribution
:param scale:
Size of the distribution, analogous to the standard deviation
:param df:
Number of degrees of freedom
"""
prior_params = ['mean', 'scale', 'df']
distribution = scipy.stats.t
@property
def args(self):
return [self.params['df']]
@property
def scale(self):
return self.params['scale']
@property
def loc(self):
return self.params['mean']
@property
def range(self):
return scipy.stats.t.interval(0.995, self.params['df'], self.params['mean'], self.params['scale'])
def bounds(self, **kwargs):
return (-np.inf, np.inf)
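# A minimal usage sketch (illustrative only; it assumes the ``Prior`` base
# class defined earlier in this module makes instances callable and provides
# ``unit_transform``, ``bounds`` and ``__len__``, as ``ProspectorParams`` relies
# on when computing prior products and prior transforms):
#
#     >>> import numpy as np
#     >>> prior = ClippedNormal(mean=1.0, sigma=0.1, mini=0.0, maxi=2.0)
#     >>> prior.bounds()                       # (0.0, 2.0)
#     >>> lnp = prior(np.array([0.9, 1.1]))    # ln(prior probability) at each value
#     >>> x = prior.unit_transform(0.5)        # map a unit-cube draw to parameter space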
| 13,637 | 26.330661 | 106 |
py
|
prospector
|
prospector-master/prospect/models/parameters.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""parameters.py -- This module contains the prospector base class for
models, ProspectorParams. This class is responsible for maintaining model
parameter state, converting between parameter dictionaries and vectors,
and computing parameter dependencies and prior probabilities.
"""
from copy import deepcopy
import warnings
import numpy as np
import json, pickle
from . import priors
from .templates import describe
__all__ = ["ProspectorParams"]
# A template for what parameter configuration list element should look like
param_template = {'name': '',
'N': 1,
'isfree': True,
'init': 0.5, 'units': '',
'prior': priors.TopHat(mini=0, maxi=1.0),
'depends_on': None}
class ProspectorParams(object):
"""
This is the base model class that holds model parameters and information
about them (e.g. priors, bounds, transforms, free vs fixed state). In
addition to the documented methods, it contains several important
attributes:
* :py:attr:`params`: model parameter state dictionary.
* :py:attr:`theta_index`: A dictionary that maps parameter names to indices (or rather
slices) of the parameter vector ``theta``.
* :py:attr:`config_dict`: Information about each parameter as a dictionary keyed by
parameter name for easy access.
* :py:attr:`config_list`: Information about each parameter stored as a list.
    Initialization is via, e.g.,
.. code-block:: python
model_dict = {"mass": {"N": 1, "isfree": False, "init": 1e10}}
model = ProspectorParams(model_dict, param_order=None)
:param configuration:
A list or dictionary of model parameters specifications.
"""
def __init__(self, configuration, verbose=True, param_order=None, **kwargs):
"""
:param configuration:
A list or dictionary of parameter specification dictionaries.
:param param_order: (optional, default: None)
If given and `configuration` is a dictionary, this will specify the
order in which the parameters appear in the theta vector. Iterable
of strings.
"""
self.init_config = deepcopy(configuration)
self.parameter_order = param_order
if type(configuration) == list:
self.config_list = configuration
self.config_dict = plist_to_pdict(self.config_list)
elif type(configuration) == dict:
self.config_dict = configuration
self.config_list = pdict_to_plist(self.config_dict, order=param_order)
else:
raise TypeError("Configuration variable not of valid type: "
"{}".format(type(configuration)))
self.configure(**kwargs)
self.verbose = verbose
def __repr__(self):
return ":::::::\n{}\n\n{}".format(self.__class__, self.description)
def configure(self, reset=False, **kwargs):
"""Use the :py:attr:`config_dict` to generate a :py:attr:`theta_index`
        mapping, propagate the initial parameters into the
        :py:attr:`params` state dictionary, and store the initial theta vector
thus implied.
:param kwargs:
Keyword parameters can be used to override or add to the initial
parameter values specified in :py:attr:`config_dict`
:param reset: (default: False)
If true, empty the params dictionary before re-reading the
:py:attr:`config_dict`
"""
self._has_parameter_dependencies = False
if (not hasattr(self, 'params')) or reset:
self.params = {}
self.map_theta()
        # Propagate initial parameter values from the configure dictionary
# Populate the 'prior' key of the configure dictionary
# Check for 'depends_on'
for par, info in list(self.config_dict.items()):
self.params[par] = np.atleast_1d(info['init']).copy()
try:
# this is for backwards compatibility
self.config_dict[par]['prior'] = info['prior_function']
except(KeyError):
pass
if info.get('depends_on', None) is not None:
assert callable(info["depends_on"])
self._has_parameter_dependencies = True
        # propagate user-supplied values to the params state, overriding the
# configure `init` values
for k, v in list(kwargs.items()):
self.params[k] = np.atleast_1d(v)
# store these initial values
self.initial_theta = self.theta.copy()
def map_theta(self):
"""Construct the mapping from parameter name to the index in the theta
vector corresponding to the first element of that parameter. Called
during configuration.
"""
self.theta_index = {}
count = 0
for par in self.free_params:
n = self.config_dict[par]['N']
self.theta_index[par] = slice(count, count + n)
count += n
good = len(self.config_dict[par]['prior']) == n
if not good:
msg = "{} has wrong length prior, should be {}"
warnings.warn(msg.format(par, n), RuntimeWarning)
self.ndim = count
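    # Illustrative example: with free parameters "mass" (N=1) and
    # "logsfr_ratios" (N=2), in that order, map_theta produces
    #     theta_index = {"mass": slice(0, 1), "logsfr_ratios": slice(1, 3)}
    # and sets ndim = 3.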
def set_parameters(self, theta):
"""Propagate theta into the model parameters :py:attr:`params` dictionary.
:param theta:
A theta parameter vector containing the desired parameters. ndarray
of shape ``(ndim,)``
"""
assert len(theta) == self.ndim
for k, inds in list(self.theta_index.items()):
self.params[k] = np.atleast_1d(theta[inds]).copy()
self.propagate_parameter_dependencies()
def prior_product(self, theta, nested=False, **extras):
"""Public version of _prior_product to be overridden by subclasses.
:param theta:
The parameter vector for which you want to calculate the
prior. ndarray of shape ``(..., ndim)``
:param nested:
If using nested sampling, this will only return 0 (or -inf). This
behavior can be overridden if you want to include complicated
priors that are not included in the unit prior cube based proposals
(e.g. something that is difficult to transform from the unit cube.)
:returns lnp_prior:
The natural log of the prior probability at ``theta``
"""
lpp = self._prior_product(theta)
if nested & np.any(np.isfinite(lpp)):
return 0.0
return lpp
def _prior_product(self, theta, **extras):
"""Return a scalar which is the ln of the product of the prior
probabilities for each element of theta. Requires that the prior
functions are defined in the theta descriptor.
:param theta:
Iterable containing the free model parameter values. ndarray of
shape ``(ndim,)``
:returns lnp_prior:
The natural log of the product of the prior probabilities for these
parameter values.
"""
lnp_prior = 0
for k, inds in list(self.theta_index.items()):
func = self.config_dict[k]['prior']
this_prior = np.sum(func(theta[..., inds]), axis=-1)
lnp_prior += this_prior
return lnp_prior
def prior_transform(self, unit_coords):
"""Go from unit cube to parameter space, for nested sampling.
:param unit_coords:
Coordinates in the unit hyper-cube. ndarray of shape ``(ndim,)``.
:returns theta:
The parameter vector corresponding to the location in prior CDF
corresponding to ``unit_coords``. ndarray of shape ``(ndim,)``
"""
theta = np.zeros(len(unit_coords))
for k, inds in list(self.theta_index.items()):
func = self.config_dict[k]['prior'].unit_transform
theta[inds] = func(unit_coords[inds])
return theta
def propagate_parameter_dependencies(self):
"""Propogate any parameter dependecies. That is, for parameters whose
value depends on another parameter, calculate those values and store
them in the :py:attr:`self.params` dictionary.
"""
        if not self._has_parameter_dependencies:
return
for p, info in list(self.config_dict.items()):
if 'depends_on' in info:
value = info['depends_on'](**self.params)
self.params[p] = np.atleast_1d(value)
def rectify_theta(self, theta, epsilon=1e-10):
"""Replace zeros in a given theta vector with a small number epsilon.
"""
zero = (theta == 0)
theta[zero] = epsilon
return theta
@property
def theta(self):
"""The current value of the theta vector, pulled from the ``params``
state dictionary.
"""
theta = np.zeros(self.ndim)
for k, inds in list(self.theta_index.items()):
theta[inds] = self.params[k]
return theta
@property
def free_params(self):
"""A list of the names of the free model parameters.
"""
return [k['name'] for k in pdict_to_plist(self.config_list)
if k.get('isfree', False)]
@property
def fixed_params(self):
"""A list of the names fixed model parameters that are specified in the
``config_dict``.
"""
return [k['name'] for k in pdict_to_plist(self.config_list)
                if not k.get('isfree', False)]
@property
def description(self):
return describe(self.config_dict, current_params=self.params)
def theta_labels(self, name_map={}):
"""Using the theta_index parameter map, return a list of the model
parameter names that has the same order as the sampling chain array.
:param name_map:
A dictionary mapping model parameter names to output label
names.
:returns labels:
A list of labels of the same length and order as the theta
vector.
"""
label, index = [], []
for p, inds in list(self.theta_index.items()):
nt = inds.stop - inds.start
try:
name = name_map[p]
except(KeyError):
name = p
if nt == 1:
label.append(name)
index.append(inds.start)
else:
for i in range(nt):
label.append(name+'_{0}'.format(i+1))
index.append(inds.start+i)
return [l for (i, l) in sorted(zip(index, label))]
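    # Illustrative example: a free parameter "logsfr_ratios" with N=2 appears in
    # the returned list as "logsfr_ratios_1" and "logsfr_ratios_2", in the same
    # positions it occupies in the theta vector.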
def theta_bounds(self):
"""Get the bounds on each parameter from the prior.
:returns bounds:
A list of length ``ndim`` of tuples ``(lo, hi)`` giving the
parameter bounds.
"""
bounds = np.zeros([self.ndim, 2])
for p, inds in list(self.theta_index.items()):
pb = self.config_dict[p]['prior'].bounds()
bounds[inds, :] = np.array(pb).T
# Force types ?
bounds = [(np.atleast_1d(a)[0], np.atleast_1d(b)[0])
for a, b in bounds]
return bounds
def theta_disps(self, default_disp=0.1, fractional_disp=False):
"""Get a vector of absolute dispersions for each parameter to use in
generating sampler balls for emcee's Ensemble sampler. This can be
overridden by subclasses if fractional dispersions are desired.
        :param default_disp: (default: 0.1)
The default dispersion to use in case the ``"init_disp"`` key is
not provided in the parameter configuration.
:param fractional_disp: (default: False)
Treat the dispersion values as fractional dispersions.
:returns disp:
The dispersion in the parameters to use for generating clouds of
walkers (or minimizers.) ndarray of shape ``(ndim,)``
"""
disp = np.zeros(self.ndim) + default_disp
for par, inds in list(self.theta_index.items()):
d = self.config_dict[par].get('init_disp', default_disp)
disp[inds] = d
if fractional_disp:
disp = self.theta * disp
return disp
def theta_disp_floor(self, thetas=None):
"""Get a vector of dispersions for each parameter to use as a floor for
the emcee walker-calculated dispersions. This can be overridden by
subclasses.
:returns disp_floor:
The minimum dispersion in the parameters to use for generating
clouds of walkers (or minimizers.) ndarray of shape ``(ndim,)``
"""
dfloor = np.zeros(self.ndim)
for par, inds in list(self.theta_index.items()):
d = self.config_dict[par].get('disp_floor', 0.0)
dfloor[inds] = d
return dfloor
def clip_to_bounds(self, thetas):
"""Clip a set of parameters theta to within the priors.
:param thetas:
The parameter vector, ndarray of shape ``(ndim,)``.
:returns thetas:
The input vector, clipped to the bounds of the priors.
"""
bounds = self.theta_bounds()
for i in range(len(bounds)):
lower, upper = bounds[i]
thetas[i] = np.clip(thetas[i], lower, upper)
return thetas
@property
def _config_dict(self):
"""Backwards compatibility
"""
return self.config_dict
def plist_to_pdict(inplist):
"""Convert from a parameter list to a parameter dictionary, where the keys
    of the dictionary are the parameter names.
"""
plist = deepcopy(inplist)
if type(plist) is dict:
return plist.copy()
pdict = {}
for p in plist:
name = p.pop('name')
pdict[name] = p
return pdict
def pdict_to_plist(pdict, order=None):
"""Convert from a dictionary of parameter dictionaries to a list of
parameter dictionaries, adding each key to each value dictionary as the
`name' keyword. Optionally, do this in an order specified by `order`. This
method is not used often, so it can be a bit inefficient
:param pdict:
A dictionary of parameter specification dictionaries, keyed by
parameter name. If a list is given instead of a dictionary, this same
list is returned.
:param order:
An iterable of parameter names specifying the order in which they
should be added to the parameter list
:returns plist:
        A list of parameter specification dictionaries (with the `"name"` key
added.) The listed dictionaries are *not* copied from the input
dictionary.
"""
if type(pdict) is list:
return pdict[:]
plist = []
if order is not None:
assert len(order) == len(pdict)
else:
order = pdict.keys()
for k in order:
v = pdict[k]
v['name'] = k
plist += [v]
return plist
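# A minimal usage sketch (illustrative), mirroring the example in the class
# docstring above:
#
#     >>> from prospect.models import priors
#     >>> config = {"mass": {"N": 1, "isfree": True, "init": 1e10,
#     ...                    "prior": priors.LogUniform(mini=1e8, maxi=1e12)},
#     ...           "zred": {"N": 1, "isfree": False, "init": 0.1}}
#     >>> model = ProspectorParams(config)
#     >>> model.free_params                  # ['mass']
#     >>> model.theta                        # array([1.e+10])
#     >>> model.prior_product(model.theta)   # ln prior probability of the current theta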
| 15,162 | 36.07335 | 90 |
py
|
prospector
|
prospector-master/prospect/models/model_setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, getopt, json, warnings
from copy import deepcopy
import numpy as np
from . import parameters
from ..utils.obsutils import fix_obs
"""This module has methods to take a .py file containing run parameters, model
parameters and other info and return a run_params dictionary, an obs
dictionary, and a model. It also has methods to parse command line options and
return an sps object and noise objects.
Most of the load_<x> methods are just really shallow wrappers on
```import_module_from_file(param_file).load_<x>(**kwargs)``` and could probably
be done away with at this point, as they add a mostly useless layer of
abstraction. Kept here for future flexibility.
"""
__all__ = ["parse_args", "import_module_from_file", "get_run_params",
"load_model", "load_obs", "load_sps", "load_gp", "show_syntax"]
deprecation_msg = ("Use argparse based operation; usage via prospector_*.py "
"scripts will be disabled in the future.")
def parse_args(argv, argdict={}):
"""Parse command line arguments, allowing for optional arguments.
Simple/Fragile.
"""
warnings.warn(deprecation_msg, FutureWarning)
args = [sub for arg in argv[1:] for sub in arg.split('=')]
for i, a in enumerate(args):
if (a[:2] == '--'):
abare = a[2:]
if abare == 'help':
show_syntax(argv, argdict)
sys.exit()
else:
continue
if abare in argdict.keys():
apo = deepcopy(args[i+1])
func = type(argdict[abare])
try:
argdict[abare] = func(apo)
if func is bool:
argdict[abare] = apo in ['True', 'true', 'T', 't', 'yes']
except TypeError:
argdict[abare] = apo
return argdict
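# Illustrative example: ``parse_args(["script.py", "--zred=0.5"], argdict={"zred": 0.0})``
# returns ``{"zred": 0.5}``, with the command-line string cast to the type of the
# default value supplied in ``argdict``.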
def get_run_params(param_file=None, argv=None, **kwargs):
"""Get a run_params dictionary from the param_file (if passed) otherwise
return the kwargs dictionary.
The order of precedence of parameter specification locations is:
* 1. param_file (lowest)
    * 2. kwargs passed to this function
* 3. command line arguments
"""
warnings.warn(deprecation_msg, FutureWarning)
rp = {}
if param_file is None:
ext = ""
else:
ext = param_file.split('.')[-1]
if ext == 'py':
setup_module = import_module_from_file(param_file)
rp = deepcopy(setup_module.run_params)
elif ext == 'json':
rp, mp = parameters.read_plist(param_file)
if kwargs is not None:
kwargs.update(rp)
rp = kwargs
if argv is not None:
rp = parse_args(argv, argdict=rp)
rp['param_file'] = param_file
return rp
def load_sps(param_file=None, **kwargs):
"""Return an ``sps`` object which is used to hold spectral libraries,
perform interpolations, convolutions, etc.
"""
warnings.warn(deprecation_msg, FutureWarning)
ext = param_file.split('.')[-1]
assert ext == 'py'
setup_module = import_module_from_file(param_file)
if hasattr(setup_module, 'load_sps'):
builder = setup_module.load_sps
elif hasattr(setup_module, 'build_sps'):
builder = setup_module.build_sps
else:
warnings.warn("Could not find load_sps or build_sps methods in param_file")
return None
sps = builder(**kwargs)
return sps
def load_gp(param_file=None, **kwargs):
"""Return two Gaussian Processes objects, either using BSFH's internal GP
objects or George.
:returns gp_spec:
The gaussian process object to use for the spectroscopy.
:returns gp_phot:
The gaussian process object to use for the photometry.
"""
warnings.warn(deprecation_msg, FutureWarning)
ext = param_file.split('.')[-1]
assert ext == "py"
setup_module = import_module_from_file(param_file)
if hasattr(setup_module, 'load_gp'):
builder = setup_module.load_gp
elif hasattr(setup_module, 'build_noise'):
builder = setup_module.build_noise
else:
warnings.warn("Could not find load_gp or build_noise methods in param_file")
return None, None
spec_noise, phot_noise = builder(**kwargs)
return spec_noise, phot_noise
def load_model(param_file=None, **kwargs):
"""Load the model object from a model config list given in the config file.
:returns model:
An instance of the parameters.ProspectorParams object which has
been configured
"""
warnings.warn(deprecation_msg, FutureWarning)
ext = param_file.split('.')[-1]
assert ext == 'py'
setup_module = import_module_from_file(param_file)
#mp = deepcopy(setup_module.model_params)
if hasattr(setup_module, 'load_model'):
builder = setup_module.load_model
elif hasattr(setup_module, 'build_model'):
builder = setup_module.build_model
else:
warnings.warn("Could not find load_model or build_model methods in param_file")
return None
model = builder(**kwargs)
return model
def load_obs(param_file=None, **kwargs):
"""Load the obs dictionary using the `obs` attribute or methods in
``param_file``. kwargs are passed to these methods and ``fix_obs()``
:returns obs:
A dictionary of observational data.
"""
warnings.warn(deprecation_msg, FutureWarning)
ext = param_file.split('.')[-1]
obs = None
assert ext == 'py'
print('reading py script {}'.format(param_file))
setup_module = import_module_from_file(param_file)
if hasattr(setup_module, 'obs'):
return fix_obs(deepcopy(setup_module.obs))
if hasattr(setup_module, 'load_obs'):
builder = setup_module.load_obs
elif hasattr(setup_module, 'build_obs'):
builder = setup_module.build_obs
else:
warnings.warn("Could not find load_obs or build_obs methods in param_file")
return None
obs = builder(**kwargs)
obs = fix_obs(obs, **kwargs)
return obs
def import_module_from_file(path_to_file):
"""This has to break everything ever, right?
"""
from importlib import import_module
path, filename = os.path.split(path_to_file)
modname = filename.replace('.py', '')
sys.path.insert(0, path)
user_module = import_module(modname)
sys.path.remove(path)
return user_module
def import_module_from_string(source, name, add_to_sys_modules=True):
"""Well this seems dangerous.
"""
import imp
user_module = imp.new_module(name)
exec(source, user_module.__dict__)
if add_to_sys_modules:
sys.modules[name] = user_module
return user_module
def show_syntax(args, ad):
"""Show command line syntax corresponding to the provided arg dictionary
`ad`.
"""
print('Usage:\n {0} '.format(args[0]) +
' '.join(['--{0}=<value>'.format(k) for k in ad.keys()]))
class Bunch(object):
""" Simple storage.
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def custom_filter_dict(filename):
filter_dict = {}
with open(filename, 'r') as f:
for line in f:
ind, name = line.split()
filter_dict[name.lower()] = Bunch(index=int(ind)-1)
return filter_dict
| 7,298 | 29.668067 | 87 |
py
|
prospector
|
prospector-master/prospect/models/templates.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""templates.py -- A set of predefined "base" prospector model specifications
that can be used as a starting point and then combined or altered.
"""
from copy import deepcopy
import numpy as np
from . import priors
from . import transforms
__all__ = ["TemplateLibrary",
"describe",
"adjust_dirichlet_agebins",
"adjust_continuity_agebins",
]
class Directory(object):
"""A dict-like that only returns copies of the dictionary values.
It also includes a dictionary of information describing each entry in the
directory.
"""
def __init__(self):
self._entries = {}
self._descriptions = {}
try:
self.iteritems = self._entries.iteritems
except AttributeError:
self.iteritems = self._entries.items
def __getitem__(self, k):
return deepcopy(self._entries[k])
def __setitem__(self, k, v):
entry, description = v
self._entries[k] = entry
self._descriptions[k] = description
def describe(self, k):
print(describe(self._entries[k]))
def show_contents(self):
for k, v in list(self._descriptions.items()):
print("'{}':\n {}".format(k, v))
def describe(parset, current_params={}):
ttext = "Free Parameters: (name: prior) \n-----------\n"
free = ["{}: {}".format(k, v["prior"])
for k, v in list(parset.items()) if v["isfree"]]
ttext += " " + "\n ".join(free)
ftext = "Fixed Parameters: (name: value [, depends_on]) \n-----------\n"
fixed = ["{}: {} {}".format(k, current_params.get(k, v["init"]),
v.get("depends_on", ""))
for k, v in list(parset.items()) if not v["isfree"]]
ftext += " " + "\n ".join(fixed)
return ttext + "\n\n" + ftext
def adjust_dirichlet_agebins(parset, agelims=[0., 8., 9., 10.]):
"""Given a list of limits in age for bins, adjust the parameter
specifications to work for those limits.
:param parset:
The parameter specification dictionary to adjust. Must have entries (keys) for
"mass", "agebins", "zfraction"
:param agelims:
        An iterable of bin edges, in log(yrs) of lookback time.
"""
agebins = np.array([agelims[:-1], agelims[1:]]).T
ncomp = len(agelims) - 1
# constant SFR
zinit = np.array([(i-1)/float(i) for i in range(ncomp, 1, -1)])
# Set up the prior in `z` variables that corresponds to a dirichlet in sfr
# fraction. THIS IS IMPORTANT
alpha = np.arange(ncomp-1, 0, -1)
zprior = priors.Beta(alpha=alpha, beta=np.ones_like(alpha), mini=0.0, maxi=1.0)
parset['mass']['N'] = ncomp
parset['agebins']['N'] = ncomp
parset['agebins']['init'] = agebins
parset['z_fraction']['N'] = len(zinit)
parset['z_fraction']['init'] = zinit
parset['z_fraction']['prior'] = zprior
return parset
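# Illustrative example (mirroring the "alpha" template at the bottom of this
# module): passing six bin edges in log10(yr),
#
#     >>> parset = TemplateLibrary["dirichlet_sfh"]
#     >>> parset = adjust_dirichlet_agebins(parset, agelims=[0., 8., 8.5, 9., 9.5, 10.1])
#
# resizes "mass" and "agebins" to 5 bins and "z_fraction" to 4 elements.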
def adjust_continuity_agebins(parset, tuniv=13.7, nbins=7):
"""defines agebins
the first two agebins are hard-coded to be 0-30 Myr, 30-100 Myr
the final agebin is hard-coded to cover 0.85*t_univ-t_univ
the rest split logarithmic time evenly
inputs:
tuniv is the age of the Universe in Gyr
nbins is the number of SFH bins
"""
if nbins < 4:
        raise ValueError('Must have nbins >= 4')
tbinmax = (tuniv * 0.85) * 1e9
lim1, lim2 = 7.4772, 8.0
agelims = [0,lim1] + np.linspace(lim2,np.log10(tbinmax),nbins-2).tolist() + [np.log10(tuniv*1e9)]
agebins = np.array([agelims[:-1], agelims[1:]])
ncomp = nbins
mean = np.zeros(ncomp-1)
scale = np.ones_like(mean) * 0.3
df = np.ones_like(mean) * 2
rprior = priors.StudentT(mean=mean, scale=scale, df=df)
parset['mass']['N'] = ncomp
parset['agebins']['N'] = ncomp
parset['agebins']['init'] = agebins.T
parset["logsfr_ratios"]["N"] = ncomp - 1
parset["logsfr_ratios"]["init"] = mean
parset["logsfr_ratios"]["prior"] = rprior
return parset
TemplateLibrary = Directory()
# A template for what parameter configuration element should look like
par_name = {"N": 1,
"isfree": True,
"init": 0.5,
"prior": priors.TopHat(mini=0, maxi=1.0),
"depends_on": None,
"units": "",}
# ---------------------
# --- Explicit defaults
# --------------------
imf = {"N": 1, "isfree": False, "init": 2} # Kroupa
dust_type = {"N": 1, "isfree": False, "init": 0} # Power-law
_defaults_ = {"imf_type": imf, # FSPS parameter
"dust_type": dust_type # FSPS parameter
}
TemplateLibrary["type_defaults"] = (_defaults_,
"Explicitly sets dust amd IMF types.")
# --------------------------
# --- Some (very) common parameters ----
# --------------------------
zred = {"N": 1, "isfree": False,
"init": 0.1,
"units": "redshift",
"prior": priors.TopHat(mini=0.0, maxi=4.0)}
mass = {"N": 1, "isfree": True,
"init": 1e10,
"units": "Solar masses formed",
"prior": priors.LogUniform(mini=1e8, maxi=1e12)}
logzsol = {"N": 1, "isfree": True,
"init": -0.5,
"units": r"$\log (Z/Z_\odot)$",
"prior": priors.TopHat(mini=-2, maxi=0.19)}
dust2 = {"N": 1, "isfree": True,
"init": 0.6,
"units": "optical depth at 5500AA",
"prior": priors.TopHat(mini=0.0, maxi=2.0)}
sfh = {"N": 1, "isfree": False, "init": 0, "units": "FSPS index"}
tage = {"N": 1, "isfree": True,
"init": 1, "units": "Gyr",
"prior": priors.TopHat(mini=0.001, maxi=13.8)}
_basic_ = {"zred":zred,
"mass": mass,
"logzsol": logzsol, # FSPS parameter
"dust2": dust2, # FSPS parameter
"sfh": sfh, # FSPS parameter
"tage": tage # FSPS parameter
}
_basic_.update(_defaults_)
TemplateLibrary["ssp"] = (_basic_,
("Basic set of (free) parameters for a delta function SFH"))
# ----------------------------
# --- Parametric SFH -----
# ----------------------------
_parametric_ = TemplateLibrary["ssp"]
_parametric_["sfh"]["init"] = 4 # Delay-tau
_parametric_["tau"] = {"N": 1, "isfree": True,
"init": 1, "units": "Gyr^{-1}",
"prior": priors.LogUniform(mini=0.1, maxi=30)}
TemplateLibrary["parametric_sfh"] = (_parametric_,
("Basic set of (free) parameters for a delay-tau SFH."))
# --------------------------
# --- Dust emission ----
# --------------------------
add_duste = {"N": 1, "isfree": False, "init": True}
duste_umin = {"N": 1, "isfree": False,
"init": 1.0, "units": 'MMP83 local MW intensity',
"prior": priors.TopHat(mini=0.1, maxi=25)}
duste_qpah = {"N": 1, "isfree": False,
'init': 4.0, "units": 'Percent mass fraction of PAHs in dust.',
"prior": priors.TopHat(mini=0.5, maxi=7.0)}
duste_gamma = {"N": 1, "isfree": False,
"init": 1e-3, "units": 'Mass fraction of dust in high radiation intensity.',
"prior": priors.LogUniform(mini=1e-3, maxi=0.15)}
_dust_emission_ = {"add_dust_emission": add_duste,
"duste_umin": duste_umin, # FSPS / Draine & Li parameter
"duste_qpah": duste_qpah, # FSPS / Draine & Li parameter
"duste_gamma": duste_gamma # FSPS / Draine & Li parameter
}
TemplateLibrary["dust_emission"] = (_dust_emission_,
("The set of (fixed) dust emission parameters."))
# --------------------------
# --- Nebular Emission ----
# --------------------------
add_neb = {'N': 1, 'isfree': False, 'init': True}
neb_cont = {'N': 1, 'isfree': False, 'init': True}
neb_spec = {'N': 1, 'isfree': False, 'init': True}
# Note this depends on stellar metallicity
gas_logz = {'N': 1, 'isfree': False,
'init': 0.0, 'units': r'log Z/Z_\odot',
'depends_on': transforms.stellar_logzsol,
'prior': priors.TopHat(mini=-2.0, maxi=0.5)}
gas_logu = {"N": 1, 'isfree': False,
'init': -2.0, 'units': r"Q_H/N_H",
'prior': priors.TopHat(mini=-4, maxi=-1)}
_nebular_ = {"add_neb_emission": add_neb, # FSPS parameter.
"add_neb_continuum": neb_cont, # FSPS parameter.
"nebemlineinspec": neb_spec, # FSPS parameter.
"gas_logz": gas_logz, # FSPS parameter.
"gas_logu": gas_logu, # FSPS parameter.
}
TemplateLibrary["nebular"] = (_nebular_,
("The set of nebular emission parameters, "
"with gas_logz tied to stellar logzsol."))
# -----------------------------------------
# --- Nebular Emission Marginalization ----
# -----------------------------------------
marginalize_elines = {'N': 1, 'isfree': False, 'init': True}
use_eline_prior = {'N': 1, 'isfree': False, 'init': True}
nebemlineinspec = {'N': 1, 'isfree': False, 'init': False} # can't be included w/ marginalization
# marginalize over which of the 128 FSPS emission lines?
# input is a list of emission line names matching $SPS_HOME/data/emlines_info.dat
lines_to_fit = {'N': 1, 'isfree': False, 'init': []}
eline_prior_width = {'N': 1, 'isfree': False,
'init': 0.2, 'units': r'width of Gaussian prior on line luminosity, in units of (true luminosity/FSPS predictions)',
'prior': None}
eline_delta_zred = {'N': 1, 'isfree': True,
'init': 0.0, 'units': r'redshift',
'prior': priors.TopHat(mini=-0.01, maxi=0.01)}
eline_sigma = {'N': 1, 'isfree': True,
'init': 100.0, 'units': r'km/s',
'prior': priors.TopHat(mini=30, maxi=300)}
_neb_marg_ = {"marginalize_elines": marginalize_elines,
"use_eline_prior": use_eline_prior,
"nebemlineinspec": nebemlineinspec,
"lines_to_fit": lines_to_fit,
"eline_prior_width": eline_prior_width,
"eline_sigma": eline_sigma
}
_fit_eline_redshift_ = {'eline_delta_zred': eline_delta_zred}
TemplateLibrary["nebular_marginalization"] = (_neb_marg_,
("Marginalize over emission amplitudes line contained in"
"the observed spectrum"))
TemplateLibrary["fit_eline_redshift"] = (_fit_eline_redshift_,
("Fit for the redshift of the emission lines separately"
"from the stellar redshift"))
# -------------------------
# --- Outlier Templates ---
# -------------------------
f_outlier_spec = {"N": 1,
"isfree": True,
"init": 0.01,
"prior": priors.TopHat(mini=1e-5, maxi=0.5)}
nsigma_outlier_spec = {"N": 1,
"isfree": False,
"init": 50.0}
f_outlier_phot = {"N": 1,
"isfree": False,
"init": 0.00,
"prior": priors.TopHat(mini=0.0, maxi=0.5)}
nsigma_outlier_phot = {"N": 1,
"isfree": False,
"init": 50.0}
_outlier_modeling_ = {"f_outlier_spec": f_outlier_spec,
"nsigma_outlier_spec": nsigma_outlier_spec,
"f_outlier_phot": f_outlier_phot,
"nsigma_outlier_phot": nsigma_outlier_phot
}
TemplateLibrary['outlier_model'] = (_outlier_modeling_,
("The set of outlier (mixture) models for spectroscopy and photometry"))
# --------------------------
# --- AGN Torus Emission ---
# --------------------------
add_agn = {"N": 1, "isfree": False, "init": True}
fagn = {'N': 1, 'isfree': False,
'init': 1e-4, 'units': r'L_{AGN}/L_*',
'prior': priors.LogUniform(mini=1e-5, maxi=3.0)}
agn_tau = {"N": 1, 'isfree': False,
"init": 5.0, 'units': r"optical depth",
'prior': priors.LogUniform(mini=5.0, maxi=150.)}
_agn_ = {"fagn": fagn, # FSPS parameter.
"agn_tau": agn_tau, # FSPS parameter.
"add_agn_dust": add_agn
}
TemplateLibrary["agn"] = (_agn_,
("The set of (fixed) AGN dusty torus emission parameters."))
# --------------------------
# --- IGM Absorption ---
# --------------------------
add_igm = {'N': 1, 'isfree': False, 'init': True}
igm_fact ={'N': 1, 'isfree': False, 'init': 1.0,
'units': 'factor by which to scale the Madau attenuation',
'prior': priors.ClippedNormal(mean=1.0, sigma=0.1, mini=0.0, maxi=2.0)}
_igm_ = {"add_igm_absorption": add_igm, # FSPS Parameter.
"igm_factor": igm_fact, # FSPS Parameter.
}
TemplateLibrary["igm"] = (_igm_,
("The set of (fixed) IGM absorption parameters."))
# --------------------------
# --- Spectral Smoothing ---
# --------------------------
smooth = {'N': 1, 'isfree': False, 'init': 'vel'}
fft = {'N': 1, 'isfree': False, 'init': True}
wlo = {'N': 1, 'isfree': False, 'init': 3500.0, 'units': r'$\AA$'}
whi = {'N': 1, 'isfree': False, 'init': 7800.0, 'units': r'$\AA$'}
sigma_smooth = {'N': 1, 'isfree': True,
'init': 200.0, 'units': 'km/s',
'prior': priors.TopHat(mini=10, maxi=300)}
_smoothing_ = {"smoothtype": smooth, "fftsmooth": fft, # prospecter `smoothspec` parameter
#"min_wave_smooth": wlo, "max_wave_smooth": whi,
"sigma_smooth": sigma_smooth # prospecter `smoothspec` parameter
}
TemplateLibrary["spectral_smoothing"] = (_smoothing_,
("Set of parameters for spectal smoothing."))
# --------------------------
# --- Spectral calibration
# -------------------------
spec_norm = {'N': 1, 'isfree': False,
'init': 1.0, 'units': 'f_true/f_obs',
'prior': priors.Normal(mean=1.0, sigma=0.1)}
# What order polynomial?
npoly = 12
porder = {'N': 1, 'isfree': False, 'init': npoly}
preg = {'N': 1, 'isfree': False, 'init': 0.}
polymax = 0.1 / (np.arange(npoly) + 1)
pcoeffs = {'N': npoly, 'isfree': True,
'init': np.zeros(npoly),
           'units': 'ln(f_true/f_obs)_j=\sum_{i=1}^N poly_coeffs_{i-1} * lambda_j^i',
'prior': priors.TopHat(mini=-polymax, maxi=polymax)}
_polyopt_ = {"polyorder": porder, # order of polynomial to optimize
"poly_regularization": preg, # Regularization of polynomial coeffs (can be a vector).
"spec_norm": spec_norm # Overall normalization of the spectrum.
}
_polyfit_ = {"spec_norm": spec_norm, # Overall normalization of the spectrum.
"poly_coeffs": pcoeffs # Polynomial coefficients
}
TemplateLibrary["optimize_speccal"] = (_polyopt_,
("Set of parameters (most of which are fixed) "
"for optimizing a polynomial calibration vector."))
TemplateLibrary["fit_speccal"] = (_polyfit_,
("Set of parameters (most of which are free) for sampling "
"the coefficients of a polynomial calibration vector."))
# ----------------------------
# --- Additional SF Bursts ---
# ---------------------------
fage_burst = {'N': 1, 'isfree': False,
              'init': 0.0, 'units': 'time at which the burst happens, as a fraction of `tage`',
'prior': priors.TopHat(mini=0.5, maxi=1.0)}
tburst = {'N': 1, 'isfree': False,
'init': 0.0, 'units': 'Gyr',
'prior': None, 'depends_on': transforms.tburst_from_fage}
fburst = {'N': 1, 'isfree': False,
'init': 0.0, 'units': 'fraction of total mass formed in the burst',
'prior': priors.TopHat(mini=0.0, maxi=0.5)}
_burst_ = {"tburst": tburst,
"fburst": fburst,
"fage_burst": fage_burst}
TemplateLibrary["burst_sfh"] = (_burst_,
("The set of (fixed) parameters for an SF burst "
"added to a parameteric SFH, with the burst time "
"controlled by `fage_burst`."))
# -----------------------------------
# --- Nonparametric-logmass SFH ----
# -----------------------------------
# Using a (perhaps dangerously) simple nonparametric model of mass in fixed time bins with a logarithmic prior.
_nonpar_lm_ = TemplateLibrary["ssp"]
_ = _nonpar_lm_.pop("tage")
_nonpar_lm_["sfh"] = {"N": 1, "isfree": False, "init": 3, "units": "FSPS index"}
# This will be the mass in each bin. It depends on other free and fixed
# parameters. Its length needs to be modified based on the number of bins
_nonpar_lm_["mass"] = {'N': 3, 'isfree': True, 'init': 1e6, 'units': r'M$_\odot$',
'prior': priors.LogUniform(mini=1e5, maxi=1e12)}
# This gives the start and stop of each age bin. It can be adjusted and its
# length must match the length of "mass"
_nonpar_lm_["agebins"] = {'N': 3, 'isfree': False,
'init': [[0.0, 8.0], [8.0, 9.0], [9.0, 10.0]],
'units': 'log(yr)'}
# This is the *total* stellar mass formed
_nonpar_lm_["total_mass"] = {"N": 1, "isfree": False, "init": 1e10, "units": "Solar masses formed",
"depends_on": transforms.total_mass}
TemplateLibrary["logm_sfh"] = (_nonpar_lm_,
"Non-parameteric SFH fitting for log-mass in fixed time bins")
# ----------------------------
# --- Continuity SFH ----
# ----------------------------
# A non-parametric SFH model of mass in fixed time bins with a smoothness prior
_nonpar_continuity_ = TemplateLibrary["ssp"]
_ = _nonpar_continuity_.pop("tage")
_nonpar_continuity_["sfh"] = {"N": 1, "isfree": False, "init": 3, "units": "FSPS index"}
# This is the *total* mass formed, as a variable
_nonpar_continuity_["logmass"] = {"N": 1, "isfree": True, "init": 10, 'units': 'Msun',
'prior': priors.TopHat(mini=7, maxi=12)}
# This will be the mass in each bin. It depends on other free and fixed
# parameters. Its length needs to be modified based on the number of bins
_nonpar_continuity_["mass"] = {'N': 3, 'isfree': False, 'init': 1e6, 'units': r'M$_\odot$',
'depends_on': transforms.logsfr_ratios_to_masses}
# This gives the start and stop of each age bin. It can be adjusted and its
# length must match the length of "mass"
_nonpar_continuity_["agebins"] = {'N': 3, 'isfree': False,
'init': [[0.0, 8.0], [8.0, 9.0], [9.0, 10.0]],
'units': 'log(yr)'}
# This controls the distribution of SFR(t) / SFR(t+dt). It has NBINS-1 components.
_nonpar_continuity_["logsfr_ratios"] = {'N': 2, 'isfree': True, 'init': [0.0,0.0],
'prior':priors.StudentT(mean=np.full(2,0.0),
scale=np.full(2,0.3),
df=np.full(2,2))}
TemplateLibrary["continuity_sfh"] = (_nonpar_continuity_,
"Non-parameteric SFH fitting for mass in fixed time bins with a smoothness prior")
# ----------------------------
# --- Flexible Continuity SFH ----
# ----------------------------
# A non-parametric SFH model of mass in flexible time bins with a smoothness prior
_nonpar_continuity_flex_ = TemplateLibrary["ssp"]
_ = _nonpar_continuity_flex_.pop("tage")
_nonpar_continuity_flex_["sfh"] = {"N": 1, "isfree": False, "init": 3, "units": "FSPS index"}
#_nonpar_continuity_flex_["tuniv"] = {"N": 1, "isfree": False, "init": 13.7, "units": "Gyr"}
# This is the *total* mass formed
_nonpar_continuity_flex_["logmass"] = {"N": 1, "isfree": True, "init": 10, 'units': 'Msun',
'prior': priors.TopHat(mini=7, maxi=12)}
# These variables control the ratio of SFRs in adjacent bins
# there is one for a fixed "youngest" bin, one for the fixed "oldest" bin, and (N-1) for N flexible bins in between
_nonpar_continuity_flex_["logsfr_ratio_young"] = {'N': 1, 'isfree': True, 'init': 0.0, 'units': r'dlogSFR (dex)',
'prior': priors.StudentT(mean=0.0, scale=0.3, df=2)}
_nonpar_continuity_flex_["logsfr_ratio_old"] = {'N': 1, 'isfree': True, 'init': 0.0, 'units': r'dlogSFR (dex)',
'prior': priors.StudentT(mean=0.0, scale=0.3, df=2)}
_nonpar_continuity_flex_["logsfr_ratios"] = {'N': 1, 'isfree': True, 'init': 0.0, 'units': r'dlogSFR (dex)',
'prior': priors.StudentT(mean=0.0, scale=0.3, df=2)}
# This will be the mass in each bin. It depends on other free and fixed
# parameters. Its length needs to be modified based on the total number of
# bins (including fixed young and old bin)
_nonpar_continuity_flex_["mass"] = {'N': 4, 'isfree': False, 'init': 1e6, 'units': r'M$_\odot$',
'depends_on': transforms.logsfr_ratios_to_masses_flex}
# This gives the start and stop of each age bin. It can be adjusted and its
# length must match the length of "mass"
_nonpar_continuity_flex_["agebins"] = {'N': 4, 'isfree': False,
'depends_on': transforms.logsfr_ratios_to_agebins,
'init': [[0.0, 7.5], [7.5, 8.5],[8.5,9.7], [9.7, 10.136]],
'units': 'log(yr)'}
TemplateLibrary["continuity_flex_sfh"] = (_nonpar_continuity_flex_,
("Non-parameteric SFH fitting for mass in flexible time "
"bins with a smoothness prior"))
# ----------------------------
# --- Dirichlet SFH ----
# ----------------------------
# Using the dirichlet prior on SFR fractions in bins of constant SF.
_dirichlet_ = TemplateLibrary["ssp"]
_ = _dirichlet_.pop("tage")
_dirichlet_["sfh"] = {"N": 1, "isfree": False, "init": 3, "units": "FSPS index"}
# This will be the mass in each bin. It depends on other free and fixed
# parameters. Its length needs to be modified based on the number of bins
_dirichlet_["mass"] = {'N': 3, 'isfree': False, 'init': 1., 'units': r'M$_\odot$',
'depends_on': transforms.zfrac_to_masses}
# This gives the start and stop of each age bin. It can be adjusted and its
# length must match the length of "mass"
_dirichlet_["agebins"] = {'N': 3, 'isfree': False,
'init': [[0.0, 8.0], [8.0, 9.0], [9.0, 10.0]],
'units': 'log(yr)'}
# Auxiliary variable used for sampling sfr_fractions from dirichlet. This
# *must* be adjusted depending on the number of bins
_dirichlet_["z_fraction"] = {"N": 2, 'isfree': True, 'init': [0, 0], 'units': None,
'prior': priors.Beta(alpha=1.0, beta=1.0, mini=0.0, maxi=1.0)}
# This is the *total* stellar mass formed
_dirichlet_["total_mass"] = mass
TemplateLibrary["dirichlet_sfh"] = (_dirichlet_,
"Non-parameteric SFH with Dirichlet prior (fractional SFR)")
# ----------------------------
# --- Prospector-alpha ---
# ----------------------------
_alpha_ = TemplateLibrary["dirichlet_sfh"]
_alpha_.update(TemplateLibrary["dust_emission"])
_alpha_.update(TemplateLibrary["nebular"])
_alpha_.update(TemplateLibrary["agn"])
# Set the dust and agn emission free
_alpha_["duste_qpah"]["isfree"] = True
_alpha_["duste_umin"]["isfree"] = True
_alpha_["duste_gamma"]["isfree"] = True
_alpha_["fagn"]["isfree"] = True
_alpha_["agn_tau"]["isfree"] = True
# Complexify the dust attenuation
_alpha_["dust_type"] = {"N": 1, "isfree": False, "init": 4, "units": "FSPS index"}
_alpha_["dust2"]["prior"] = priors.TopHat(mini=0.0, maxi=4.0)
_alpha_["dust1"] = {"N": 1, "isfree": False, 'depends_on': transforms.dustratio_to_dust1,
"init": 0.0, "units": "optical depth towards young stars"}
_alpha_["dust_ratio"] = {"N": 1, "isfree": True,
"init": 1.0, "units": "ratio of birth-cloud to diffuse dust",
"prior": priors.ClippedNormal(mini=0.0, maxi=2.0, mean=1.0, sigma=0.3)}
_alpha_["dust_index"] = {"N": 1, "isfree": True,
"init": 0.0, "units": "power-law multiplication of Calzetti",
"prior": priors.TopHat(mini=-2.0, maxi=0.5)}
# in Gyr
alpha_agelims = np.array([1e-9, 0.1, 0.3, 1.0, 3.0, 6.0, 13.6])
_alpha_ = adjust_dirichlet_agebins(_alpha_, agelims=(np.log10(alpha_agelims) + 9))
TemplateLibrary["alpha"] = (_alpha_,
"The prospector-alpha model, Leja et al. 2017")
| 25,141 | 40.284072 | 130 |
py
|
prospector
|
prospector-master/prospect/models/__init__.py
|
"""This module includes objects that store parameter specfications and
efficiently convert between parameter dictionaries and parameter vectors
necessary for fitting algorithms. There are submodules for parameter priors,
common parameter transformations, and pre-defined sets of parameter
specifications.
"""
from .sedmodel import ProspectorParams, SedModel, SpecModel
#from .parameters import ProspectorParams
__all__ = ["SpecModel", "SedModel", "ProspectorParams"]
| 470 | 38.25 | 77 |
py
|
prospector
|
prospector-master/prospect/models/transforms.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""transforms.py -- This module contains parameter transformations that may be
useful to transform from parameters that are easier to _sample_ in to the
parameters required for building SED models.
They can be used as ``"depends_on"`` entries in parameter specifications.
"""
import numpy as np
from ..sources.constants import cosmo
__all__ = ["stellar_logzsol", "delogify_mass",
"tburst_from_fage", "tage_from_tuniv", "zred_to_agebins",
"dustratio_to_dust1",
"logsfr_ratios_to_masses", "logsfr_ratios_to_sfrs",
"logsfr_ratios_to_masses_flex", "logsfr_ratios_to_agebins",
"zfrac_to_masses", "zfrac_to_sfrac", "zfrac_to_sfr", "masses_to_zfrac",
"sfratio_to_sfr", "sfratio_to_mass"]
# --------------------------------------
# --- Basic Convenience Transforms ---
# --------------------------------------
def stellar_logzsol(logzsol=0.0, **extras):
"""Simple function that takes an argument list and returns the value of the
`logzsol` argument (i.e. the stellar metallicity)
:param logzsol:
FSPS stellar metaliicity parameter.
:returns logzsol:
The same.
"""
return logzsol
def delogify_mass(logmass=0.0, **extras):
"""Simple function that takes an argument list including a `logmass`
parameter and returns the corresponding linear mass.
:param logmass:
The log10(mass)
:returns mass:
The mass in linear units
"""
return 10**logmass
def total_mass(mass=0.0, **extras):
"""Simple function that takes an argument list uncluding a `mass`
parameter and returns the corresponding total mass.
:param mass:
length-N vector of masses in bins
:returns total mass:
Total mass in linear units
"""
return mass.sum()
# --------------------------------------
# Fancier transforms
# --------------------------------------
def tburst_from_fage(tage=0.0, fage_burst=0.0, **extras):
"""This function transfroms from a fractional age of a burst to an absolute
age. With this transformation one can sample in ``fage_burst`` without
worry about the case ``tburst`` > ``tage``.
:param tage:
The age of the host galaxy (Gyr)
:param fage_burst:
The fraction of the host age at which the burst occurred
:returns tburst:
The age of the host when the burst occurred (i.e. the FSPS ``tburst``
parameter)
"""
return tage * fage_burst
def tage_from_tuniv(zred=0.0, tage_tuniv=1.0, **extras):
"""This function calculates a galaxy age from the age of the univers at
``zred`` and the age given as a fraction of the age of the universe. This
allows for both ``zred`` and ``tage`` parameters without ``tage`` exceeding
the age of the universe.
:param zred:
Cosmological redshift.
:param tage_tuniv:
The ratio of ``tage`` to the age of the universe at ``zred``.
:returns tage:
The stellar population age, in Gyr
"""
tuniv = cosmo.age(zred).value
tage = tage_tuniv * tuniv
return tage
def zred_to_agebins(zred=0.0, agebins=[], **extras):
"""Set the nonparameteric SFH age bins depending on the age of the universe
at ``zred``. The first bin is not altered and the last bin is always 15% of
the upper edge of the oldest bin, but the intervening bins are evenly
spaced in log(age).
:param zred:
Cosmological redshift. This sets the age of the universe.
:param agebins:
The SFH bin edges in log10(years). ndarray of shape ``(nbin, 2)``.
:returns agebins:
The new SFH bin edges.
"""
tuniv = cosmo.age(zred).value * 1e9
tbinmax = tuniv * 0.85
ncomp = len(agebins)
agelims = list(agebins[0]) + np.linspace(agebins[1][1], np.log10(tbinmax), ncomp-2).tolist() + [np.log10(tuniv)]
return np.array([agelims[:-1], agelims[1:]]).T
def dustratio_to_dust1(dust2=0.0, dust_ratio=0.0, **extras):
"""Set the value of dust1 from the value of dust2 and dust_ratio
:param dust2:
The diffuse dust V-band optical depth (the FSPS ``dust2`` parameter.)
:param dust_ratio:
The ratio of the extra optical depth towards young stars to the diffuse
optical depth affecting all stars.
:returns dust1:
The extra optical depth towards young stars (the FSPS ``dust1``
parameter.)
"""
return dust2 * dust_ratio
# --------------------------------------
# --- Transforms for the continuity non-parametric SFHs used in (Leja et al. 2018) ---
# --------------------------------------
def logsfr_ratios_to_masses(logmass=None, logsfr_ratios=None, agebins=None,
**extras):
"""This converts from an array of log_10(SFR_j / SFR_{j+1}) and a value of
log10(\Sum_i M_i) to values of M_i. j=0 is the most recent bin in lookback
time.
"""
nbins = agebins.shape[0]
sratios = 10**np.clip(logsfr_ratios, -100, 100) # numerical issues...
dt = (10**agebins[:, 1] - 10**agebins[:, 0])
coeffs = np.array([ (1. / np.prod(sratios[:i])) * (np.prod(dt[1: i+1]) / np.prod(dt[: i]))
for i in range(nbins)])
m1 = (10**logmass) / coeffs.sum()
return m1 * coeffs
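# Sanity check (illustrative): with logsfr_ratios = 0 in every bin (equal SFRs),
# the coefficients above reduce to dt_i / dt_0, so each bin receives mass in
# proportion to its width, i.e. a constant star-formation rate, as expected.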
def logsfr_ratios_to_sfrs(logmass=None, logsfr_ratios=None, agebins=None, **extras):
"""Convenience function
"""
masses = logsfr_ratios_to_masses(logmass=logmass, logsfr_ratios=logsfr_ratios,
agebins=agebins)
dt = (10**agebins[:, 1] - 10**agebins[:, 0])
return masses / dt
# --------------------------------------
# --- Transforms for the flexible agebin continuity non-parametric SFHs used in (Leja et al. 2018) ---
# --------------------------------------
def logsfr_ratios_to_masses_flex(logmass=None, logsfr_ratios=None,
logsfr_ratio_young=None, logsfr_ratio_old=None,
**extras):
logsfr_ratio_young = np.clip(logsfr_ratio_young, -100, 100)
logsfr_ratio_old = np.clip(logsfr_ratio_old, -100, 100)
abins = logsfr_ratios_to_agebins(logsfr_ratios=logsfr_ratios, **extras)
nbins = abins.shape[0] - 2
syoung, sold = 10**logsfr_ratio_young, 10**logsfr_ratio_old
dtyoung, dt1 = (10**abins[:2, 1] - 10**abins[:2, 0])
dtn, dtold = (10**abins[-2:, 1] - 10**abins[-2:, 0])
mbin = (10**logmass) / (syoung*dtyoung/dt1 + sold*dtold/dtn + nbins)
myoung = syoung * mbin * dtyoung / dt1
mold = sold * mbin * dtold/dtn
n_masses = np.full(nbins, mbin)
return np.array(myoung.tolist() + n_masses.tolist() + mold.tolist())
def logsfr_ratios_to_agebins(logsfr_ratios=None, agebins=None, **extras):
"""This transforms from SFR ratios to agebins by assuming a constant amount
of mass forms in each bin agebins = np.array([NBINS,2])
use equation:
delta(t1) = tuniv / (1 + SUM(n=1 to n=nbins-1) PROD(j=1 to j=n) Sn)
where Sn = SFR(n) / SFR(n+1) and delta(t1) is width of youngest bin
"""
# numerical stability
logsfr_ratios = np.clip(logsfr_ratios, -100, 100)
# calculate delta(t) for oldest, youngest bins (fixed)
lower_time = (10**agebins[0, 1] - 10**agebins[0, 0])
upper_time = (10**agebins[-1, 1] - 10**agebins[-1, 0])
tflex = (10**agebins[-1,-1] - upper_time - lower_time)
# figure out other bin sizes
n_ratio = logsfr_ratios.shape[0]
sfr_ratios = 10**logsfr_ratios
dt1 = tflex / (1 + np.sum([np.prod(sfr_ratios[:(i+1)]) for i in range(n_ratio)]))
# translate into agelims vector (time bin edges)
agelims = [1, lower_time, dt1+lower_time]
for i in range(n_ratio):
agelims += [dt1*np.prod(sfr_ratios[:(i+1)]) + agelims[-1]]
#agelims += [tuniv[0]]
agelims += [10**agebins[-1, 1]]
agebins = np.log10([agelims[:-1], agelims[1:]]).T
return agebins
# --------------------------------------
# --- Transforms for Dirichlet non-parametric SFH used in (Leja et al. 2017) ---
# --------------------------------------
def zfrac_to_sfrac(z_fraction=None, **extras):
"""This transforms from independent dimensionless `z` variables to sfr
fractions. The transformation is such that sfr fractions are drawn from a
Dirichlet prior. See Betancourt et al. 2010 and Leja et al. 2017
:param z_fraction:
        latent variables drawn from a specific set of Beta distributions. (see
Betancourt 2010)
:returns sfrac:
The star formation fractions (See Leja et al. 2017 for definition).
"""
sfr_fraction = np.zeros(len(z_fraction) + 1)
sfr_fraction[0] = 1.0 - z_fraction[0]
for i in range(1, len(z_fraction)):
sfr_fraction[i] = np.prod(z_fraction[:i]) * (1.0 - z_fraction[i])
sfr_fraction[-1] = 1 - np.sum(sfr_fraction[:-1])
return sfr_fraction
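# Worked example (illustrative): z_fraction = [0.5, 0.5] gives
# sfr_fraction = [1 - 0.5, 0.5 * (1 - 0.5), 1 - 0.75] = [0.5, 0.25, 0.25],
# which sums to one as required.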
def zfrac_to_masses(total_mass=None, z_fraction=None, agebins=None, **extras):
"""This transforms from independent dimensionless `z` variables to sfr
fractions and then to bin mass fractions. The transformation is such that
sfr fractions are drawn from a Dirichlet prior. See Betancourt et al. 2010
and Leja et al. 2017
:param total_mass:
The total mass formed over all bins in the SFH.
:param z_fraction:
        latent variables drawn from a specific set of Beta distributions. (see
Betancourt 2010)
:returns masses:
The stellar mass formed in each age bin.
"""
# sfr fractions
sfr_fraction = np.zeros(len(z_fraction) + 1)
sfr_fraction[0] = 1.0 - z_fraction[0]
for i in range(1, len(z_fraction)):
sfr_fraction[i] = np.prod(z_fraction[:i]) * (1.0 - z_fraction[i])
sfr_fraction[-1] = 1 - np.sum(sfr_fraction[:-1])
# convert to mass fractions
time_per_bin = np.diff(10**agebins, axis=-1)[:, 0]
mass_fraction = sfr_fraction * np.array(time_per_bin)
mass_fraction /= mass_fraction.sum()
masses = total_mass * mass_fraction
return masses
# -- version of above for arrays of fractions --
#zf = np.atleast_2d(z_fraction)
#shape = list(zf.shape)
#shape[-1] += 1
#sfr_fraction = np.zeros(shape)
#sfr_fraction[..., 0] = 1.0 - z_fraction[..., 0]
#for i in range(1, shape[-1]-1):
# sfr_fraction[..., i] = (np.prod(z_fraction[..., :i], axis=-1) *
# (1.0 - z_fraction[...,i]))
#sfr_fraction[..., -1] = 1 - np.sum(sfr_fraction[..., :-1], axis=-1)
#sfr_fraction = np.squeeze(sfr_fraction)
#
# convert to mass fractions
#time_per_bin = np.diff(10**agebins, axis=-1)[:,0]
#sfr_fraction *= np.array(time_per_bin)
#mtot = np.atleast_1d(sfr_fraction.sum(axis=-1))
#mass_fraction = sfr_fraction / mtot[:, None]
#
#masses = np.atleast_2d(total_mass) * mass_fraction.T
#return masses.T
def zfrac_to_sfr(total_mass=None, z_fraction=None, agebins=None, **extras):
"""This transforms from independent dimensionless `z` variables to SFRs.
:returns sfrs:
The SFR in each age bin (msun/yr).
"""
time_per_bin = np.diff(10**agebins, axis=-1)[:, 0]
masses = zfrac_to_masses(total_mass, z_fraction, agebins)
return masses / time_per_bin
def masses_to_zfrac(mass=None, agebins=None, **extras):
"""The inverse of :py:func:`zfrac_to_masses`, for setting mock parameters
based on mock bin masses.
:returns total_mass:
The total mass
:returns zfrac:
The dimensionless `z` variables used for sfr fraction parameterization.
"""
total_mass = mass.sum()
time_per_bin = np.diff(10**agebins, axis=-1)[:, 0]
sfr_fraction = mass / time_per_bin
sfr_fraction /= sfr_fraction.sum()
z_fraction = np.zeros(len(sfr_fraction) - 1)
z_fraction[0] = 1 - sfr_fraction[0]
for i in range(1, len(z_fraction)):
z_fraction[i] = 1.0 - sfr_fraction[i] / np.prod(z_fraction[:i])
return total_mass, z_fraction
# --------------------------------------
# --- Transforms for SFR ratio based nonparameteric SFH ---
# --------------------------------------
def sfratio_to_sfr(sfr_ratio=None, sfr0=None, **extras):
raise(NotImplementedError)
def sfratio_to_mass(sfr_ratio=None, sfr0=None, agebins=None, **extras):
raise(NotImplementedError)
| 12,300 | 33.946023 | 116 |
py
|
prospector
|
prospector-master/prospect/sources/star_basis.py
|
from itertools import chain
import numpy as np
from numpy.polynomial.chebyshev import chebval
from scipy.spatial import Delaunay
from ..utils.smoothing import smoothspec
from .constants import lightspeed, lsun, jansky_cgs, to_cgs_at_10pc
try:
from sklearn.neighbors import KDTree
except(ImportError):
from scipy.spatial import cKDTree as KDTree
try:
from sedpy.observate import getSED, vac2air, air2vac
except(ImportError):
pass
__all__ = ["StarBasis", "BigStarBasis"]
# Useful constants
# value to go from L_sun to erg/s/cm^2 at 10pc
to_cgs = to_cgs_at_10pc
# for converting Kurucz spectral units
log4pi = np.log10(4 * np.pi)
log_rsun_cgs = np.log10(6.955) + 10
log_lsun_cgs = np.log10(lsun)
log_SB_cgs = np.log10(5.6704e-5)
log_SB_solar = log_SB_cgs + 2 * log_rsun_cgs - log_lsun_cgs
class StarBasis(object):
_spectra = None
def __init__(self, libname='ckc14_deimos.h5', verbose=False,
n_neighbors=0, log_interp=True, logify_Z=False,
use_params=None, rescale_libparams=False, in_memory=True,
**kwargs):
"""An object which holds the stellar spectral library, performs
interpolations of that library, and has methods to return attenuated,
normalized, smoothed stellar spectra. The interpolations are performed
        using barycentric coordinates of the enclosing simplex found from the
        Delaunay triangulation. This is not tractable for large dimension
(see BigStarBasis for that case).
:param libname:
Path to the hdf5 file to use for the spectral library.
:param verbose:
If True, print information about the parameters used when a point
is outside the convex hull.
:param n_neighbors: (default:0)
Number of nearest neighbors to use when requested parameters are
            outside the convex hull of the library parameters. If ``0`` then a
            ValueError is raised instead of returning the nearest spectrum. If greater
than 1 then the neighbors are combined using inverse distance
weights.
:param log_interp: (default:True)
Switch to interpolate in log(flux) instead of linear flux.
:param use_params:
Sequence of strings. If given, only use the listed parameters
(which must be present in the `_libparams` structure) to build the
grid and construct spectra. Otherwise all fields of `_libparams`
will be used.
:param rescale_libparams: (default: False)
If True, rescale the parameters to the unit cube before generating
the triangulation (and kd-tree). Note that the `param_vector`
method will also rescale the input parameters in this case. This
can help for nearest neighbor lookup and in the triangulation based
weights when your variables have very different scales, assuming
that the ranges give a reasonable relative distance metric.
:param in_memory: (default: True)
Switch to keep the spectral library in memory or access it through
the h5py File object. Note if the latter, then zeroed spectra are
*not* filtered out.
"""
# Cache initialization variables
self.verbose = verbose
self.logarithmic = log_interp
self.logify_Z = logify_Z
self._in_memory = in_memory
self._libname = libname
self.n_neighbors = n_neighbors
self._rescale = rescale_libparams
# Load the library
self.load_lib(libname)
# Do some important bookkeeping
if use_params is None:
self.stellar_pars = self._libparams.dtype.names
else:
self.stellar_pars = tuple(use_params)
self.ndim = len(self.stellar_pars)
# Build the triangulation and kdtree (after rescaling)
if self._rescale:
ranges = [[self._libparams[d].min(), self._libparams[d].max()]
for d in self.stellar_pars]
self.parameter_range = np.array(ranges).T
self.triangulate()
try:
self.build_kdtree()
except NameError:
pass
self.params = {}
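    # A minimal usage sketch (illustrative; the library filename and the stellar
    # parameter names "logt", "logg", "Z" are assumptions about the contents of
    # the HDF5 file, not guarantees):
    #
    #     >>> sb = StarBasis(libname="ckc14_deimos.h5", use_params=["logt", "logg", "Z"])
    #     >>> spec, phot, _ = sb.get_spectrum(logt=3.76, logg=4.4, Z=0.0134,
    #     ...                                 lumdist=1e-5, peraa=False)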
def load_lib(self, libname='', driver=None):
"""Read a CKC library which has been pre-convolved to be close to your
resolution. This library should be stored as an HDF5 file, with the
datasets ``wavelengths``, ``parameters`` and ``spectra``. These are
ndarrays of shape (nwave,), (nmodels,), and (nmodels, nwave)
respecitvely. The ``parameters`` array is a structured array. Spectra
with no fluxes > 1e-32 are removed from the library if the librarty is
kept in memory.
"""
import h5py
f = h5py.File(libname, "r", driver=driver)
self._wave = np.array(f['wavelengths'])
self._libparams = np.array(f['parameters'])
if self._in_memory:
self._spectra = np.array(f['spectra'])
f.close()
# Filter library so that only existing spectra are included
maxf = np.max(self._spectra, axis=1)
good = maxf > 1e-32
self._libparams = self._libparams[good]
self._spectra = self._spectra[good, :]
else:
self._spectra = f['spectra']
if self.logify_Z:
from numpy.lib import recfunctions as rfn
self._libparams['Z'] = np.log10(self._libparams['Z'])
            self._libparams = rfn.rename_fields(self._libparams, {'Z': 'logZ'})
def update(self, **kwargs):
"""Update the `params` dictionary, turning length 1 arrays into scalars
and pull out functions from length one arrays
"""
for k, val in list(kwargs.items()):
v = np.atleast_1d(val)
try:
if (len(v) == 1) and callable(v[0]):
self.params[k] = v[0]
else:
self.params[k] = np.squeeze(v)
except(KeyError):
pass
def get_spectrum(self, outwave=None, filters=None, peraa=False, **kwargs):
"""Return an attenuated, smoothed, distance dimmed stellar spectrum and SED.
:returns spec:
The spectrum on the outwave grid (assumed in air), in AB maggies.
If peraa is True then the spectrum is erg/s/cm^2/AA.
:returns phot:
Observed frame photometry in units of AB maggies. If ``lumdist``
is not present in the parameters then these are absolute maggies,
otherwise they are apparent.
:returns x:
A blob of extra quantities (e.g. mass, uncertainty)
"""
self.update(**kwargs)
# star spectrum (in Lsun/Hz)
wave, spec, unc = self.get_star_spectrum(**self.params)
spec *= self.normalize()
# dust
if 'dust_curve' in self.params:
att = self.params['dust_curve'](self._wave, **self.params)
spec *= np.exp(-att)
# Redshifting + Wavelength solution. We also convert to in-air.
a = 1 + self.params.get('zred', 0)
b = 0.0
if 'wavecal_coeffs' in self.params:
x = wave - wave.min()
x = 2.0 * (x / x.max()) - 1.0
c = np.insert(self.params['wavecal_coeffs'], 0, 0)
            # assume the coefficients give shifts in km/s
b = chebval(x, c) / (lightspeed*1e-13)
wa, sa = vac2air(wave) * (a + b), spec * a
if outwave is None:
outwave = wa
# Broadening, interpolation onto output wavelength grid
if 'sigma_smooth' in self.params:
smspec = self.smoothspec(wa, sa, self.params['sigma_smooth'],
outwave=outwave, **self.params)
elif outwave is not wa:
smspec = np.interp(outwave, wa, sa, left=0, right=0)
else:
smspec = sa
# Photometry (observed frame absolute maggies)
if filters is not None:
mags = getSED(wa, sa * lightspeed / wa**2 * to_cgs, filters)
phot = np.atleast_1d(10**(-0.4 * mags))
else:
phot = 0.0
# Distance dimming. Default to 10pc distance (i.e. absolute)
dfactor = (self.params.get('lumdist', 1e-5) * 1e5)**2
if peraa:
# spectrum will be in erg/s/cm^2/AA
smspec *= to_cgs / dfactor * lightspeed / outwave**2
else:
# Spectrum will be in maggies
smspec *= to_cgs / dfactor / (3631*jansky_cgs)
# Convert from absolute maggies to apparent maggies
phot /= dfactor
return smspec, phot, None
def get_star_spectrum(self, **kwargs):
"""Given stellar parameters, obtain an interpolated spectrum at those
parameters.
:param **kwargs:
Keyword arguments must include values for the parameters listed in
``stellar_pars``.
:returns wave:
The wavelengths at which the spectrum is defined.
:returns spec:
The spectrum interpolated to the requested parameters. This has
the same units as the supplied library spectra.
:returns unc:
The uncertainty spectrum, where the uncertainty is due to
        interpolation error. Currently unimplemented (i.e. it is a None
type object).
"""
inds, wghts = self.weights(**kwargs)
if self.logarithmic:
spec = np.exp(np.dot(wghts, np.log(self._spectra[inds, :])))
else:
spec = np.dot(wghts, self._spectra[inds, :])
spec_unc = None
return self._wave, spec, spec_unc
def smoothspec(self, wave, spec, sigma, outwave=None, **kwargs):
outspec = smoothspec(wave, spec, sigma, outwave=outwave, **kwargs)
return outspec
def normalize(self):
"""Use either `logr` or `logl` to normalize the spectrum. Both should
be in solar units. `logr` is checked first. If neither is present
then 1.0 is returned.
:returns norm:
Factor by which the CKC spectrum should be multiplied to get units
of L_sun/Hz. This assumes the native library spectrum is in units
of erg/s/cm^2/Hz/sr.
"""
if 'logr' in self.params:
twologr = 2. * (self.params['logr'] + log_rsun_cgs)
elif 'logl' in self.params:
twologr = ((self.params['logl'] + log_lsun_cgs) -
4 * self.params['logt'] - log_SB_cgs - log4pi)
else:
return 1.0
norm = 10**(twologr + 2 * log4pi - log_lsun_cgs)
return norm
def weights(self, **kwargs):
"""Delauynay weighting. Return indices of the models forming the
enclosing simplex, as well as the barycentric coordinates of the point
within this simplex to use as weights. If point is outside the convex
hull then fallback to nearest neighbor unless ``n_neighbors`` is 0.
"""
inparams = np.squeeze(self.param_vector(**kwargs))
triangle_ind = self._dtri.find_simplex(inparams)
if triangle_ind == -1:
self.edge_flag = True
if self.n_neighbors == 0:
pstring = ', '.join(self.ndim * ['{}={}'])
pstring = pstring.format(*chain(*zip(self.stellar_pars, inparams)))
raise ValueError("Requested spectrum ({}) outside convex hull,"
" and nearest neighbor interpolation turned "
"off.".format(*pstring))
ind, wght = self.weights_knn(inparams, k=self.n_neighbors)
if self.verbose:
print("Parameters {0} outside model convex hull. "
"Using model index {1} instead. ".format(inparams, ind))
return ind, wght
inds = self._dtri.simplices[triangle_ind, :]
transform = self._dtri.transform[triangle_ind, :, :]
Tinv = transform[:self.ndim, :]
x_r = inparams - transform[self.ndim, :]
bary = np.dot(Tinv, x_r)
last = np.clip(1.0 - bary.sum(), 0.0, 1.0)
wghts = np.append(bary, last)
oo = inds.argsort()
return inds[oo], wghts[oo]
def rescale_params(self, points):
"""Rescale the given parameters to the unit cube, if the ``_rescale`` attribute is ``True``
:param points:
An array of parameter values, of shape (npoint, ndim)
:returns x:
An array of parameter values rescaled to the unit cube, ndarray of
shape (npoint, ndim)
"""
if self._rescale:
x = np.atleast_2d(points)
x = (x - self.parameter_range[0, :]) / np.diff(self.parameter_range, axis=0)
return np.squeeze(x)
else:
return points
def triangulate(self):
"""Build the Delauynay Triangulation of the model library.
"""
# slow. should use a view based method
model_points = np.array([list(self._libparams[d])
for d in self.stellar_pars]).T
self._dtri = Delaunay(self.rescale_params(model_points))
def build_kdtree(self):
"""Build the kdtree of the model points.
"""
# slow. should use a view based method
model_points = np.array([list(self._libparams[d])
for d in self.stellar_pars])
self._kdt = KDTree(self.rescale_params(model_points.T))
def weights_knn(self, target_points, k=1):
"""The interpolation weights are determined from the inverse distance
to the k nearest neighbors.
:param target_points: ndarray, shape(ntarg,npar)
The coordinates to which you wish to interpolate.
:param k:
The number of nearest neighbors to use.
        :returns inds: ndarray, shape (ntarg, k)
            The model indices of the interpolates.
        :returns weights: ndarray, shape (ntarg, k)
            The weights of each model given by ``inds`` in the interpolates.
"""
try:
dists, inds = self._kdt.query(np.atleast_2d(target_points), k=k,
return_distance=True)
except:
return [0], [0]
inds = np.atleast_1d(np.squeeze(inds))
if k == 1:
return inds, np.ones(inds.shape)
weights = 1 / dists
# weights[np.isinf(weights)] = large_number
weights = weights/weights.sum(axis=-1)
return inds, np.atleast_1d(np.squeeze(weights))
def param_vector(self, **kwargs):
"""Take a dictionary of parameters and return the stellar library
parameter vector corresponding to these parameters as an ndarray.
Raises a KeyError if the dictionary does not contain *all* of the
required stellar parameters.
"""
pvec = [kwargs[n] for n in self.stellar_pars]
return self.rescale_params(np.array(pvec))
@property
def wavelengths(self):
return self._wave
class BigStarBasis(StarBasis):
def __init__(self, libname='', verbose=False, log_interp=True,
n_neighbors=0, driver=None, in_memory=False,
use_params=None, strictness=0.0, **kwargs):
"""An object which holds the stellar spectral library, performs linear
interpolations of that library, and has methods to return attenuated,
        normalized, smoothed stellar spectra.
This object is set up to work with large grids, so the models file is
        kept open for access from disk. scikit-learn or scipy KD-trees are
required for model access. Ideally the grid should be regular (though
the spacings need not be equal along a given dimension).
:param libname:
Path to the hdf5 file to use for the spectral library.
:param n_neighbors: (default:0)
Number of nearest neighbors to use when requested parameters are
        outside the convex hull of the library parameters. If ``0`` then a
        ValueError is raised instead of the nearest spectrum. This fallback
        is not currently implemented for this class.
:param verbose:
If True, print information about the parameters used when a point
is outside the convex hull
:param log_interp: (default: True)
Interpolate in log(flux) instead of flux.
:param in_memory: (default: False)
Switch to determine whether the grid is loaded in memory or read
from disk each time a model is constructed (like you'd want for
very large grids).
:param use_params:
Sequence of strings. If given, only use the listed parameters
(which must be present in the `_libparams` structure) to build the
grid and construct spectra. Otherwise all fields of `_libparams`
will be used.
:param strictness: (default: 0.0)
Float from 0.0 to 1.0 that gives the fraction of a unit hypercube
that is required for a parameter position to be accepted. That is,
if the weights of the enclosing vertices sum to less than this
number, raise an error.
"""
self.verbose = verbose
self.logarithmic = log_interp
self._libname = libname
self.n_neighbors = n_neighbors
self._in_memory = in_memory
self._strictness = strictness
self.load_lib(libname, driver=driver)
# Do some important bookkeeping
if use_params is None:
self.stellar_pars = self._libparams.dtype.names
else:
self.stellar_pars = tuple(use_params)
self.ndim = len(self.stellar_pars)
self.lib_as_grid()
self.params = {}
def load_lib(self, libname='', driver=None):
"""Read a ykc library which has been preconvolved to be close to your
data resolution. This library should be stored as an HDF5 file, with
the datasets ``wavelengths``, ``parameters`` and ``spectra``. These
are ndarrays of shape (nwave,), (nmodels,), and (nmodels, nwave)
        respectively. The ``parameters`` array is a structured array. The h5
file object is left open so that spectra can be accessed from disk.
"""
import h5py
f = h5py.File(libname, "r", driver=driver)
self._wave = np.array(f['wavelengths'])
self._libparams = np.array(f['parameters'])
if self._in_memory:
self._spectra = np.array(f['spectra'])
f.close()
else:
self._spectra = f['spectra']
def get_star_spectrum(self, **kwargs):
"""Given stellar parameters, obtain an interpolated spectrum at those
parameters.
:param **kwargs:
Keyword arguments must include values for the ``stellar_pars``
parameters that are stored in ``_libparams``.
:returns wave:
The wavelengths at which the spectrum is defined.
:returns spec:
The spectrum interpolated to the requested parameters
:returns unc:
The uncertainty spectrum, where the uncertainty is due to
            interpolation error. Currently unimplemented (i.e. it is a None
type object)
"""
inds, wghts = self.weights(**kwargs)
if self.logarithmic:
spec = np.exp(np.dot(wghts, np.log(self._spectra[inds, :])))
else:
spec = np.dot(wghts, self._spectra[inds, :])
spec_unc = None
return self._wave, spec, spec_unc
def weights(self, **params):
inds = self.knearest_inds(**params)
wghts = self.linear_weights(inds, **params)
if wghts.sum() <= self._strictness:
raise ValueError("Something is wrong with the weights")
good = wghts > 0
# if good.sum() < 2**self.ndim:
# raise ValueError("Did not find all vertices of the hypercube, "
# "or there is no enclosing hypercube in the library.")
inds = inds[good]
wghts = wghts[good]
wghts /= wghts.sum()
return inds, wghts
def lib_as_grid(self):
"""Convert the library parameters to pixel indices in each dimension,
and build and store a KDTree for the pixel coordinates.
"""
# Get the unique gridpoints in each param
self.gridpoints = {}
for p in self.stellar_pars:
self.gridpoints[p] = np.unique(self._libparams[p])
# Digitize the library parameters
X = np.array([np.digitize(self._libparams[p], bins=self.gridpoints[p],
right=True) for p in self.stellar_pars])
self.X = X.T
# Build the KDTree
self._kdt = KDTree(self.X) # , metric='euclidean')
def params_to_grid(self, **targ):
"""Convert a set of parameters to grid pixel coordinates.
:param targ:
The target parameter location, as keyword arguments. The elements
of ``stellar_pars`` must be present as keywords.
:returns x:
The target parameter location in pixel coordinates.
"""
# bin index
inds = np.array([np.digitize([targ[p]], bins=self.gridpoints[p], right=False) - 1
for p in self.stellar_pars])
inds = np.squeeze(inds)
# fractional index. Could use stored denominator to be slightly faster
try:
find = [(targ[p] - self.gridpoints[p][i]) /
(self.gridpoints[p][i+1] - self.gridpoints[p][i])
for i, p in zip(inds, self.stellar_pars)]
except(IndexError):
pstring = "{0}: min={2} max={3} targ={1}\n"
s = [pstring.format(p, targ[p], *self.gridpoints[p][[0, -1]])
for p in self.stellar_pars]
raise ValueError("At least one parameter outside grid.\n{}".format(' '.join(s)))
return inds + np.squeeze(find)
def knearest_inds(self, **params):
"""Find all parameter ``vertices`` within a sphere of radius
sqrt(ndim). The parameter values are converted to pixel coordinates
before a search of the KDTree.
:param params:
Keyword arguments which must include keys corresponding to
``stellar_pars``, the parameters of the grid.
:returns inds:
The sorted indices of all vertices within sqrt(ndim) of the pixel
coordinates, corresponding to **params.
"""
# Convert from physical space to grid index space
xtarg = self.params_to_grid(**params)
# Query the tree within radius sqrt(ndim)
try:
inds = self._kdt.query_radius(xtarg.reshape(1, -1),
r=np.sqrt(self.ndim))
except(AttributeError):
inds = self._kdt.query_ball_point(xtarg.reshape(1, -1),
np.sqrt(self.ndim))
return np.sort(inds[0])
def linear_weights(self, knearest, **params):
"""Use ND-linear interpolation over the knearest neighbors.
:param knearest:
The indices of the ``vertices`` for which to calculate weights.
:param params:
The target parameter location, as keyword arguments.
:returns wght:
The weight for each vertex, computed as the volume of the hypercube
formed by the target parameter and each vertex. Vertices more than
1 away from the target in any dimension are given a weight of zero.
"""
xtarg = self.params_to_grid(**params)
x = self.X[knearest, :]
dx = xtarg - x
# Fractional pixel weights
wght = ((1 - dx) * (dx >= 0) + (1 + dx) * (dx < 0))
# set weights to zero if model is more than a pixel away
wght *= (dx > -1) * (dx < 1)
# compute hyperarea for each model and return
return wght.prod(axis=-1)
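    # Illustrative sketch (editor addition, not from the original module): for a
    # single grid axis with the target at fractional pixel coordinate 2.25, the
    # two bracketing vertices at pixel coordinates 2 and 3 receive
    #   dx = 2.25 - np.array([2., 3.])                 # -> [ 0.25, -0.75]
    #   (1 - dx) * (dx >= 0) + (1 + dx) * (dx < 0)     # -> [ 0.75,  0.25]
    # In N dimensions the per-axis weights are multiplied together (the
    # ``prod(axis=-1)`` above), i.e. standard multilinear interpolation.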
def triangle_weights(self, knearest, **params):
"""Triangulate the k-nearest models, then use the barycenter of the
enclosing simplex to interpolate.
"""
inparams = np.array([params[p] for p in self.stellar_pars])
dtri = Delaunay(self.model_points[knearest, :])
triangle_ind = dtri.find_simplex(inparams)
inds = dtri.simplices[triangle_ind, :]
transform = dtri.transform[triangle_ind, :, :]
Tinv = transform[:self.ndim, :]
x_r = inparams - transform[self.ndim, :]
bary = np.dot(Tinv, x_r)
last = 1.0 - bary.sum()
wghts = np.append(bary, last)
oo = inds.argsort()
return inds[oo], wghts[oo]
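# -----------------------------------------------------------------------------
# Illustrative sketch (editor addition, not part of the original module): the
# barycentric interpolation used by StarBasis.weights, demonstrated on a toy
# 2-D "library" using only scipy's Delaunay triangulation.  Because this module
# uses relative imports, run it as  python -m prospect.sources.star_basis
if __name__ == "__main__":
    # Toy grid of (logt, logg) library points and a target inside the hull
    pts = np.array([[3.5, 4.0], [3.5, 5.0], [3.7, 4.0], [3.7, 5.0]])
    dtri = Delaunay(pts)
    target = np.array([3.55, 4.25])
    simplex = dtri.find_simplex(target)
    inds = dtri.simplices[simplex]
    T = dtri.transform[simplex]
    ndim = pts.shape[1]
    bary = np.dot(T[:ndim], target - T[ndim])
    wghts = np.append(bary, 1.0 - bary.sum())
    # The weights are non-negative and sum to one; an interpolated spectrum
    # would then be np.dot(wghts, spectra[inds, :]), as in get_star_spectrum.
    print("vertices:", inds, "weights:", wghts, "sum:", wghts.sum())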
| 24,900 | 39.033762 | 99 |
py
|
prospector
|
prospector-master/prospect/sources/dust_basis.py
|
import numpy as np
try:
from sedpy.observate import getSED
except(ImportError):
pass
__all__ = ["BlackBodyDustBasis"]
# cgs constants
from .constants import lsun, pc, kboltz, hplanck
lightspeed = 29979245800.0
class BlackBodyDustBasis(object):
"""
"""
def __init__(self, **kwargs):
self.dust_parlist = ['mass', 'T', 'beta', 'kappa0', 'lambda0']
self.params = {}
self.params.update(**kwargs)
self.default_wave = np.arange(1000) # in microns
def get_spectrum(self, outwave=None, filters=None, **params):
"""Given a params dictionary, generate spectroscopy, photometry and any
extras (e.g. stellar mass).
:param outwave:
The output wavelength vector.
:param filters:
A list of sedpy filter objects.
:param **params:
Keywords forming the parameter set.
:returns spec:
The restframe spectrum in units of erg/s/cm^2/AA
:returns phot:
The apparent (redshifted) maggies in each of the filters.
:returns extras:
A list of None type objects, only included for consistency with the
SedModel class.
"""
self.params.update(**params)
if outwave is None:
outwave = self.default_wave
# Loop over number of MBBs
ncomp = len(self.params['mass'])
seds = [self.one_sed(icomp=ic, wave=outwave, filters=filters)
for ic in range(ncomp)]
# sum the components
spec = np.sum([s[0] for s in seds], axis=0)
maggies = np.sum([s[1] for s in seds], axis=0)
extra = [s[2] for s in seds]
norm = self.normalization()
spec, maggies = norm * spec, norm * maggies
return spec, maggies, extra
def one_sed(self, icomp=0, wave=None, filters=None, **extras):
"""Pull out individual component parameters from the param dictionary
and generate spectra for those components
"""
cpars = {}
for k in self.dust_parlist:
try:
cpars[k] = np.squeeze(self.params[k][icomp])
except(IndexError, TypeError):
cpars[k] = np.squeeze(self.params[k])
spec = cpars['mass'] * modified_BB(wave, **cpars)
phot = 10**(-0.4 * getSED(wave*1e4, spec, filters))
return spec, phot, None
def normalization(self):
"""This method computes the normalization (due do distance dimming,
unit conversions, etc.) based on the content of the params dictionary.
"""
return 1
def modified_BB(wave, T=20, beta=2.0, kappa0=1.92, lambda0=350, **extras):
"""Return a modified blackbody.
    The normalization of the emissivity curve can be given as kappa0 and
    lambda0 in units of cm^2/g and microns, default = (1.92, 350). Output units
    are erg/s/micron/g.
"""
term = (lambda0 / wave)**beta
return planck(wave, T=T, **extras) * term * kappa0
def planck(wave, T=20.0, **extras):
"""Return planck function B_lambda (erg/s/micron) for a given T (in Kelvin) and
wave (in microns)
"""
# Return B_lambda in erg/s/micron
w = wave * 1e4 #convert from microns to cm
conv = 2 * hplank * lightspeed**2 / w**5 / 1e4
denom = (np.exp(hplanck * lightspeed / (kboltz * T)) - 1)
return conv / denom
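# -----------------------------------------------------------------------------
# Illustrative sketch (editor addition, not part of the original module):
# evaluate the modified blackbody for a single dust component.  Because this
# module uses relative imports, run it as  python -m prospect.sources.dust_basis
if __name__ == "__main__":
    wave = np.linspace(10., 1000., 5)   # microns
    sed = modified_BB(wave, T=25.0, beta=2.0, kappa0=1.92, lambda0=350)
    # Units follow the docstring above (per gram of dust); multiplying by a
    # dust mass, as BlackBodyDustBasis.one_sed does, gives a component spectrum.
    for w, f in zip(wave, sed):
        print("lambda = {:7.1f} micron   S = {:.3e}".format(w, f))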
| 3,381 | 31.209524 | 83 |
py
|
prospector
|
prospector-master/prospect/sources/constants.py
|
import numpy as np
try:
from astropy.cosmology import WMAP9 as cosmo
except(ImportError):
cosmo = None
__all__ = ['lsun', 'pc', 'lightspeed', 'ckms',
'jansky_mks', 'jansky_cgs',
'to_cgs_at_10pc', 'loge',
'kboltz', 'hplanck',
'cosmo']
# Useful constants
lsun = 3.846e33 # erg/s
pc = 3.085677581467192e18 # in cm
lightspeed = 2.998e18 # AA/s
ckms = 2.998e5 # km/s
jansky_mks = 1e-26
jansky_cgs = 1e-23
# value to go from L_sun/AA to erg/s/cm^2/AA at 10pc
to_cgs_at_10pc = lsun / (4.0 * np.pi * (pc*10)**2)
# cgs physical constants
kboltz = 1.3806488e-16
hplanck = 6.62606957e-27
# change base
loge = np.log10(np.e)
| 674 | 21.5 | 52 |
py
|
prospector
|
prospector-master/prospect/sources/elines.py
|
wavelength = {
# Balmer Lines
'Ha': 6564.61,
'Hb': 4862.69,
'Hg': 4341.69,
'Hd': 4102.92,
'He': 3971.19,
'H8': 3890.15,
'H9': 3836.48,
'H10': 3798.98,
# Paschen lines
'P9': 9231.547,
'P10': 9017.385,
'P11': 8865.217,
'P12': 8752.875,
'P13': 8667.398,
'P14': 8600.753,
'P15': 8547.730,
'P16': 8504.818,
# Carbon absorption
'CI_4621': 4622.3,
'CI_8727': 8729.39,
'CII_2326': 2326.7,
# Nitrogen
'NII_6549': 6549.9,
'NII_6585': 6585.3,
# Oxygen
'OIII_5007': 5008.24,
'OIII_4959': 4960.30,
'OIII_4363': 4364.44,
'OII_3727': 3727.09,
'OII_3729': 3729.88,
'OI_5577': 5578.89,
'OI_6300': 6302.046,
'OI_6363': 6365.54,
'NeIII_3343': 3343.90,
'NeIII_3868': 3869.81,
'NeIII_3967': 3968.53,
'SIII_6312': 6313.75,
'SIII_9069': 9071.49,
'SIII_9532': 9534.61,
'SII_6718': 6718.3,
'SII_6732': 6732.7,
'ArIII_7137': 7137.8,
'ArIII_7753': 7753.2,
'CaII_K': 3934.777,
'CaII_H': 3969.588,
'Ca_4227': 4227, # Lick
'Ca_4455': 4455, # Lick
'CaII_8500': 8500.36,
'CaII_8544': 8544.44,
'CaII_8664': 8664.52,
'NaI_5891': 5891.6,
'NaI_5897': 5897.56,
'MgI_b4': 5169.4,
'MgI_b2': 5175.7,
'MgI_b1': 5185.7,
'MgI_5530': 5530.3,
'Fe_4383': 4383, # Lick
'Fe_4531': 4531, # Lick
'Fe_4668': 4668, # Lick
'Fe_5015': 5015, # Lick
'Fe_5270': 5270, # Lick
'Fe_5335': 5335, # Lick
'Fe_5406': 5406, # Lick
'Fe_5709': 5709, # Lick
'Fe_5782': 5782, # Lick
'CN_1': 4160, # Lick
'TiO_1': 5965, # Lick
'TiO_2': 6110, # Lick
'HgI_4358': 4359.2,
'HgI_5461': 5462.5,
'HeI_3821': 3820.69,
'HeI_4010': 4010.38,
'HeI_4027': 4027.39,
'HeI_4121': 4121.98,
'HeI_4145': 4144.92,
'HeI_4389': 4389.16,
'HeI_4473': 4472.73,
'HeI_4714': 4714.5,
'HeI_4923': 4923.305,
'HeI_5017': 5017.077,
'HeI_5050': 5050.0,
'HeI_5877': 5877.30,
'HeI_6680': 6680.0,
'HeI_7067': 7067.0,
'HeII_4543': 4542.7,
'HeII_4686': 4686.9,
'HeII_5413': 5413.5,
'units': 'vacuum angstroms'}
sky_lines = ['OI_5577', 'OI_6300', 'OI_6363', 'NaI_5891', 'NaI_5897']
ism_lines = ['CaII_K', 'CaII_H', 'NaI_5891', 'NaI_5897']
| 3,122 | 26.394737 | 69 |
py
|
prospector
|
prospector-master/prospect/sources/galaxy_basis.py
|
from itertools import chain
import numpy as np
from numpy.polynomial.chebyshev import chebval
from copy import deepcopy
from .ssp_basis import SSPBasis
from ..utils.smoothing import smoothspec
from .constants import cosmo, lightspeed, jansky_cgs, to_cgs_at_10pc
try:
import fsps
from sedpy.observate import getSED, vac2air, air2vac
except(ImportError):
pass
__all__ = ["CSPSpecBasis", "MultiComponentCSPBasis",
"to_cgs"]
to_cgs = to_cgs_at_10pc
class CSPSpecBasis(SSPBasis):
"""A subclass of :py:class:`SSPBasis` for combinations of N composite
stellar populations (including single-age populations). The number of
composite stellar populations is given by the length of the ``"mass"``
parameter. Other population properties can also be vectors of the same
length as ``"mass"`` if they are independent for each component.
"""
def __init__(self, zcontinuous=1, reserved_params=['sigma_smooth'],
vactoair_flag=False, compute_vega_mags=False, **kwargs):
# This is a StellarPopulation object from fsps
self.ssp = fsps.StellarPopulation(compute_vega_mags=compute_vega_mags,
zcontinuous=zcontinuous,
vactoair_flag=vactoair_flag)
self.reserved_params = reserved_params
self.params = {}
self.update(**kwargs)
def update(self, **params):
"""Update the `params` attribute, making parameters scalar if possible.
"""
for k, v in list(params.items()):
# try to make parameters scalar
try:
if (len(v) == 1) and callable(v[0]):
self.params[k] = v[0]
else:
self.params[k] = np.squeeze(v)
except:
self.params[k] = v
def update_component(self, component_index):
"""Pass params that correspond to a single component through to the
fsps.StellarPopulation object.
:param component_index:
The index of the component for which to pull out individual
parameters that are passed to the fsps.StellarPopulation object.
"""
for k, v in list(self.params.items()):
# Parameters named like FSPS params but that we reserve for use
# here. Do not pass them to FSPS.
if k in self.reserved_params:
continue
# Otherwise if a parameter exists in the FSPS parameter set, pass a
# copy of it in.
if k in self.ssp.params.all_params:
v = np.atleast_1d(v)
try:
# Try to pull the relevant component.
this_v = v[component_index]
except(IndexError):
                    # Not enough elements; use the last element.
this_v = v[-1]
except(TypeError):
# It was scalar, use that value for all components
this_v = v
self.ssp.params[k] = deepcopy(this_v)
def get_galaxy_spectrum(self, **params):
"""Update parameters, then loop over each component getting a spectrum
for each and sum with appropriate weights.
:param params:
A parameter dictionary that gets passed to the ``self.update``
method and will generally include physical parameters that control
the stellar population and output spectrum or SED.
:returns wave:
Wavelength in angstroms.
:returns spectrum:
Spectrum in units of Lsun/Hz/solar masses formed.
:returns mass_fraction:
Fraction of the formed stellar mass that still exists.
"""
self.update(**params)
spectra, linelum = [], []
mass = np.atleast_1d(self.params['mass']).copy()
mfrac = np.zeros_like(mass)
# Loop over mass components
for i, m in enumerate(mass):
self.update_component(i)
wave, spec = self.ssp.get_spectrum(tage=self.ssp.params['tage'],
peraa=False)
spectra.append(spec)
mfrac[i] = (self.ssp.stellar_mass)
linelum.append(self.ssp.emline_luminosity)
# Convert normalization units from per stellar mass to per mass formed
if np.all(self.params.get('mass_units', 'mformed') == 'mstar'):
mass /= mfrac
spectrum = np.dot(mass, np.array(spectra)) / mass.sum()
self._line_specific_luminosity = np.dot(mass, np.array(linelum)) / mass.sum()
mfrac_sum = np.dot(mass, mfrac) / mass.sum()
return wave, spectrum, mfrac_sum
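    # Illustrative sketch (editor addition): with two components of mass
    # [1e10, 1e9] Msun and per-mass-formed spectra f1, f2, the combination above
    # is
    #   spectrum = (1e10 * f1 + 1e9 * f2) / 1.1e10
    # i.e. a mass-weighted mean of the per-mass spectra; multiplying by the
    # total mass (done later in SSPBasis.get_spectrum) recovers the full sum.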
class MultiComponentCSPBasis(CSPSpecBasis):
"""Similar to :py:class`CSPSpecBasis`, a class for combinations of N composite stellar
populations (including single-age populations). The number of composite
stellar populations is given by the length of the `mass` parameter.
    However, in MultiComponentCSPBasis the SEDs of the different components are
tracked, and in get_spectrum() photometry can be drawn from a given
component or from the sum.
"""
def get_galaxy_spectrum(self, **params):
"""Update parameters, then loop over each component getting a spectrum
for each. Return all the component spectra, plus the sum.
:param params:
A parameter dictionary that gets passed to the ``self.update``
method and will generally include physical parameters that control
the stellar population and output spectrum or SED, some of which
            may be vectors for the different components.
:returns wave:
Wavelength in angstroms.
:returns spectrum:
Spectrum in units of Lsun/Hz/solar masses formed. ndarray of
shape(ncomponent+1, nwave). The last element is the sum of the
previous elements.
:returns mass_fraction:
Fraction of the formed stellar mass that still exists, ndarray of
shape (ncomponent+1,)
"""
self.update(**params)
spectra = []
mass = np.atleast_1d(self.params['mass']).copy()
mfrac = np.zeros_like(mass)
# Loop over mass components
for i, m in enumerate(mass):
self.update_component(i)
wave, spec = self.ssp.get_spectrum(tage=self.ssp.params['tage'],
peraa=False)
spectra.append(spec)
mfrac[i] = (self.ssp.stellar_mass)
# Convert normalization units from per stellar mass to per mass formed
if np.all(self.params.get('mass_units', 'mformed') == 'mstar'):
mass /= mfrac
spectrum = np.dot(mass, np.array(spectra)) / mass.sum()
mfrac_sum = np.dot(mass, mfrac) / mass.sum()
return wave, np.squeeze(spectra + [spectrum]), np.squeeze(mfrac.tolist() + [mfrac_sum])
def get_spectrum(self, outwave=None, filters=None, component=-1, **params):
"""Get a spectrum and SED for the given params, choosing from different
possible components.
:param outwave: (default: None)
Desired *vacuum* wavelengths. Defaults to the values in
`sps.wavelength`.
:param peraa: (default: False)
If `True`, return the spectrum in erg/s/cm^2/AA instead of AB
maggies.
:param filters: (default: None)
A list of filter objects for which you'd like photometry to be
calculated.
:param component: (optional, default: -1)
An optional array where each element gives the index of the
component from which to choose the magnitude. scalar or iterable
of same length as `filters`
:param **params:
Optional keywords giving parameter values that will be used to
generate the predicted spectrum.
:returns spec:
Observed frame component spectra in AB maggies, unless `peraa=True` in which
case the units are erg/s/cm^2/AA. (ncomp+1, nwave)
:returns phot:
Observed frame photometry in AB maggies, ndarray of shape (ncomp+1, nfilters)
:returns mass_frac:
The ratio of the surviving stellar mass to the total mass formed.
"""
# Spectrum in Lsun/Hz per solar mass formed, restframe
wave, spectrum, mfrac = self.get_galaxy_spectrum(**params)
# Redshifting + Wavelength solution
# We do it ourselves.
a = 1 + self.params.get('zred', 0)
af = a
b = 0.0
if 'wavecal_coeffs' in self.params:
x = wave - wave.min()
x = 2.0 * (x / x.max()) - 1.0
c = np.insert(self.params['wavecal_coeffs'], 0, 0)
            # assume the coefficients give shifts in km/s
b = chebval(x, c) / (lightspeed*1e-13)
wa, sa = wave * (a + b), spectrum * af # Observed Frame
if outwave is None:
outwave = wa
# Observed frame photometry, as absolute maggies
if filters is not None:
# Magic to only do filter projections for unique filters, and get a
# mapping back into this list of unique filters
# note that this may scramble order of unique_filters
fnames = [f.name for f in filters]
unique_names, uinds, filter_ind = np.unique(fnames, return_index=True, return_inverse=True)
unique_filters = np.array(filters)[uinds]
mags = getSED(wa, lightspeed/wa**2 * sa * to_cgs, unique_filters)
phot = np.atleast_1d(10**(-0.4 * mags))
else:
phot = 0.0
filter_ind = 0
# Distance dimming and unit conversion
zred = self.params.get('zred', 0.0)
if (zred == 0) or ('lumdist' in self.params):
# Use 10pc for the luminosity distance (or a number
# provided in the dist key in units of Mpc)
dfactor = (self.params.get('lumdist', 1e-5) * 1e5)**2
else:
lumdist = cosmo.luminosity_distance(zred).value
dfactor = (lumdist * 1e5)**2
# Spectrum will be in maggies
sa *= to_cgs / dfactor / (3631*jansky_cgs)
# Convert from absolute maggies to apparent maggies
phot /= dfactor
# Mass normalization
mass = np.atleast_1d(self.params['mass'])
mass = np.squeeze(mass.tolist() + [mass.sum()])
sa = (sa * mass[:, None])
phot = (phot * mass[:, None])[component, filter_ind]
return sa, phot, mfrac
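    # Illustrative sketch (editor addition) of the unique-filter bookkeeping in
    # get_spectrum above: for filters named ['g', 'r', 'g'],
    #   np.unique(['g', 'r', 'g'], return_index=True, return_inverse=True)
    # returns unique_names=['g', 'r'], uinds=[0, 1], filter_ind=[0, 1, 0], so
    # each unique filter is projected once, and indexing with ``filter_ind``
    # (as in ``phot[component, filter_ind]``) expands the result back onto the
    # original (possibly repeated) filter list.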
def gauss(x, mu, A, sigma):
"""Lay down mutiple gaussians on the x-axis.
"""
mu, A, sigma = np.atleast_2d(mu), np.atleast_2d(A), np.atleast_2d(sigma)
val = (A / (sigma * np.sqrt(np.pi * 2)) *
np.exp(-(x[:, None] - mu)**2 / (2 * sigma**2)))
return val.sum(axis=-1)
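# -----------------------------------------------------------------------------
# Illustrative sketch (editor addition, not part of the original module): the
# ``gauss`` helper sums one Gaussian per (mu, A, sigma) triple on a common x
# grid, with A acting as the line area.  Because this module uses relative
# imports, run it as  python -m prospect.sources.galaxy_basis
if __name__ == "__main__":
    x = np.linspace(6500., 6620., 1201)
    # Two toy emission lines with areas 1.0 and 3.0; the integrated profile
    # should therefore be close to 4.0.
    profile = gauss(x, [6548., 6584.], [1.0, 3.0], [2.0, 2.0])
    print("integrated flux ~", profile.sum() * (x[1] - x[0]))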
| 10,936 | 37.921708 | 103 |
py
|
prospector
|
prospector-master/prospect/sources/ssp_basis.py
|
from copy import deepcopy
import numpy as np
from numpy.polynomial.chebyshev import chebval
from ..utils.smoothing import smoothspec
from .constants import cosmo, lightspeed, jansky_cgs, to_cgs_at_10pc
try:
import fsps
from sedpy.observate import getSED
except(ImportError):
pass
__all__ = ["SSPBasis", "FastSSPBasis", "FastStepBasis",
"MultiSSPBasis"]
to_cgs = to_cgs_at_10pc
class SSPBasis(object):
"""This is a class that wraps the fsps.StellarPopulation object, which is
used for producing SSPs. The ``fsps.StellarPopulation`` object is accessed
as ``SSPBasis().ssp``.
This class allows for the custom calculation of relative SSP weights (by
overriding ``all_ssp_weights``) to produce spectra from arbitrary composite
SFHs. Alternatively, the entire ``get_galaxy_spectrum`` method can be
overridden to produce a galaxy spectrum in some other way, for example
taking advantage of weight calculations within FSPS for tabular SFHs or for
    parametric SFHs.
The base implementation here produces an SSP interpolated to the age given
by ``tage``, with initial mass given by ``mass``. However, this is much
slower than letting FSPS calculate the weights, as implemented in
:py:class:`FastSSPBasis`.
Furthermore, smoothing, redshifting, and filter projections are handled
outside of FSPS, allowing for fast and more flexible algorithms.
:param reserved_params:
These are parameters which have names like the FSPS parameters but will
not be passed to the StellarPopulation object because we are overriding
their functionality using (hopefully more efficient) custom algorithms.
"""
def __init__(self, zcontinuous=1, reserved_params=['tage', 'sigma_smooth'],
interp_type='logarithmic', flux_interp='linear',
mint_log=-3, compute_vega_mags=False,
**kwargs):
"""
:param interp_type: (default: "logarithmic")
Specify whether to linearly interpolate the SSPs in log(t) or t.
For the latter, set this to "linear".
:param flux_interp': (default: "linear")
Whether to compute the final spectrum as \sum_i w_i f_i or
e^{\sum_i w_i ln(f_i)}. Basically you should always do the former,
which is the default.
:param mint_log: (default: -3)
The log of the age (in years) of the youngest SSP. Note that the
SSP at this age is assumed to have the same spectrum as the minimum
            age SSP available from fsps. Typically anything less than 4 or so
is fine for this parameter, since the integral converges as log(t)
-> -inf
:param reserved_params:
These are parameters which have names like the FSPS parameters but
will not be passed to the StellarPopulation object because we are
overriding their functionality using (hopefully more efficient)
custom algorithms.
"""
self.interp_type = interp_type
self.mint_log = mint_log
self.flux_interp = flux_interp
self.ssp = fsps.StellarPopulation(compute_vega_mags=compute_vega_mags,
zcontinuous=zcontinuous)
self.ssp.params['sfh'] = 0
self.reserved_params = reserved_params
self.params = {}
self.update(**kwargs)
def update(self, **params):
"""Update the parameters, passing the *unreserved* FSPS parameters
through to the ``fsps.StellarPopulation`` object.
:param params:
A parameter dictionary.
"""
for k, v in params.items():
# try to make parameters scalar
try:
if (len(v) == 1) and callable(v[0]):
self.params[k] = v[0]
else:
self.params[k] = np.squeeze(v)
except:
self.params[k] = v
# Parameters named like FSPS params but that we reserve for use
# here. Do not pass them to FSPS.
if k in self.reserved_params:
continue
# Otherwise if a parameter exists in the FSPS parameter set, pass a
# copy of it in.
if k in self.ssp.params.all_params:
self.ssp.params[k] = deepcopy(v)
# We use FSPS for SSPs !!ONLY!!
# except for FastStepBasis. And CSPSpecBasis. and...
# assert self.ssp.params['sfh'] == 0
def get_galaxy_spectrum(self, **params):
"""Update parameters, then multiply SSP weights by SSP spectra and
stellar masses, and sum.
:returns wave:
Wavelength in angstroms.
:returns spectrum:
Spectrum in units of Lsun/Hz/solar masses formed.
:returns mass_fraction:
Fraction of the formed stellar mass that still exists.
"""
self.update(**params)
# Get the SSP spectra and masses (caching the latter), adding an extra
# mass and spectrum for t=0, using the first SSP spectrum.
wave, ssp_spectra = self.ssp.get_spectrum(tage=0, peraa=False)
ssp_spectra = np.vstack([ssp_spectra[0, :], ssp_spectra])
self.ssp_stellar_masses = np.insert(self.ssp.stellar_mass, 0, 1.0)
if self.flux_interp == 'logarithmic':
ssp_spectra = np.log(ssp_spectra)
# Get weighted sum of spectra, adding the t=0 spectrum using the first SSP.
weights = self.all_ssp_weights
spectrum = np.dot(weights, ssp_spectra) / weights.sum()
if self.flux_interp == 'logarithmic':
spectrum = np.exp(spectrum)
# Get the weighted stellar_mass/mformed ratio
mass_frac = (self.ssp_stellar_masses * weights).sum() / weights.sum()
return wave, spectrum, mass_frac
def get_galaxy_elines(self):
"""Get the wavelengths and specific emission line luminosity of the nebular emission lines
predicted by FSPS. These lines are in units of Lsun/solar mass formed.
This assumes that `get_galaxy_spectrum` has already been called.
:returns ewave:
The *restframe* wavelengths of the emission lines, AA
:returns elum:
Specific luminosities of the nebular emission lines,
Lsun/stellar mass formed
"""
ewave = self.ssp.emline_wavelengths
# This allows subclasses to set their own specific emission line
# luminosities within other methods, e.g., get_galaxy_spectrum, by
        # populating the `_line_specific_luminosity` attribute.
elum = getattr(self, "_line_specific_luminosity", None)
if elum is None:
elum = self.ssp.emline_luminosity.copy()
if elum.ndim > 1:
elum = elum[0]
if self.ssp.params["sfh"] == 3:
# tabular sfh
mass = np.sum(self.params.get('mass', 1.0))
elum /= mass
return ewave, elum
def get_spectrum(self, outwave=None, filters=None, peraa=False, **params):
"""Get a spectrum and SED for the given params.
:param outwave: (default: None)
Desired *vacuum* wavelengths. Defaults to the values in
`sps.wavelength`.
:param peraa: (default: False)
If `True`, return the spectrum in erg/s/cm^2/AA instead of AB
maggies.
:param filters: (default: None)
A list of filter objects for which you'd like photometry to be calculated.
:param **params:
Optional keywords giving parameter values that will be used to
generate the predicted spectrum.
:returns spec:
Observed frame spectrum in AB maggies, unless `peraa=True` in which
case the units are erg/s/cm^2/AA.
:returns phot:
Observed frame photometry in AB maggies.
:returns mass_frac:
The ratio of the surviving stellar mass to the total mass formed.
"""
# Spectrum in Lsun/Hz per solar mass formed, restframe
wave, spectrum, mfrac = self.get_galaxy_spectrum(**params)
# Redshifting + Wavelength solution
# We do it ourselves.
a = 1 + self.params.get('zred', 0)
af = a
b = 0.0
if 'wavecal_coeffs' in self.params:
x = wave - wave.min()
x = 2.0 * (x / x.max()) - 1.0
c = np.insert(self.params['wavecal_coeffs'], 0, 0)
# assume coeeficients give shifts in km/s
b = chebval(x, c) / (lightspeed*1e-13)
wa, sa = wave * (a + b), spectrum * af # Observed Frame
if outwave is None:
outwave = wa
# Observed frame photometry, as absolute maggies
if filters is not None:
mags = getSED(wa, lightspeed/wa**2 * sa * to_cgs, filters)
phot = np.atleast_1d(10**(-0.4 * mags))
else:
phot = 0.0
# Spectral smoothing.
do_smooth = (('sigma_smooth' in self.params) and
('sigma_smooth' in self.reserved_params))
if do_smooth:
# We do it ourselves.
smspec = self.smoothspec(wa, sa, self.params['sigma_smooth'],
outwave=outwave, **self.params)
elif outwave is not wa:
# Just interpolate
smspec = np.interp(outwave, wa, sa, left=0, right=0)
else:
# no interpolation necessary
smspec = sa
# Distance dimming and unit conversion
zred = self.params.get('zred', 0.0)
if (zred == 0) or ('lumdist' in self.params):
# Use 10pc for the luminosity distance (or a number
# provided in the dist key in units of Mpc)
dfactor = (self.params.get('lumdist', 1e-5) * 1e5)**2
else:
lumdist = cosmo.luminosity_distance(zred).value
dfactor = (lumdist * 1e5)**2
if peraa:
# spectrum will be in erg/s/cm^2/AA
smspec *= to_cgs / dfactor * lightspeed / outwave**2
else:
# Spectrum will be in maggies
smspec *= to_cgs / dfactor / (3631*jansky_cgs)
# Convert from absolute maggies to apparent maggies
phot /= dfactor
# Mass normalization
mass = np.sum(self.params.get('mass', 1.0))
if np.all(self.params.get('mass_units', 'mformed') == 'mstar'):
# Convert input normalization units from current stellar mass to mass formed
mass /= mfrac
return smspec * mass, phot * mass, mfrac
@property
def all_ssp_weights(self):
"""Weights for a single age population. This is a slow way to do this!
"""
if self.interp_type == 'linear':
sspages = np.insert(10**self.logage, 0, 0)
tb = self.params['tage'] * 1e9
elif self.interp_type == 'logarithmic':
sspages = np.insert(self.logage, 0, self.mint_log)
tb = np.log10(self.params['tage']) + 9
ind = np.searchsorted(sspages, tb) # index of the higher bracketing lookback time
dt = (sspages[ind] - sspages[ind - 1])
ww = np.zeros(len(sspages))
ww[ind - 1] = (sspages[ind] - tb) / dt
ww[ind] = (tb - sspages[ind-1]) / dt
return ww
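    # Illustrative sketch (editor addition): with logarithmic interpolation and
    # SSP grid ages log10(t/yr) = [..., 8.0, 8.5, ...], a population with
    # tage = 10**8.2 yr gives tb = 8.2, bracketed by 8.0 and 8.5 (dt = 0.5), so
    #   weight(log age 8.0) = (8.5 - 8.2) / 0.5 = 0.6
    #   weight(log age 8.5) = (8.2 - 8.0) / 0.5 = 0.4
    # i.e. the weights are linear in log(age) and sum to one.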
def smoothspec(self, wave, spec, sigma, outwave=None, **kwargs):
outspec = smoothspec(wave, spec, sigma, outwave=outwave, **kwargs)
return outspec
@property
def logage(self):
return self.ssp.ssp_ages.copy()
@property
def wavelengths(self):
return self.ssp.wavelengths.copy()
class FastSSPBasis(SSPBasis):
"""A subclass of :py:class:`SSPBasis` that is a faster way to do SSP models by letting
FSPS do the weight calculations.
"""
def get_galaxy_spectrum(self, **params):
self.update(**params)
wave, spec = self.ssp.get_spectrum(tage=float(self.params['tage']), peraa=False)
return wave, spec, self.ssp.stellar_mass
class FastStepBasis(SSPBasis):
"""Subclass of :py:class:`SSPBasis` that implements a "nonparameteric"
(i.e. binned) SFH. This is accomplished by generating a tabular SFH with
the proper form to be passed to FSPS. The key parameters for this SFH are:
    * ``agebins`` - array of shape ``(nbin, 2)`` giving the younger and older
(in lookback time) edges of each bin in log10(years)
* ``mass`` - array of shape ``(nbin,)`` giving the total stellar mass
(in solar masses) **formed** in each bin.
"""
def get_galaxy_spectrum(self, **params):
"""Construct the tabular SFH and feed it to the ``ssp``.
"""
self.update(**params)
mtot = self.params['mass'].sum()
time, sfr, tmax = self.convert_sfh(self.params['agebins'], self.params['mass'])
self.ssp.params["sfh"] = 3 # Hack to avoid rewriting the superclass
self.ssp.set_tabular_sfh(time, sfr)
wave, spec = self.ssp.get_spectrum(tage=tmax, peraa=False)
return wave, spec / mtot, self.ssp.stellar_mass / mtot
def convert_sfh(self, agebins, mformed, epsilon=1e-4, maxage=None):
"""Given arrays of agebins and formed masses with each bin, calculate a
tabular SFH. The resulting time vector has time points either side of
each bin edge with a "closeness" defined by a parameter epsilon.
:param agebins:
An array of bin edges, log(yrs). This method assumes that the
upper edge of one bin is the same as the lower edge of another bin.
ndarray of shape ``(nbin, 2)``
:param mformed:
The stellar mass formed in each bin. ndarray of shape ``(nbin,)``
:param epsilon: (optional, default 1e-4)
A small number used to define the fraction time separation of
adjacent points at the bin edges.
:param maxage: (optional, default: ``None``)
A maximum age of stars in the population, in yrs. If ``None`` then the maximum
value of ``agebins`` is used. Note that an error will occur if maxage
< the maximum age in agebins.
:returns time:
The output time array for use with sfh=3, in Gyr. ndarray of shape (2*N)
:returns sfr:
The output sfr array for use with sfh=3, in M_sun/yr. ndarray of shape (2*N)
:returns maxage:
The maximum valid age in the returned isochrone.
"""
#### create time vector
agebins_yrs = 10**agebins.T
dt = agebins_yrs[1, :] - agebins_yrs[0, :]
bin_edges = np.unique(agebins_yrs)
if maxage is None:
maxage = agebins_yrs.max() # can replace maxage with something else, e.g. tuniv
t = np.concatenate((bin_edges * (1.-epsilon), bin_edges * (1+epsilon)))
t.sort()
t = t[1:-1] # remove older than oldest bin, younger than youngest bin
fsps_time = maxage - t
#### calculate SFR at each t
sfr = mformed / dt
sfrout = np.zeros_like(t)
sfrout[::2] = sfr
sfrout[1::2] = sfr # * (1+epsilon)
return (fsps_time / 1e9)[::-1], sfrout[::-1], maxage / 1e9
class MultiSSPBasis(SSPBasis):
"""An array of basis spectra with different ages, metallicities, and possibly dust
attenuations.
"""
def get_galaxy_spectrum(self):
raise(NotImplementedError)
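# -----------------------------------------------------------------------------
# Illustrative sketch (editor addition, not part of the original module): the
# tabular-SFH construction done by FastStepBasis.convert_sfh, repeated with
# plain numpy so it can be inspected without an FSPS installation.  Because
# this module uses relative imports, run it as  python -m prospect.sources.ssp_basis
if __name__ == "__main__":
    # Two bins in lookback time, log10(years): 0-100 Myr and 100 Myr-1 Gyr,
    # with 1e8 and 1e9 Msun formed in each bin respectively.
    agebins = np.array([[0., 8.], [8., 9.]])
    mformed = np.array([1e8, 1e9])
    epsilon = 1e-4
    agebins_yrs = 10**agebins.T
    dt = agebins_yrs[1, :] - agebins_yrs[0, :]
    bin_edges = np.unique(agebins_yrs)
    maxage = agebins_yrs.max()
    t = np.concatenate((bin_edges * (1. - epsilon), bin_edges * (1 + epsilon)))
    t.sort()
    t = t[1:-1]
    fsps_time = maxage - t
    sfr = mformed / dt
    sfrout = np.zeros_like(t)
    sfrout[::2] = sfr
    sfrout[1::2] = sfr
    # Time runs forward in Gyr; the SFR is constant within each bin (Msun/yr).
    print("time [Gyr]:", (fsps_time / 1e9)[::-1])
    print("SFR [Msun/yr]:", sfrout[::-1])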
| 15,536 | 38.234848 | 98 |
py
|
prospector
|
prospector-master/prospect/sources/boneyard.py
|
import numpy as np
from copy import deepcopy
from scipy.special import expi, gammainc
from .ssp_basis import SSPBasis
# constants and optional dependencies needed by the deprecated CSPBasis below
from .constants import cosmo, lightspeed, jansky_cgs, to_cgs_at_10pc
# change base
from .constants import loge
try:
    import fsps
    from sedpy.observate import getSED
except(ImportError):
    pass
__all__ = ["CSPBasis", "StepSFHBasis", "CompositeSFH", "LinearSFHBasis"]
to_cgs = to_cgs_at_10pc
class CSPBasis(object):
"""
A class for composite stellar populations, which can be composed from
    multiple versions of parameterized SFHs. Deprecated; use CSPSpecBasis instead.
"""
def __init__(self, compute_vega_mags=False, zcontinuous=1, vactoair_flag=False, **kwargs):
# This is a StellarPopulation object from fsps
self.csp = fsps.StellarPopulation(compute_vega_mags=compute_vega_mags,
zcontinuous=zcontinuous,
vactoair_flag=vactoair_flag)
self.params = {}
def get_spectrum(self, outwave=None, filters=None, peraa=False, **params):
"""Given a theta vector, generate spectroscopy, photometry and any
extras (e.g. stellar mass).
:param theta:
ndarray of parameter values.
:param sps:
A python-fsps StellarPopulation object to be used for
generating the SED.
:returns spec:
The restframe spectrum in units of maggies.
:returns phot:
The apparent (redshifted) observed frame maggies in each of the
filters.
:returns extras:
A list of the ratio of existing stellar mass to total mass formed
for each component, length ncomp.
"""
self.params.update(**params)
# Pass the model parameters through to the sps object
ncomp = len(self.params['mass'])
for ic in range(ncomp):
s, p, x = self.one_sed(component_index=ic, filterlist=filters)
try:
spec += s
maggies += p
extra += [x]
except(NameError):
spec, maggies, extra = s, p, [x]
# `spec` is now in Lsun/Hz, with the wavelength array being the
# observed frame wavelengths. Flux array (and maggies) have not been
# increased by (1+z) due to cosmological redshift
        w = self.csp.wavelengths
if outwave is not None:
spec = np.interp(outwave, w, spec)
else:
outwave = w
# Distance dimming and unit conversion
zred = self.params.get('zred', 0.0)
if (zred == 0) or ('lumdist' in self.params):
# Use 10pc for the luminosity distance (or a number provided in the
# lumdist key in units of Mpc). Do not apply cosmological (1+z)
# factor to the flux.
dfactor = (self.params.get('lumdist', 1e-5) * 1e5)**2
a = 1.0
else:
            # Use the cosmological luminosity distance implied by this
# redshift. Cosmological (1+z) factor on the flux was already done in one_sed
lumdist = cosmo.luminosity_distance(zred).value
dfactor = (lumdist * 1e5)**2
if peraa:
# spectrum will be in erg/s/cm^2/AA
spec *= to_cgs / dfactor * lightspeed / outwave**2
else:
# Spectrum will be in maggies
spec *= to_cgs / dfactor / (3631*jansky_cgs)
# Convert from absolute maggies to apparent maggies
maggies /= dfactor
return spec, maggies, extra
def one_sed(self, component_index=0, filterlist=[]):
"""Get the SED of one component for a multicomponent composite SFH.
Should set this up to work as an iterator.
:param component_index:
Integer index of the component to calculate the SED for.
:param filterlist:
A list of strings giving the (FSPS) names of the filters onto which
the spectrum will be projected.
:returns spec:
The restframe spectrum in units of Lsun/Hz.
:returns maggies:
Broadband fluxes through the filters named in ``filterlist``,
ndarray. Units are observed frame absolute maggies: M = -2.5 *
log_{10}(maggies).
:returns extra:
The extra information corresponding to this component.
"""
# Pass the model parameters through to the sps object, and keep track
# of the mass of this component
mass = 1.0
for k, vs in list(self.params.items()):
try:
v = vs[component_index]
except(IndexError, TypeError):
v = vs
if k in self.csp.params.all_params:
self.csp.params[k] = deepcopy(v)
if k == 'mass':
mass = v
# Now get the spectrum. The spectrum is in units of
# Lsun/Hz/per solar mass *formed*, and is restframe
w, spec = self.csp.get_spectrum(tage=self.csp.params['tage'], peraa=False)
# redshift and get photometry. Note we are boosting fnu by (1+z) *here*
a, b = (1 + self.csp.params['zred']), 0.0
wa, sa = w * (a + b), spec * a # Observed Frame
if filterlist is not None:
mags = getSED(wa, lightspeed/wa**2 * sa * to_cgs, filterlist)
phot = np.atleast_1d(10**(-0.4 * mags))
else:
phot = 0.0
# now some mass normalization magic
mfrac = self.csp.stellar_mass
if np.all(self.params.get('mass_units', 'mstar') == 'mstar'):
            # Convert input normalization units from per stellar mass to per mass formed
mass /= mfrac
# Output correct units
return mass * sa, mass * phot, mfrac
class StepSFHBasis(SSPBasis):
"""Subclass of SSPBasis that computes SSP weights for piecewise constant
SFHs (i.e. a binned SFH). The parameters for this SFH are:
* `agebins` - array of shape (nbin, 2) giving the younger and older (in
lookback time) edges of each bin. If `interp_type` is `"linear"',
these are assumed to be in years. Otherwise they are in log10(years)
* `mass` - array of shape (nbin,) giving the total surviving stellar mass
(in solar masses) in each bin, unless the `mass_units` parameter is set
to something different `"mstar"`, in which case the units are assumed
to be total stellar mass *formed* in each bin.
The `agebins` parameter *must not be changed* without also setting
`self._ages=None`.
"""
@property
def all_ssp_weights(self):
# Cache age bins and relative weights. This means params['agebins']
# *must not change* without also setting _ages = None
if getattr(self, '_ages', None) is None:
self._ages = self.params['agebins']
nbin, nssp = len(self._ages), len(self.logage) + 1
self._bin_weights = np.zeros([nbin, nssp])
for i, (t1, t2) in enumerate(self._ages):
# These *should* sum to one (or zero) for each bin
self._bin_weights[i, :] = self.bin_weights(t1, t2)
# Now normalize the weights in each bin by the mass parameter, and sum
# over bins.
bin_masses = self.params['mass']
if np.all(self.params.get('mass_units', 'mformed') == 'mstar'):
# Convert from mstar to mformed for each bin. We have to do this
# here as well as in get_spectrum because the *relative*
# normalization in each bin depends on the units, as well as the
# overall normalization.
bin_masses /= self.bin_mass_fraction
w = (bin_masses[:, None] * self._bin_weights).sum(axis=0)
return w
@property
def bin_mass_fraction(self):
"""Return the ratio m_star(surviving) / m_formed for each bin.
"""
try:
mstar = self.ssp_stellar_masses
w = self._bin_weights
bin_mfrac = (mstar[None, :] * w).sum(axis=-1) / w.sum(axis=-1)
return bin_mfrac
except(AttributeError):
            print('agebin info or ssp masses not cached?')
return 1.0
def bin_weights(self, amin, amax):
"""Compute normalizations required to get a piecewise constant SFH
within an age bin. This is super complicated and obscured. The output
weights are such that one solar mass will have formed during the bin
(i.e. SFR = 1/(amax-amin))
This computes weights using \int_tmin^tmax dt (\log t_i - \log t) /
(\log t_{i+1} - \log t_i) but see sfh.tex for the detailed calculation
and the linear time interpolation case.
"""
if self.interp_type == 'linear':
sspages = np.insert(10**self.logage, 0, 0)
func = constant_linear
mass = amax - amin
elif self.interp_type == 'logarithmic':
sspages = np.insert(self.logage, 0, self.mint_log)
func = constant_logarithmic
mass = 10**amax - 10**amin
assert amin >= sspages[0]
assert amax <= sspages.max()
# below could be done by using two separate dt vectors instead of two
# age vectors
ages = np.array([sspages[:-1], sspages[1:]])
dt = np.diff(ages, axis=0)
tmin, tmax = np.clip(ages, amin, amax)
# get contributions from SSP sub-bin to the left and from SSP sub-bin
# to the right
left, right = (func(ages, tmax) - func(ages, tmin)) / dt
# put into full array
ww = np.zeros(len(sspages))
ww[:-1] += right # last element has no sub-bin to the right
ww[1:] += -left # need to flip sign
# normalize to 1 solar mass formed and return
return ww / mass
class CompositeSFH(SSPBasis):
"""Subclass of SSPBasis that computes SSP weights for a parameterized SF.
The parameters for this SFH are:
* `sfh_type` - String of "delaytau", "tau", "simha"
* `tage`, `sf_trunc`, `sf_slope`, `const`, `fburst`, `tau`
* `mass` -
"""
def configure(self):
"""This reproduces FSPS-like combinations of SFHs. Note that the
*same* parameter set is passed to each component in the combination
"""
sfhs = [self.sfh_type]
limits = len(sfhs) * ['regular']
if 'simha' in self.sfh_type:
sfhs = ['delaytau', 'linear']
limits = ['regular', 'simha']
fnames = ['{0}_{1}'.format(f, self.interp_type) for f in sfhs]
lnames = ['{}_limits'.format(f) for f in limits]
self.funcs = [globals()[f] for f in fnames]
self.limits = [globals()[f] for f in lnames]
if self.interp_type == 'linear':
sspages = np.insert(10**self.logage, 0, 0)
elif self.interp_type == 'logarithmic':
sspages = np.insert(self.logage, 0, self.mint_log)
self.ages = np.array([sspages[:-1], sspages[1:]])
self.dt = np.diff(self.ages, axis=0)
@property
def _limits(self):
pass
@property
def _funcs(self):
pass
@property
def all_ssp_weights(self):
# Full output weight array. We keep separate vectors for each
# component so we can renormalize after the loop, but for many
# components it would be better to renormalize and sum within the loop
ww = np.zeros([len(self.funcs), self.ages.shape[-1] + 1])
# Loop over components. Note we are sending the same params to every component
for i, (limit, func) in enumerate(zip(self.limits, self.funcs)):
ww[i, :] = self.ssp_weights(func, limit, self.params)
# renormalize each component to 1 Msun
assert np.all(ww >= 0)
wsum = ww.sum(axis=1)
# unless truly no SF in the component
if 0 in wsum:
wsum[wsum == 0] = 1.0
ww /= wsum[:, None]
# apply relative normalizations
ww *= self.normalizations(**self.params)[:, None]
# And finally add all components together and renormalize again to
# 1Msun and return
return ww.sum(axis=0) / ww.sum()
def ssp_weights(self, integral, limit_function, params, **extras):
# build full output weight vector
ww = np.zeros(self.ages.shape[-1] + 1)
tmin, tmax = limit_function(self.ages, mint_log=self.mint_log,
interp_type=self.interp_type, **params)
left, right = (integral(self.ages, tmax, **params) -
integral(self.ages, tmin, **params)) / self.dt
# Put into full array, shifting the `right` terms by 1 element
ww[:-1] += right # last SSP has no sub-bin to the right
ww[1:] += -left # need to flip sign
# Note that now ww[i,1] = right[1] - left[0], where
# left[0] is the integral from tmin,0 to tmax,0 of
# SFR(t) * (sspages[0] - t)/(sspages[1] - sspages[0]) and
# right[1] is the integral from tmin,1 to tmax,1 of
# SFR(t) * (sspages[2] - t)/(sspages[2] - sspages[1])
return ww
def normalizations(self, tage=0., sf_trunc=0, sf_slope=0, const=0,
fburst=0, tau=0., **extras):
if (sf_trunc <= 0) or (sf_trunc > tage):
Tmax = tage
else:
Tmax = sf_trunc
# Tau models. SFH=1 -> power=1; SFH=4,5 -> power=2
if ('delay' in self.sfh_type) or ('simha' in self.sfh_type):
power = 2.
else:
power = 1.
mass_tau = tau * gammainc(power, Tmax/tau)
if 'simha' not in self.sfh_type:
return np.array([mass_tau])
# SFR at Tmax
sfr_q = (Tmax/tau)**(power-1) * np.exp(-Tmax/tau)
# linear. integral of (1 - m * (T - Tmax)) from Tmax to Tzero
if sf_slope == 0.:
Tz = tage
else:
Tz = Tmax + 1/np.float64(sf_slope)
if (Tz < Tmax) or (Tz > tage) or (not np.isfinite(Tz)):
Tz = tage
m = sf_slope
mass_linear = (Tz - Tmax) - m/2.*(Tz**2 + Tmax**2) + m*Tz*Tmax
# normalize the linear portion relative to the tau portion
norms = np.array([1, mass_linear * sfr_q / mass_tau])
norms /= norms.sum()
# now add in constant and burst
if (const > 0) or (fburst > 0):
norms = (1-fburst-const) * norms
            norms = np.append(norms, [const, fburst])
return np.array(norms)
class LinearSFHBasis(SSPBasis):
"""Subclass of SSPBasis that computes SSP weights for piecewise linear SFHs
(i.e. a linearly interpolated tabular SFH). The parameters for this SFH
are:
* `ages` - array of shape (ntab,) giving the lookback time of each
tabulated SFR. If `interp_type` is `"linear"', these are assumed to be
in years. Otherwise they are in log10(years)
* `sfr` - array of shape (ntab,) giving the SFR (in Msun/yr)
* `logzsol`
* `dust2`
"""
def get_galaxy_spectrum(self):
raise(NotImplementedError)
def regular_limits(ages, tage=0., sf_trunc=0., mint_log=-3,
interp_type='logarithmic', **extras):
# get the truncation time in units of lookback time
if (sf_trunc <= 0) or (sf_trunc > tage):
tq = 0
else:
tq = tage - sf_trunc
if interp_type == 'logarithmic':
tq = np.log10(np.max([tq, 10**mint_log]))
tage = np.log10(np.max([tage, 10**mint_log]))
return np.clip(ages, tq, tage)
def simha_limits(ages, tage=0., sf_trunc=0, sf_slope=0., mint_log=-3,
interp_type='logarithmic', **extras):
# get the truncation time in units of lookback time
if (sf_trunc <= 0) or (sf_trunc > tage):
tq = 0
else:
tq = tage - sf_trunc
t0 = tq - 1. / np.float64(sf_slope)
if (t0 > tq) or (t0 <= 0) or (not np.isfinite(t0)):
t0 = 0.
if interp_type == 'logarithmic':
tq = np.log10(np.max([tq, 10**mint_log]))
t0 = np.log10(np.max([t0, 10**mint_log]))
return np.clip(ages, t0, tq)
def constant_linear(ages, t, **extras):
"""Indefinite integral for SFR = 1
:param ages:
Linear age(s) of the SSPs.
:param t:
Linear time at which to evaluate the indefinite integral
"""
return ages * t - t**2 / 2
def constant_logarithmic(logages, logt, **extras):
"""SFR = 1
"""
t = 10**logt
return t * (logages - logt + loge)
def tau_linear(ages, t, tau=None, **extras):
"""SFR = e^{(tage-t)/\tau}
"""
return (ages - t + tau) * np.exp(t / tau)
def tau_logarithmic(logages, logt, tau=None, **extras):
"""SFR = e^{(tage-t)/\tau}
"""
tprime = 10**logt / tau
return (logages - logt) * np.exp(tprime) + loge * expi(tprime)
def delaytau_linear(ages, t, tau=None, tage=None, **extras):
"""SFR = (tage-t) * e^{(tage-t)/\tau}
"""
bracket = tage * ages - (tage + ages)*(t - tau) + t**2 - 2*t*tau + 2*tau**2
return bracket * np.exp(t / tau)
def delaytau_logarithmic(logages, logt, tau=None, tage=None, **extras):
"""SFR = (tage-t) * e^{(tage-t)/\tau}
"""
t = 10**logt
tprime = t / tau
a = (t - tage - tau) * (logt - logages) - tau * loge
b = (tage + tau) * loge
return a * np.exp(tprime) + b * expi(tprime)
def linear_linear(ages, t, tage=None, sf_trunc=0, sf_slope=0., **extras):
"""SFR = [1 - sf_slope * (tage-t)]
"""
tq = np.max([0, tage-sf_trunc])
k = 1 - sf_slope * tq
return k * ages * t + (sf_slope*ages - k) * t**2 / 2 - sf_slope * t**3 / 3
def linear_logarithmic(logages, logt, tage=None, sf_trunc=0, sf_slope=0., **extras):
"""SFR = [1 - sf_slope * (tage-t)]
"""
tq = np.max([0, tage-sf_trunc])
t = 10**logt
k = 1 - sf_slope * tq
term1 = k * t * (logages - logt + loge)
term2 = sf_slope * t**2 / 2 * (logages - logt + loge / 2)
return term1 + term2
def burst_linear(ages, t, tburst=None, **extras):
"""Burst. SFR = \delta(t-t_burst)
"""
return ages - tburst
def burst_logarithmic(logages, logt, tburst=None, **extras):
"""Burst. SFR = \delta(t-t_burst)
"""
return logages - np.log10(tburst)
| 18,149 | 36.192623 | 94 |
py
|
prospector
|
prospector-master/prospect/sources/__init__.py
|
from .galaxy_basis import *
from .ssp_basis import *
from .star_basis import *
from .dust_basis import *
from .boneyard import StepSFHBasis
__all__ = ["to_cgs",
"CSPSpecBasis", "MultiComponentCSPBasis",
"FastSSPBasis", "SSPBasis",
"FastStepBasis", "StepSFHBasis",
"StarBasis", "BigStarBasis",
"BlackBodyDustBasis"]
| 371 | 27.615385 | 52 |
py
|
prospector
|
prospector-master/prospect/utils/plotting.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
try:
import matplotlib.pyplot as pl
except(ImportError):
pass
__all__ = ["get_best", "get_truths", "get_percentiles", "get_stats",
"posterior_samples", "hist_samples", "joint_pdf", "compute_sigma_level",
"trim_walkers", "fill_between", "figgrid"]
def get_best(res, **kwargs):
"""Get the maximum a posteriori parameters.
"""
imax = np.argmax(res['lnprobability'])
    # there must be a more elegant way to deal with different shapes
try:
i, j = np.unravel_index(imax, res['lnprobability'].shape)
theta_best = res['chain'][i, j, :].copy()
except(ValueError):
theta_best = res['chain'][imax, :].copy()
try:
theta_names = res.get('theta_labels', res['model'].theta_labels())
except(KeyError):
theta_names = None
return theta_names, theta_best
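# Illustrative sketch, not part of the original module: a fake emcee-shaped
# results dictionary (random chain, random lnprobability, and a stand-in model
# object) run through get_best().  The parameter names and chain shape used here
# are invented for demonstration only.
def _example_get_best():
    class _StubModel(object):
        def theta_labels(self):
            return ['mass', 'tau']
    fake = {'chain': np.random.normal(size=(4, 10, 2)),
            'lnprobability': np.random.normal(size=(4, 10)),
            'model': _StubModel()}
    return get_best(fake)  # -> (['mass', 'tau'], highest-probability theta)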
def get_truths(res):
import pickle
try:
mock = pickle.loads(res['obs']['mock_params'])
res['obs']['mock_params'] = mock
except:
pass
try:
return res['obs']['mock_params']
except(KeyError):
return None
def get_percentiles(res, ptile=[16, 50, 84], start=0.0, thin=1, **extras):
"""Get get percentiles of the marginalized posterior for each parameter.
:param res:
A results dictionary, containing a "chain" and "theta_labels" keys.
:param ptile: (optional, default: [16, 50, 84])
A list of percentiles (integers 0 to 100) to return for each parameter.
    :param start: (optional, default: 0.0)
How much of the beginning of chains to throw away before calculating
percentiles, expressed as a fraction of the total number of iterations.
    :param thin: (optional, default: 1)
Only use every ``thin`` iteration when calculating percentiles.
:returns pcts:
Dictionary with keys giving the parameter names and values giving the
requested percentiles for that parameter.
"""
parnames = np.array(res.get('theta_labels', res['model'].theta_labels()))
niter = res['chain'].shape[-2]
start_index = np.floor(start * (niter-1)).astype(int)
if res["chain"].ndim > 2:
flatchain = res['chain'][:, start_index::thin, :]
dims = flatchain.shape
flatchain = flatchain.reshape(dims[0]*dims[1], dims[2])
elif res["chain"].ndim == 2:
flatchain = res["chain"][start_index::thin, :]
pct = np.array([quantile(p, ptile, weights=res.get("weights", None)) for p in flatchain.T])
return dict(zip(parnames, pct))
def quantile(data, percents, weights=None):
''' percents in units of 1%
weights specifies the frequency (count) of data.
'''
if weights is None:
return np.percentile(data, percents)
ind = np.argsort(data)
d = data[ind]
w = weights[ind]
p = 1.*w.cumsum()/w.sum()*100
y = np.interp(percents, p, d)
return y
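# Illustrative sketch with invented data: the weighted median is pulled toward
# the heavily weighted value (here 4), whereas the unweighted median would be 2.5.
def _example_weighted_quantile():
    data = np.array([1., 2., 3., 4.])
    weights = np.array([1., 1., 1., 10.])
    return quantile(data, [16, 50, 84], weights=weights)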
def get_stats(res, pnames, **kwargs):
"""For selected parameters, get the truth (if known), the MAP value from
the chain, and the percentiles.
:param res:
A results dictionary, containing a "chain" and "theta_labels" keys.
:param pnames:
List of strings giving the names of the desired parameters.
"""
truth = get_truths(res)
best = dict(zip(*get_best(res)))
pct = get_percentiles(res, **kwargs)
if truth is not None:
truths = np.array([truth[k] for k in pnames])
else:
truths = None
pcts = np.array([pct[k] for i,k in enumerate(pnames)])
bests = np.array([best[k] for i,k in enumerate(pnames)])
return pnames, truths, bests, pcts
def trim_walkers(res, threshold=-1e4):
"""Remove walkers with probability below some threshold. Useful for
removing stuck walkers
"""
good = res['lnprobability'][:, -1] > threshold
trimmed = {}
trimmed['chain'] = res['chain'][good, :, :]
trimmed['lnprobability'] = res['lnprobability'][good, :]
trimmed['model'] = res['model']
return trimmed
def joint_pdf(res, p1, p2, pmap={}, **kwargs):
"""Build a 2-dimensional array representing the binned joint PDF of 2
parameters, in terms of sigma or fraction of the total distribution.
For example, to plot contours of the joint PDF of parameters ``"parname1"``
and ``"parname2"`` from the last half of a chain with 30bins in each
dimension;
::
xb, yb, sigma = joint_pdf(res, parname1, parname2, nbins=30, start=0.5)
ax.contour(xb, yb, sigma, **plotting_kwargs)
:param p1:
The name of the parameter for the x-axis
:param p2:
The name of the parameter for the y axis
:returns xb, yb, sigma:
The bins and the 2-d histogram
"""
trace, pars = hist_samples(res, [p1, p2], **kwargs)
trace = trace.copy().T
if pars[0] == p1:
trace = trace[::-1, :]
x = pmap.get(p2, lambda x: x)(trace[0])
y = pmap.get(p1, lambda x: x)(trace[1])
xbins, ybins, sigma = compute_sigma_level(x, y, **kwargs)
return xbins, ybins, sigma.T
def posterior_samples(res, nsample=None, **kwargs):
"""Pull samples of theta from the MCMC chain
:param res:
A results dictionary, containing a "chain" and "theta_labels" keys.
:param nsample:
Number of random samples to draw.
:param **kwargs:
Extra keywords are passed to ``hist_samples``.
:returns thetas:
A list of parameter vectors pulled at random from the chain, of same
length as ``samples``.
"""
flatchain, pnames = hist_samples(res, **kwargs)
weights = res.get("weights", None)
ns = flatchain.shape[0]
if nsample is None:
nsample = ns
s = np.random.choice(ns, p=weights, size=nsample)
thetas = flatchain[s, :]
return thetas
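# Illustrative sketch, not part of the original module: draw a handful of
# posterior samples from a fake, flat (dynesty-style) chain with equal weights
# and a stand-in model object.  All inputs here are invented for demonstration.
def _example_posterior_samples():
    class _StubModel(object):
        def theta_labels(self):
            return ['mass', 'tau']
    nit = 100
    fake = {'chain': np.random.normal(size=(nit, 2)),
            'lnprobability': np.random.normal(size=nit),
            'weights': np.ones(nit) / nit,
            'model': _StubModel()}
    return posterior_samples(fake, nsample=5)  # array of shape (5, 2)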
def hist_samples(res, showpars=None, start=0, thin=1,
return_lnprob=False, **extras):
"""Get posterior samples for the parameters listed in ``showpars``. This
can be done for different ending fractions of the (thinned) chain.
:param res:
A results dictionary, containing a "chain" and "theta_labels" keys.
:param showpars:
A list of strings giving the desired parameters.
    :param start: (optional, default: 0)
How much of the beginning of chains to throw away before calculating
percentiles, expressed as a fraction of the total number of iterations.
    :param thin: (optional, default: 1)
Only use every ``thin`` iteration when calculating percentiles.
"""
parnames = np.array(res.get('theta_labels', res['model'].theta_labels()))
niter = res['chain'].shape[-2]
start_index = np.floor(start * (niter-1)).astype(int)
if res["chain"].ndim > 2:
# emcee
flatchain = res['chain'][:, start_index::thin, :]
dims = flatchain.shape
flatchain = flatchain.reshape(dims[0]*dims[1], dims[2])
flatlnprob = res['lnprobability'][:, start_index::thin].reshape(dims[0]*dims[1])
elif res["chain"].ndim == 2:
# dynesty
flatchain = res["chain"][start_index::thin, :]
flatlnprob = res['lnprobability'][start_index::thin]
if showpars is None:
ind_show = slice(None)
else:
ind_show = np.array([p in showpars for p in parnames], dtype=bool)
flatchain = flatchain[:, ind_show]
if return_lnprob:
return flatchain, parnames[ind_show], flatlnprob
return flatchain, parnames[ind_show]
def compute_sigma_level(trace1, trace2, nbins=30, weights=None, extents=None, **extras):
"""From a set of traces in two parameters, make a 2-d histogram of number
of standard deviations. Following examples from J Vanderplas.
"""
L, xbins, ybins = np.histogram2d(trace1, trace2, bins=nbins,
weights=weights,
range=extents)
L[L == 0] = 1E-16
logL = np.log(L)
shape = L.shape
L = L.ravel()
# obtain the indices to sort and unsort the flattened array
i_sort = np.argsort(L)[::-1]
i_unsort = np.argsort(i_sort)
L_cumsum = L[i_sort].cumsum()
L_cumsum /= L_cumsum[-1]
xbins = 0.5 * (xbins[1:] + xbins[:-1])
ybins = 0.5 * (ybins[1:] + ybins[:-1])
return xbins, ybins, L_cumsum[i_unsort].reshape(shape)
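# Illustrative sketch with invented traces: two correlated random variables are
# binned into the cumulative-fraction map that joint_pdf() feeds to a contour
# plot; contour levels of 0.68 and 0.95 then enclose those fractions of samples.
def _example_sigma_level():
    x = np.random.normal(size=5000)
    y = 0.5 * x + np.random.normal(scale=0.5, size=5000)
    xb, yb, sigma = compute_sigma_level(x, y, nbins=20)
    return xb, yb, sigma  # e.g. pl.contour(xb, yb, sigma.T, levels=[0.68, 0.95])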
def figgrid(ny, nx, figsize=None, left=0.1, right=0.85,
top=0.9, bottom=0.1, wspace=0.2, hspace=0.10):
"""Gridpars is
left, right
"""
from matplotlib import gridspec
if figsize is None:
figsize = (nx*4.5, ny*3)
fig = pl.figure(figsize=figsize)
axarray = np.zeros([ny, nx], dtype=np.dtype('O'))
gs1 = gridspec.GridSpec(ny, nx)
gs1.update(left=left, right=right, top=top, bottom=bottom,
wspace=wspace, hspace=hspace)
for i in range(ny):
for j in range(nx):
axarray[i, j] = fig.add_subplot(gs1[i, j])
return fig, axarray
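# Illustrative sketch: a 2x3 grid of panels with the default margins; the
# returned object array of axes is indexed as axarray[row, column].
def _example_figgrid():
    fig, axarray = figgrid(2, 3)
    axarray[0, 0].set_title('top left panel')
    return fig, axarray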
def fill_between(x, y1, y2=0, ax=None, **kwargs):
"""Plot filled region between `y1` and `y2`.
This function works exactly the same as matplotlib's fill_between, except
that it also plots a proxy artist (specifically, a rectangle of 0 size)
    so that the filled region can appear in a legend.
"""
ax = ax if ax is not None else pl.gca()
ax.fill_between(x, y1, y2, **kwargs)
p = pl.Rectangle((0, 0), 0, 0, **kwargs)
ax.add_patch(p)
return p
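# Illustrative sketch with invented data: the proxy rectangle added by
# fill_between() is what lets the shaded band appear in the legend.
def _example_fill_between():
    x = np.linspace(0., 1., 50)
    fig, ax = pl.subplots()
    fill_between(x, x - 0.1, x + 0.1, ax=ax, alpha=0.3, label='1-sigma band')
    ax.legend()
    return fig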
def logify(x):
return np.log10(x)
| 9,476 | 31.56701 | 95 |
py
|
prospector
|
prospector-master/prospect/utils/obsutils.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" obsutils.py - Utilities for manipulating observational data, especially
ensuring that the required keys are present in the `obs` dictionary.
"""
import numpy as np
import warnings
np.errstate(invalid='ignore')
__all__ = ["fix_obs", "rectify_obs", "norm_spectrum", "logify_data"]
def fix_obs(obs, rescale_spectrum=False, normalize_spectrum=False,
logify_spectrum=False, grid_filters=False, **kwargs):
"""Make all required changes to the obs dictionary.
:param obs:
The `obs` dictionary that will be fit.
:param rescale_spectrum: (optional, default:False, deprecated)
Rescale the supplied spectrum to have a median value of 1. The value
used to rescale the spectrum is added as the `"rescale"` key in the
supplied `obs` dictionary.
:param normalize_spectrum: (optional, default:False, deprecated)
Renormalize the spectrum to give the supplied magnitude through the
filter specified by `obs["norm_band_name"]`. See `norm_spectrum()` for
details.
:param logify_spectrum: (optional, default:False, deprecated)
Take the log of the spectrum and associated uncertainties, for fitting
in log-space. Note this distorts errors.
:param grid_filters: (optional, default:False)
Switch to place all filter transmission curves on a common grid of
dlnlambda, to provide small speed gains in the filter projections. The
grid is calculated automatically from the supplied filters, and is
added to the `obs` dictionary as the `"lnwavegrid"` key.
:returns obs:
An obs dictionary that has all required keys and that has been modified
according to the options described above.
"""
obs = rectify_obs(obs)
obs['ndof'] = 0
if obs['spectrum'] is not None:
obs['ndof'] += int(obs['mask'].sum())
if (rescale_spectrum):
sc = np.median(obs['spectrum'][obs['mask']])
obs['rescale'] = sc
obs['spectrum'] /= sc
obs['unc'] /= sc
if (normalize_spectrum):
sp_norm, pivot_wave = norm_spectrum(obs, **kwargs)
obs['normalization_guess'] = sp_norm
obs['pivot_wave'] = pivot_wave
if (logify_spectrum):
s, u, m = logify_data(obs['spectrum'], obs['unc'], obs['mask'])
obs['spectrum'] = s
obs['unc'] = u
obs['mask'] = m
obs['logify_spectrum'] = True
else:
obs['unc'] = None
if obs['maggies'] is not None:
obs['ndof'] += int(obs['phot_mask'].sum())
if grid_filters:
wlo, whi, dlo = [], [], []
for f in obs['filters']:
dlnlam = np.gradient(f.wavelength)/f.wavelength
wlo.append(f.wavelength.min())
dlo.append(dlnlam.min())
whi.append(f.wavelength.max())
wmin = np.min(wlo)
wmax = np.max(whi)
dlnlam = np.min(dlo)
for f in obs['filters']:
f.gridify_transmission(dlnlam, wmin)
f.get_properties()
obs['lnwavegrid'] = np.exp(np.arange(np.log(wmin), np.log(wmax)+dlnlam, dlnlam))
else:
obs['maggies_unc'] = None
assert obs["ndof"] > 0, "No valid data to fit: check the sign of the masks."
return obs
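# Illustrative sketch, not part of the original module: a minimal photometry-only
# obs dictionary run through fix_obs().  The band name and fluxes are invented;
# real use would pass sedpy Filter objects rather than plain strings, but strings
# are enough to show the bookkeeping (ndof, phot_mask, placeholder spectrum keys).
def _example_fix_obs():
    obs = {'filters': ['hypothetical_band'],
           'maggies': np.array([1.e-9]),
           'maggies_unc': np.array([1.e-10])}
    return fix_obs(obs)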
def logify_data(data, sigma, mask):
"""Convert data to ln(data) and uncertainty to fractional uncertainty for
use in additive GP models. This involves filtering for negative data
values and replacing them with something else.
"""
tiny = 0.01 * data[data > 0].min()
bad = data < tiny
nbad = bad.sum()
if nbad == 0:
return np.log(data), sigma/data, mask
else:
message = ("Setting {0} datapoints to {1} to insure "
"positivity.".format(nbad, tiny))
# warnings.warn(message)
print(message)
data[bad] = tiny
sigma[bad] = np.sqrt(sigma[bad]**2 + (data[bad] - tiny)**2)
return np.log(data), sigma/data, mask
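# Illustrative sketch with invented fluxes: for strictly positive data the
# conversion is just ln(flux) with fractional uncertainties, and the mask is
# returned unchanged.
def _example_logify_data():
    flux = np.array([1.0, 2.0, 4.0])
    unc = np.array([0.1, 0.1, 0.2])
    mask = np.ones(3, dtype=bool)
    return logify_data(flux, unc, mask)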
def norm_spectrum(obs, norm_band_name='f475w', **kwargs):
"""Initial guess of spectral normalization using photometry.
    This obtains the multiplicative factor required to reproduce the
photometry in one band from the observed spectrum (model.obs['spectrum'])
using the bsfh unit conventions. Thus multiplying the observed spectrum by
this factor gives a spectrum that is approximately erg/s/cm^2/AA at the
central wavelength of the normalizing band.
The inverse of the multiplication factor is saved as a fixed parameter to
be used in producing the mean model.
"""
from sedpy import observate
norm_band = [i for i, f in enumerate(obs['filters'])
if norm_band_name in f.name][0]
synphot = observate.getSED(obs['wavelength'], obs['spectrum'], obs['filters'])
synphot = np.atleast_1d(synphot)
# Factor by which the observed spectra should be *divided* to give you the
# photometry (or the cgs apparent spectrum), using the given filter as
# truth. Alternatively, the factor by which the model spectrum (in cgs
# apparent) should be multiplied to give you the observed spectrum.
norm = 10**(-0.4 * synphot[norm_band]) / obs['maggies'][norm_band]
# Pivot the calibration polynomial near the filter used for approximate
# normalization
pivot_wave = obs['filters'][norm_band].wave_effective
# model.params['pivot_wave'] = 4750.
return norm, pivot_wave
def rectify_obs(obs):
"""Make sure the passed obs dictionary conforms to code expectations,
and make simple fixes when possible.
"""
k = obs.keys()
if 'maggies' not in k:
obs['maggies'] = None
obs['maggies_unc'] = None
if 'spectrum' not in k:
obs['spectrum'] = None
obs['unc'] = None
if obs['maggies'] is not None:
assert (len(obs['filters']) == len(obs['maggies']))
assert ('maggies_unc' in k)
assert ((len(obs['maggies']) == len(obs['maggies_unc'])) or
                (np.size(obs['maggies_unc']) == 1))
m = obs.get('phot_mask', np.ones(len(obs['maggies']), dtype=bool))
obs['phot_mask'] = (m * np.isfinite(obs['maggies']) *
np.isfinite(obs['maggies_unc']) *
(obs['maggies_unc'] > 0))
try:
obs['filternames'] = [f.name for f in obs['filters']]
except:
pass
if 'logify_spectrum' not in k:
obs['logify_spectrum'] = False
if obs['spectrum'] is not None:
assert (len(obs['wavelength']) == len(obs['spectrum']))
assert ('unc' in k)
        with np.errstate(invalid='ignore'):
            m = obs.get('mask', np.ones(len(obs['wavelength']), dtype=bool))
            obs['mask'] = (m * np.isfinite(obs['spectrum']) *
                           np.isfinite(obs['unc']) * (obs['unc'] > 0))
return obs
def generate_mock(model, sps, mock_info):
"""Generate a mock data set given model, mock parameters, wavelength grid,
and filter set. Very old and unused.
"""
# Generate mock spectrum and photometry given mock parameters, and
# Apply calibration.
# NEED TO DEAL WITH FILTERNAMES ADDED FOR SPS_BASIS
obs = {'wavelength': mock_info['wavelength'],
'filters': mock_info['filters']}
model.configure(**mock_info['params'])
mock_theta = model.theta.copy()
# print('generate_mock: mock_theta={}'.format(mock_theta))
s, p, x = model.mean_model(mock_theta, obs, sps=sps)
cal = model.calibration(mock_theta, obs)
if 'calibration' in mock_info:
cal = mock_info['calibration']
s *= cal
model.configure()
# Add noise to the mock data
if mock_info['filters'] is not None:
p_unc = p / mock_info['phot_snr']
noisy_p = (p + p_unc * np.random.normal(size=len(p)))
obs['maggies'] = noisy_p
obs['maggies_unc'] = p_unc
obs['phot_mask'] = np.ones(len(obs['filters']), dtype=bool)
else:
obs['maggies'] = None
if mock_info['wavelength'] is not None:
s_unc = s / mock_info.get('spec_snr', 10.0)
noisy_s = (s + s_unc * np.random.normal(size=len(s)))
obs['spectrum'] = noisy_s
obs['unc'] = s_unc
obs['mask'] = np.ones(len(obs['wavelength']), dtype=bool)
else:
obs['spectrum'] = None
obs['mask'] = None
return obs
| 8,493 | 37.261261 | 92 |
py
|