repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses 1 value) |
---|---|---|---|---|---|---|
lm-intervention
|
lm-intervention-master/attention_figures1.py
|
"""
Creates figures for attention intervention analysis from JSON files:
- Stacked bar chart with direct/indirect/total effects
- Heatmap for head-level effects
- Barplot for layer-level effects
- Combined heatmap/barplot for head- and layer-level effects
"""
import json
import os
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from mpl_toolkits.axes_grid1.colorbar import colorbar
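# Note: mpl_toolkits.axes_grid1.colorbar was deprecated and later removed from Matplotlib;
# on a recent Matplotlib the colorbar call further below would instead use plt.colorbar(..., cax=cax).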
from attention_utils import topk_indices
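# Expected input: one JSON file per structure, as written by attention_intervention_structural.py,
# with a 'results' list of per-example dicts (indirect_effect_head, direct_effect_head,
# indirect_effect_layer, direct_effect_layer) plus summary fields such as 'mean_total_effect'.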
structure_to_title = {'simple': 'simple agreement',
'distractor': 'distractor',
'distractor_1': 'distractor (1)',
'singular': 'pp (singular)',
'plural': 'pp (plural)',
'pp': 'pp',
'rc_singular': 'across relative clause (singular)',
'rc_plural': 'across relative clause (plural)',
'rc': 'across relative clause',
'within_rc_singular': 'within relative clause (singular)',
'within_rc_plural': 'within relative clause (plural)',
'within_rc': 'within relative clause'}
def save_figures(data, source, model_version, filter, suffix=None, k=10):
# Load data from json obj
if source in ('rc', 'within_rc', 'pp'):
results = data[0]['results']
results.extend(data[1]['results'])
else:
results = data['results']
df = pd.DataFrame(results)
# Aggregate by head
# Convert column to 3d ndarray (num_examples x num_layers x num_heads)
indirect_by_head = np.stack(df['indirect_effect_head'].to_numpy())
direct_by_head = np.stack(df['direct_effect_head'].to_numpy())
# Average by head
mean_indirect_by_head = indirect_by_head.mean(axis=0)
std_indirect_by_head = indirect_by_head.std(axis=0)
mean_direct_by_head = direct_by_head.mean(axis=0)
std_direct_by_head = direct_by_head.std(axis=0)
# Select top k heads by indirect effect
topk_inds = topk_indices(mean_indirect_by_head, k)
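# topk_indices (from attention_utils) is assumed to return the flat indices of the k largest
# entries, i.e. roughly np.argsort(arr, axis=None)[::-1][:k]; the flat indices are mapped back
# to (layer, head) pairs with np.unravel_index below.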
# Aggregate by layer
# Convert column to 2d ndarray (num_examples x num_layers)
indirect_by_layer = np.stack(df['indirect_effect_layer'].to_numpy())
direct_by_layer = np.stack(df['direct_effect_layer'].to_numpy())
mean_indirect_by_layer = indirect_by_layer.mean(axis=0)
mean_direct_by_layer = direct_by_layer.mean(axis=0)
n_layers = indirect_by_layer.shape[1]
plt.rc('figure', titlesize=20)
# Plot stacked bar chart
palette = sns.color_palette()#('muted')
plt.figure(num=1, figsize=(5, 2))
topk_direct = []
topk_indirect = []
labels = []
for ind in topk_inds:
layer, head = np.unravel_index(ind, mean_indirect_by_head.shape)
topk_indirect.append(mean_indirect_by_head[layer, head])
topk_direct.append(mean_direct_by_head[layer, head])
labels.append(f'{layer}-{head}')
width = 0.6
inds = range(k)
p1 = plt.bar(inds, topk_indirect, width, linewidth=0, color=palette[1])
p2 = plt.bar(inds, topk_direct, width, bottom=topk_indirect, linewidth=0, color=palette[0])
plt.ylabel('Effect', size=11)
plt.title('Effects of top heads', fontsize=11)
plt.xticks(inds, labels, size=10)
plt.yticks(size=10)
if source in ("rc", "within_rc", "pp"):
# Parenthesize the sum so the line sits at the average of the two sub-sources' total effects
p3 = plt.axhline((data[0]['mean_total_effect'] + data[1]['mean_total_effect']) / 2, linestyle='--')
else:
p3 = plt.axhline(data['mean_total_effect'], linestyle='--')
plt.legend((p3, p2[0], p1[0]), ('Total', 'Direct', 'Indirect'), loc='upper right', fontsize=11,
bbox_to_anchor=(.99, 0.90))
sns.despine()
path = 'attention_figures/stacked_bar_charts'
if not os.path.exists(path):
os.makedirs(path)
plt.savefig(f'{path}/{source}_{model_version}_{filter}.pdf', format='pdf')
plt.close()
annot = False
# Plot heatmap of the across-example standard deviation of the direct and indirect effects
# (the mean-effect lines are kept commented out; the output path below is heat_maps_std_*)
for effect_type in ('indirect', 'direct'):
if effect_type == 'indirect':
#effect_to_plot = mean_indirect_by_head
effect_to_plot = std_indirect_by_head
else:
#effect_to_plot = mean_direct_by_head
effect_to_plot = std_direct_by_head
plt.figure(num=1, figsize=(7, 5))  # create the figure before drawing so the saved file is not blank
ax = sns.heatmap(effect_to_plot, rasterized=True, annot=annot, annot_kws={"size": 9}, fmt=".2f", square=True)
ax.set(xlabel='Head', ylabel='Layer', title=f'{effect_type.capitalize()} Effect (std across examples)')
path = f'attention_figures/heat_maps_std_{effect_type}'
if not os.path.exists(path):
os.makedirs(path)
plt.savefig(f'{path}/{source}_{model_version}_{filter}.pdf', format='pdf')
plt.close()
# Plot layer-level bar chart for indirect and direct effects
for effect_type in ('indirect', 'direct'):
if effect_type == 'indirect':
mean_effect = mean_indirect_by_layer
else:
mean_effect = mean_direct_by_layer
plt.figure(num=1, figsize=(5, 5))
ax = sns.barplot(x=mean_effect, y=list(range(n_layers)), orient="h", color="#4472C4")
ax.set(ylabel='Layer', title=f'Mean {effect_type.capitalize()} Effect')
path = f'attention_figures/layer_{effect_type}'
if not os.path.exists(path):
os.makedirs(path)
plt.savefig(f'{path}/{source}_{model_version}_{filter}.pdf', format='pdf')
plt.close()
# Plot combined heatmap and barchart for direct and indirect effects
for do_sort in False, True:
for effect_type in ('indirect', 'direct'):
if effect_type == 'indirect':
effect_head = mean_indirect_by_head
effect_layer = mean_indirect_by_layer
if do_sort:
effect_head = -np.sort(-effect_head) # Sort indirect effects within each layer in descending order
else:
if do_sort:
continue
effect_head = mean_direct_by_head
effect_layer = mean_direct_by_layer
fig = plt.figure(figsize=(3, 2.2))
if model_version == 'distilgpt2':
ax1 = plt.subplot2grid((100, 85), (0, 0), colspan=62, rowspan=99)
ax2 = plt.subplot2grid((100, 85), (32, 69), colspan=17, rowspan=35)
elif model_version in ('gpt2', 'gpt2_random'):
ax1 = plt.subplot2grid((100, 85), (0, 0), colspan=65, rowspan=99)
ax2 = plt.subplot2grid((100, 85), (12, 70), colspan=15, rowspan=75)
elif model_version == 'gpt2-medium':
ax1 = plt.subplot2grid((100, 85), (0, 5), colspan=55, rowspan=99)
ax2 = plt.subplot2grid((100, 85), (2, 64), colspan=17, rowspan=95)
elif model_version == 'gpt2-large':
ax1 = plt.subplot2grid((100, 85), (0, 5), colspan=55, rowspan=96)
ax2 = plt.subplot2grid((100, 85), (0, 62), colspan=17, rowspan=97)
elif model_version == 'gpt2-xl':
ax1 = plt.subplot2grid((100, 85), (0, 5), colspan=55, rowspan=96)
ax2 = plt.subplot2grid((100, 85), (0, 62), colspan=17, rowspan=97)
heatmap = sns.heatmap(effect_head, center=0.0, ax=ax1, annot=annot, annot_kws={"size": 9}, fmt=".2f", square=True, cbar=False, linewidth=0.1, linecolor='#D0D0D0',
cmap = LinearSegmentedColormap.from_list('rg', ["#F14100", "white", "#3D4FC4"], N=256))
plt.setp(heatmap.get_yticklabels(), fontsize=7)
plt.setp(heatmap.get_xticklabels(), fontsize=7)
heatmap.tick_params(axis='x', pad=1, length=2)
heatmap.tick_params(axis='y', pad=1, length=2)
heatmap.yaxis.labelpad = 2
heatmap.invert_yaxis()
if model_version != 'gpt2-xl':
for i, label in enumerate(heatmap.xaxis.get_ticklabels()):
if i%2 == 1:
label.set_visible(False)
for i, label in enumerate(heatmap.yaxis.get_ticklabels()):
if i%2 == 1:
label.set_visible(False)
if do_sort:
heatmap.axes.get_xaxis().set_ticks([])
else:
if model_version == 'gpt2-xl':
every_nth = 2
for n, label in enumerate(ax1.xaxis.get_ticklabels()):
if n % every_nth != 0:
label.set_visible(False)
for n, label in enumerate(ax1.yaxis.get_ticklabels()):
if n % every_nth != 0:
label.set_visible(False)
# split axes of heatmap to put colorbar
ax_divider = make_axes_locatable(ax1)
if model_version in ('gpt2-large', 'gpt2-xl'):
cax = ax_divider.append_axes('left', size='7%', pad='45%')
else:
cax = ax_divider.append_axes('left', size='7%', pad='33%')
# # make colorbar for heatmap.
# # Heatmap returns an axes obj but you need to get a mappable obj (get_children)
cbar = colorbar(ax1.get_children()[0], cax=cax, orientation='vertical')
cax.yaxis.set_ticks_position('left')
cbar.solids.set_edgecolor("face")
cbar.ax.tick_params(labelsize=7, length=4, pad=2)
ax1.set_title(structure_to_title[source], size=6)
ax1.set_xlabel('Head', size=6)
ax1.set_ylabel('Layer', size=6)
for _, spine in ax1.spines.items():
spine.set_visible(True)
ax2.set_title(' Layer Effect', size=6)
bp = sns.barplot(x=effect_layer, ax=ax2, y=list(range(n_layers)), color="#3D4FC4", orient="h")
plt.setp(bp.get_xticklabels(), fontsize=7)
bp.tick_params(axis='x', pad=1, length=3)
ax2.invert_yaxis()
ax2.set_yticklabels([])
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_visible(False)
ax2.xaxis.set_ticks_position('bottom')
ax2.axvline(0, linewidth=.85, color='black')
path = f'attention_figures/heat_maps_with_bar_{effect_type}{"_sorted" if do_sort else ""}'
if not os.path.exists(path):
os.makedirs(path)
fname = f'{path}/{source}_{model_version}_{filter}.pdf'
plt.savefig(fname, format='pdf', bbox_inches='tight')
plt.close()
def main():
sns.set_context("paper")
sns.set_style("white")
#model_versions = ['distilgpt2', 'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']
model_versions = ['gpt2']
filters = ['unfiltered']
#filters = ['filtered']
structures = ['distractor', 'distractor_1', 'singular', 'plural', 'rc_singular', 'rc_plural', \
'within_rc_singular', 'within_rc_plural', 'simple']
# process structural bias
for model_version in model_versions:
for filter in filters:
for structure in structures:
fname = f"attention_results/{structure}/attention_intervention_{model_version}_{filter}.json"
if not os.path.exists(fname):
print("File does not exist:", fname)
continue
with open(fname) as f:
if structure in ("rc", "within_rc", "pp"):
file_str = f.readline()
json_strs = file_str.split("]},")
json_strs[0] += "]}"
data = [json.loads(json_str) for json_str in json_strs]
else:
data = json.load(f)
save_figures(data, structure, model_version, filter)
if __name__ == '__main__':
main()
| 11,984 | 46.371542 | 174 |
py
|
lm-intervention
|
lm-intervention-master/analysis.py
|
import os
import numpy as np
import pandas as pd
from glob import glob
from tqdm import tqdm
import matplotlib.pyplot as plt
import sys
import seaborn as sns
sns.set_context('talk')
sns.set_style('whitegrid')
PATH = sys.argv[1]
FIGURES_PATH = sys.argv[2]
by_feather = sys.argv[3].lower() == 'true'
MODELS = ['Distil', 'Small', 'Medium', 'Large', 'XL']
CHUNKSIZE = 100000
EFFECT_TYPES = ['Indirect', 'Direct']
EXAMPLE_TYPES = ['None', 'Distractor', 'Plural attractor',
'Singular attractor']
COLS = ['Layer', 'Neuron', 'Random', 'Model size', 'Intervening tokens',
'Effect type']
FORMAT = '.pdf'
def get_size(f):
for m in MODELS:
if m.lower() in f:
return m
return 'Small'
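# e.g. get_size('indirect_distilgpt2_none.csv') -> 'Distil' (filename is illustrative);
# files whose names mention no size fall back to 'Small'.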
def get_example_type(f):
for et in EXAMPLE_TYPES:
if et.lower().split()[0] in f:
return et
def load_dataframe_and_calculate_effects(by_feather=False):
files = glob(PATH + '*.csv')
preloaded = glob(PATH + '*.feather')
dfs = []
if by_feather:
dfs = [pd.read_feather(f) for f in preloaded]
else:
for f in tqdm(files, desc='Loading files', leave=False):
df = None
feather = f.replace('csv', 'feather')
if feather in preloaded:
df = pd.read_feather(feather)
else:
df = pd.concat(tqdm(pd.read_csv(f, chunksize=CHUNKSIZE),
leave=False, desc='Loading dataframe for ' + f))
df.to_feather(feather)
df['Layer'] = df.layer
df['Neuron'] = df.neuron
df['Random'] = 'random' in f
df['Model size'] = get_size(f)
df['Intervening tokens'] = get_example_type(f)
df['Effect type'] = 'Indirect' if 'indirect' in f else 'Direct'
df['Yz'] = df.candidate2_prob / df.candidate1_prob
df['Singular grammaticality'] = df.candidate2_base_prob \
/ df.candidate1_base_prob
df['Effect'] = df['Yz'] / df['Singular grammaticality'] - 1
df['Plural grammaticality'] = df.candidate1_alt1_prob \
/ df.candidate2_alt1_prob
df['Total effect'] = 1 \
/ (df['Plural grammaticality']
* df['Singular grammaticality']) \
- 1
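# Worked example (illustrative numbers only): if the base ratio candidate2_base_prob /
# candidate1_base_prob is 0.25 and the intervention moves Yz to 0.5, then
# Effect = 0.5 / 0.25 - 1 = 1.0, i.e. the intervention doubles the relative preference for
# candidate2. With Plural grammaticality also 0.25, Total effect = 1 / (0.25 * 0.25) - 1 = 15.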
neurons = ['Neuron', 'Layer']
df = df.set_index(neurons)
neurons_per_layer = len(df.groupby('Neuron').mean().index)
idx = df.groupby(neurons).mean().sort_values('Effect')\
.groupby('Layer')\
.tail(int(neurons_per_layer*0.05)).index
df['Top 5 percent'] = df.index.isin(idx)
dfs.append(df)
df = pd.concat(dfs).reset_index()
return df
def save_nie_by_layer_plot(df):
print('Plotting nie by layer...')
try:
data = df[(df['Effect type'] == 'Indirect') & df['Top 5 percent']]\
.groupby(COLS).mean().reset_index()
g = sns.FacetGrid(data=data,
col='Random', col_order=[False, True],
row='Intervening tokens', row_order=EXAMPLE_TYPES,
hue='Model size', hue_order=MODELS,
height=5, aspect=2,
sharey=False)\
.map(sns.lineplot, 'Layer', 'Effect')
[ax.legend() for ax in g.axes.flatten()]
title = f'Indirect effects of top 5 percent of neurons by layer'
plt.gcf().suptitle(title)
plt.tight_layout(rect=[0, 0, 1, 0.95])
plt.savefig(FIGURES_PATH + title.lower().replace(' ', '_') + FORMAT)
print('Success')
except Exception as e:
print(e)
def draw_heatmap(data,color):
pivot = data.groupby(['Layer', 'Neuron']).mean().reset_index()\
.pivot(index='Layer', columns='Neuron', values='Effect')
ax = sns.heatmap(pivot, rasterized=True)
ax.invert_yaxis()
def save_heatmaps(df):
print('Generating heatmaps...')
for et in EFFECT_TYPES:
for r in ['trained', 'random']:
f = ~df['Random'] if r == 'trained' else df['Random']
data = df[(df['Effect type'] == et) & f]
try:
sns.FacetGrid(data,
col='Model size', col_order=MODELS,
row='Intervening tokens', row_order=EXAMPLE_TYPES,
margin_titles=False,
aspect=2, height=5,
sharey=False, sharex=False)\
.map_dataframe(draw_heatmap)
title = f'{r.capitalize()} model {et.lower()} effect heatmaps'
plt.suptitle(title)
plt.tight_layout(rect=[0, 0, 1, 0.95])
plt.savefig(FIGURES_PATH
+ title.lower().replace(' ', '_')
+ FORMAT)
print('Success')
except Exception as e:
print(e)
def save_aggregate_total_effect_bar(df):
data = df[~df.Random & (df['Effect type'] == 'Indirect')]\
.groupby([c for c in COLS if c not in ['Layer', 'Neuron']]
+ ['base_string', 'candidate1'])\
.mean().reset_index()
sns.FacetGrid(data,
row='Intervening tokens', row_order=EXAMPLE_TYPES,
height=5, aspect=2,
sharey=True, sharex=False)\
.map(sns.barplot, 'Model size', 'Total effect',
orient='v', order=MODELS)\
.set(yscale='log')
title = 'Total effects'
plt.suptitle(title)
plt.tight_layout(rect=[0, 0, 1, 0.95])
plt.savefig(FIGURES_PATH + f'{title.lower().replace(" ", "_")}' + FORMAT)
def save_y_comparisons(df):
data = df[~df.Random & (df['Effect type'] == 'Indirect')]\
.groupby(['Model size', 'Intervening tokens'])\
.mean().reset_index()
sns.relplot(x='Singular grammaticality', y='Plural grammaticality',
hue='Intervening tokens', hue_order=EXAMPLE_TYPES,
size='Model size', size_order=reversed(MODELS),
data=data)
title = 'Model grammaticality'
plt.suptitle(title)
plt.tight_layout(rect=[0, 0, 0.60, 0.95])
plt.savefig(FIGURES_PATH + f'{title.lower().replace(" ", "_")}' + FORMAT)
if __name__ == "__main__":
df = load_dataframe_and_calculate_effects(by_feather=by_feather)
save_nie_by_layer_plot(df)
save_heatmaps(df)
save_aggregate_total_effect_bar(df)
save_y_comparisons(df)
| 6,514 | 37.779762 | 78 |
py
|
lm-intervention
|
lm-intervention-master/attention_intervention_model.py
|
"""
Changes the huggingface transformer attention module to allow interventions
in the attention distribution.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
# prune_linear_layer is used by DistilBertAttentionOverride.prune_heads below; in the
# transformers versions this file copies from, it lives in transformers.modeling_utils.
from transformers.modeling_utils import prune_linear_layer
class AttentionOverride(nn.Module):
"""A copy of `modeling_gpt2.Attention` class, but with overridden attention values"""
def __init__(self, attention, attn_override, attn_override_mask):
"""
Args:
attention: instance of modeling_gpt2.Attention from which variables will be
copied.
attn_override: values to override the computed attention weights.
Shape is [num_heads, seq_len, seq_len]
attn_override_mask: indicates which attention weights to override.
Shape is [num_heads, seq_len, seq_len]
"""
super(AttentionOverride, self).__init__()
# Copy values from attention
self.output_attentions = attention.output_attentions
self.register_buffer("bias", attention._buffers["bias"])
self.n_head = attention.n_head
self.split_size = attention.split_size
self.scale = attention.scale
self.c_attn = attention.c_attn
self.c_proj = attention.c_proj
self.attn_dropout = attention.attn_dropout
self.resid_dropout = attention.resid_dropout
# Set attention override values
self.attn_override = attn_override
self.attn_override_mask = attn_override_mask
def _attn(self, q, k, v, attention_mask=None, head_mask=None):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
nd, ns = w.size(-2), w.size(-1)
b = self.bias[:, :, ns - nd : ns, :ns]
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
# attn_override and attn_override_mask are of shape
# (batch_size, num_heads, override_seq_len, override_seq_len)
# where override_seq_len is the length of subsequence for which attention is
# being overridden.
override_seq_len = self.attn_override_mask.shape[-1]
w[:, :, :override_seq_len, :override_seq_len] = torch.where(
self.attn_override_mask,
self.attn_override,
w[:, :, :override_seq_len, :override_seq_len],
)
outputs = [torch.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = (
layer_past[0].transpose(-2, -1),
layer_past[1],
) # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
present = torch.stack(
(key.transpose(-2, -1), value)
) # transpose to have same shapes for stacking
attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a, present] + attn_outputs[1:]
return outputs # a, present, (attentions)
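# Usage sketch (variable names are illustrative, not part of this module): the experiment code is
# expected to record attention weights from a reference forward pass, build an override mask, and
# temporarily swap this wrapper in for one GPT-2 block, roughly:
#   orig_attn = model.transformer.h[layer].attn
#   model.transformer.h[layer].attn = AttentionOverride(orig_attn, attn_override, attn_override_mask)
#   ...  # intervened forward pass
#   model.transformer.h[layer].attn = orig_attn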
class TXLAttentionOverride(nn.Module):
""" A copy of `modeling_transfo_xl.RelPartialLearnableMultiHeadAttn` class,
but with overridden attention values """
def __init__(self, module, attn_override, attn_override_mask):
"""
Args:
module: instance of modeling_transfo_xl.RelPartialLearnableMultiHeadAttn
from which variables will be copied
attn_override: values to override the computed attention weights.
Shape is [bsz, num_heads, seq_len, seq_len]
attn_override_mask: indicates which attention weights to override.
Shape is [bsz, num_heads, seq_len, seq_len]
"""
super(TXLAttentionOverride, self).__init__()
# Copy values from module
self.output_attentions = module.output_attentions
self.n_head = module.n_head
self.d_model = module.d_model
self.d_head = module.d_head
self.dropout = module.dropout
self.qkv_net = module.qkv_net
self.drop = module.drop
self.dropatt = module.dropatt
self.o_net = module.o_net
self.layer_norm = module.layer_norm
self.scale = module.scale
self.pre_lnorm = module.pre_lnorm
self.r_r_bias = module.r_r_bias
self.r_w_bias = module.r_w_bias
self.r_net = module.r_net
# Set attention override values
self.attn_override = attn_override
self.attn_override_mask = attn_override_mask
def _rel_shift(self, x):
zero_pad_shape = (x.size(0), 1) + x.size()[2:]
zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)
x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
x_padded = x_padded.view(*x_padded_shape)
x = x_padded[1:].view_as(x)
return x
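# _rel_shift is the standard Transformer-XL relative-shift trick: the zero pad and reshape
# realign the position-based scores so that entry (i, j) refers to key position j rather than
# to a relative offset.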
def forward(self, w, r, attn_mask=None, mems=None, head_mask=None):
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head
# compute attention score
rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
AC = torch.einsum("ibnd,jbnd->ijbn", (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
rr_head_q = w_head_q + self.r_r_bias
BD = torch.einsum("ibnd,jnd->ijbn", (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
BD = self._rel_shift(BD)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
# compute attention probability
if attn_mask is not None and torch.sum(attn_mask).item():
attn_mask = attn_mask == 1 # Switch to bool
if attn_mask.dim() == 2:
if next(self.parameters()).dtype == torch.float16:
attn_score = (
attn_score.float().masked_fill(attn_mask[None, :, :, None], -65000).type_as(attn_score)
)
else:
attn_score = attn_score.float().masked_fill(attn_mask[None, :, :, None], -1e30).type_as(attn_score)
elif attn_mask.dim() == 3:
if next(self.parameters()).dtype == torch.float16:
attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -65000).type_as(attn_score)
else:
attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -1e30).type_as(attn_score)
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
# Mask heads if we want to
if head_mask is not None:
attn_prob = attn_prob * head_mask
# Intervention:
# attn_override and attn_override_mask are of shape (bsz, n_heads, query_seq_len, key_seq_len)
# attn_prob is of shape (query_seq_len, key_seq_len, bsz, n_heads)
_, _, override_q_len, override_k_len = self.attn_override_mask.shape
attn_prob[:override_q_len, :override_k_len, :, :] = torch.where(
self.attn_override_mask.permute(2, 3, 0, 1),
self.attn_override.permute(2, 3, 0, 1),
attn_prob[:override_q_len, :override_k_len, :, :])
# compute attention vector
attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
# linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
# residual connection
outputs = [w + attn_out]
else:
# residual connection + layer normalization
outputs = [self.layer_norm(w + attn_out)]
if self.output_attentions:
outputs.append(attn_prob)
return outputs
class XLNetAttentionOverride(nn.Module):
""" A copy of `modeling_xlnet.XLNetRelativeAttention` class,
but with overridden attention values """
def __init__(self, module, attn_override, attn_override_mask):
"""
Args:
module: instance of modeling_xlnet.XLNetRelativeAttention
from which variables will be copied
attn_override: values to override the computed attention weights.
Tuple of content and query attentions (2-stream self-attention),
each of shape [bsz, num_heads, seq_len, seq_len]
attn_override_mask: indicates which attention weights to override.
Shape is [bsz, num_heads, seq_len, seq_len]
"""
super().__init__()
self.output_attentions = module.output_attentions
# if config.d_model % config.n_head != 0:
# raise ValueError(
# "The hidden size (%d) is not a multiple of the number of attention "
# "heads (%d)" % (config.d_model, config.n_head)
# )
self.n_head = module.n_head
self.d_head = module.d_head
self.d_model = module.d_model
self.scale = module.scale
self.q = module.q
self.k = module.k
self.v = module.v
self.o = module.o
self.r = module.r
self.r_r_bias = module.r_r_bias
self.r_s_bias = module.r_s_bias
self.r_w_bias = module.r_w_bias
self.seg_embed = module.seg_embed
self.layer_norm = module.layer_norm
self.dropout = module.dropout
# Set attention override values
self.content_attn_override, self.query_attn_override = attn_override
self.attn_override_mask = attn_override_mask
def prune_heads(self, heads):
raise NotImplementedError
@staticmethod
def rel_shift(x, klen=-1):
"""perform relative shift to form the relative attention score."""
x_size = x.shape
x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
x = x[1:, ...]
x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
# x = x[:, 0:klen, :, :]
x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
return x
@staticmethod
def rel_shift_bnij(x, klen=-1):
x_size = x.shape
x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
x = x[:, :, 1:, :]
x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
# Note: the tensor-slice form was faster in my testing than torch.index_select
# However, tracing doesn't like the nature of the slice, and if klen changes
# during the run then it'll fail, whereas index_select will be fine.
x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
# x = x[:, :, :, :klen]
return x
def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, attn_override, seg_mat=None, attn_mask=None, head_mask=None):
"""Core relative positional attention operations."""
# content based attention score
ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
# position based attention score
bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
# segment based attention score
if seg_mat is None:
ef = 0
else:
ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
# merge attention scores and perform masking
attn_score = (ac + bd + ef) * self.scale
if attn_mask is not None:
# attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
if attn_mask.dtype == torch.float16:
attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
else:
attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
# attention probability
attn_prob = F.softmax(attn_score, dim=3)
attn_prob = self.dropout(attn_prob)
# Mask heads if we want to
if head_mask is not None:
attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
# Intervention:
# attn_override and attn_override_mask are of shape (batch_size, num_heads, override_seq_len, override_seq_len)
# where override_seq_len is the length of subsequence for which attention is being overridden
override_seq_len = self.attn_override_mask.shape[-1]
attn_prob[:, :, :override_seq_len, :override_seq_len] = torch.where(
self.attn_override_mask,
attn_override,
attn_prob[:, :, :override_seq_len, :override_seq_len])
# attention output
attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
if self.output_attentions:
return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
return attn_vec
def post_attention(self, h, attn_vec, residual=True):
"""Post-attention processing."""
# post-attention projection (back to `d_model`)
attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
attn_out = self.dropout(attn_out)
if residual:
attn_out = attn_out + h
output = self.layer_norm(attn_out)
return output
def forward(self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None):
if g is not None:
# Two-stream attention with relative positional encoding.
# content based attention score
if mems is not None and mems.dim() > 1:
cat = torch.cat([mems, h], dim=0)
else:
cat = h
# content-based key head
k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
# content-based value head
v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
# position-based key head
k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
# h-stream
# content-stream query head
q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
# core attention ops
attn_vec_h = self.rel_attn_core(
q_head_h, k_head_h, v_head_h, k_head_r,
attn_override=self.content_attn_override,
seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask
)
if self.output_attentions:
attn_vec_h, attn_prob_h = attn_vec_h
# post processing
output_h = self.post_attention(h, attn_vec_h)
# g-stream
# query-stream query head
q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
# core attention ops
if target_mapping is not None:
q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
attn_vec_g = self.rel_attn_core(
q_head_g, k_head_h, v_head_h, k_head_r,
attn_override=self.query_attn_override,
seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask
)
if self.output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
else:
assert False ### NEW
attn_vec_g = self.rel_attn_core(
q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask
)
if self.output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
# post processing
output_g = self.post_attention(g, attn_vec_g)
if self.output_attentions:
attn_prob = attn_prob_h, attn_prob_g
else:
assert False ### NEW
# Multi-head attention with relative positional encoding
if mems is not None and mems.dim() > 1:
cat = torch.cat([mems, h], dim=0)
else:
cat = h
# content heads
q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
# positional heads
k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
# core attention ops
attn_vec = self.rel_attn_core(
q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask
)
if self.output_attentions:
attn_vec, attn_prob = attn_vec
# post processing
output_h = self.post_attention(h, attn_vec)
output_g = None
outputs = (output_h, output_g)
if self.output_attentions:
outputs = outputs + (attn_prob,)
return outputs
class BertAttentionOverride(nn.Module):
"""A copy of `modeling_bert.BertSelfAttention` class, but with overridden attention values"""
def __init__(self, module, attn_override, attn_override_mask):
"""
Args:
module: instance of modeling_bert.BertSelfAttentionOverride
from which variables will be copied
attn_override: values to override the computed attention weights.
Shape is [bsz, num_heads, seq_len, seq_len]
attn_override_mask: indicates which attention weights to override.
Shape is [bsz, num_heads, seq_len, seq_len]
"""
super().__init__()
# if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
# raise ValueError(
# "The hidden size (%d) is not a multiple of the number of attention "
# "heads (%d)" % (config.hidden_size, config.num_attention_heads)
# )
self.output_attentions = module.output_attentions
self.num_attention_heads = module.num_attention_heads
self.attention_head_size = module.attention_head_size
self.all_head_size = module.all_head_size
self.query = module.query
self.key = module.key
self.value = module.value
self.dropout = module.dropout
# Set attention override values
self.attn_override = attn_override
self.attn_override_mask = attn_override_mask
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
attention_mask = encoder_attention_mask
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
# Intervention:
# attn_override and attn_override_mask are of shape (batch_size, num_heads, override_seq_len, override_seq_len)
# where override_seq_len is the length of subsequence for which attention is being overridden
override_seq_len = self.attn_override_mask.shape[-1]
attention_probs[:, :, :override_seq_len, :override_seq_len] = torch.where(
self.attn_override_mask,
self.attn_override,
attention_probs[:, :, :override_seq_len, :override_seq_len])
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
class DistilBertAttentionOverride(nn.Module):
"""A copy of `modeling_distilbert.MultiHeadSelfAttention` class, but with overridden attention values"""
def __init__(self, module, attn_override, attn_override_mask):
"""
Args:
module: instance of modeling_distilbert.MultiHeadSelfAttention
from which variables will be copied
attn_override: values to override the computed attention weights.
Shape is [bsz, num_heads, seq_len, seq_len]
attn_override_mask: indicates which attention weights to override.
Shape is [bsz, num_heads, seq_len, seq_len]
"""
super().__init__()
self.n_heads = module.n_heads
self.dim = module.dim
self.dropout = module.dropout
self.output_attentions = module.output_attentions
assert self.dim % self.n_heads == 0
self.q_lin = module.q_lin
self.k_lin = module.k_lin
self.v_lin = module.v_lin
self.out_lin = module.out_lin
self.pruned_heads = module.pruned_heads
# Set attention override values
self.attn_override = attn_override
self.attn_override_mask = attn_override_mask
def prune_heads(self, heads):
attention_head_size = self.dim // self.n_heads
if len(heads) == 0:
return
mask = torch.ones(self.n_heads, attention_head_size)
heads = set(heads) - self.pruned_heads
for head in heads:
head -= sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.q_lin = prune_linear_layer(self.q_lin, index)
self.k_lin = prune_linear_layer(self.k_lin, index)
self.v_lin = prune_linear_layer(self.v_lin, index)
self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.dim = attention_head_size * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, query, key, value, mask, head_mask=None):
"""
Parameters
----------
query: torch.tensor(bs, seq_length, dim)
key: torch.tensor(bs, seq_length, dim)
value: torch.tensor(bs, seq_length, dim)
mask: torch.tensor(bs, seq_length)
Outputs
-------
weights: torch.tensor(bs, n_heads, seq_length, seq_length)
Attention weights
context: torch.tensor(bs, seq_length, dim)
Contextualized layer. Optional: only if `output_attentions=True`
"""
bs, q_length, dim = query.size()
k_length = key.size(1)
# assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
# assert key.size() == value.size()
dim_per_head = self.dim // self.n_heads
mask_reshp = (bs, 1, 1, k_length)
def shape(x):
""" separate heads """
return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
def unshape(x):
""" group heads """
return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length)
mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length)
scores.masked_fill_(mask, -float("inf")) # (bs, n_heads, q_length, k_length)
weights = nn.Softmax(dim=-1)(scores) # (bs, n_heads, q_length, k_length)
weights = self.dropout(weights) # (bs, n_heads, q_length, k_length)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
# Intervention:
# attn_override and attn_override_mask are of shape (batch_size, num_heads, override_seq_len, override_seq_len)
# where override_seq_len is the length of subsequence for which attention is being overridden
override_seq_len = self.attn_override_mask.shape[-1]
weights[:, :, :override_seq_len, :override_seq_len] = torch.where(
self.attn_override_mask,
self.attn_override,
weights[:, :, :override_seq_len, :override_seq_len])
context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head)
context = unshape(context) # (bs, q_length, dim)
context = self.out_lin(context) # (bs, q_length, dim)
if self.output_attentions:
return (context, weights)
else:
return (context,)
| 29,231 | 39.998597 | 127 |
py
|
lm-intervention
|
lm-intervention-master/utils_num_agreement.py
|
import pandas as pd
from copy import deepcopy
def batch(iterable, bsize=1):
total_len = len(iterable)
for ndx in range(0, total_len, bsize):
yield list(iterable[ndx:min(ndx + bsize, total_len)])
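# e.g. list(batch([1, 2, 3, 4, 5], bsize=2)) == [[1, 2], [3, 4], [5]]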
def convert_results_to_pd(interventions, intervention_results, layer_fixed=None, neuron_fixed=None):
"""
Convert intervention results to data frame
Args:
interventions: dictionary from word (e.g., profession) to intervention
intervention_results: dictionary from word to intervention results
"""
results = []
for word in intervention_results:
intervention = interventions[word]
candidate1_base_prob, candidate2_base_prob,\
candidate1_alt1_prob, candidate2_alt1_prob,\
candidate1_probs, candidate2_probs = intervention_results[word]
# we have results for all layers and all neurons
results_base = {# strings
'word': word,
'base_string': intervention.base_strings[0],
'alt_string1': intervention.base_strings[1],
'candidate1': intervention.candidates[0],
'candidate2': intervention.candidates[1],
# base probs
'candidate1_base_prob': float(candidate1_base_prob),
'candidate2_base_prob': float(candidate2_base_prob),
'candidate1_alt1_prob': float(candidate1_alt1_prob),
'candidate2_alt1_prob': float(candidate2_alt1_prob)}
if layer_fixed is None:
for layer in range(candidate1_probs.size(0)):
for neuron in range(candidate1_probs.size(1)):
c1_prob, c2_prob = candidate1_probs[layer][neuron], candidate2_probs[layer][neuron]
results_single = deepcopy(results_base)
results_single.update({# strings
# intervention probs
'candidate1_prob': float(c1_prob),
'candidate2_prob': float(c2_prob),
'layer': layer,
'neuron': neuron})
results.append(results_single)
# we have results for all neurons in one layer
elif neuron_fixed is None:
for neuron in range(candidate1_probs.size(1)):
c1_prob, c2_prob = candidate1_probs[0][neuron], candidate2_probs[0][neuron]
results_single = deepcopy(results_base)
results_single.update({# strings
# intervention probs
'candidate1_prob': float(c1_prob),
'candidate2_prob': float(c2_prob),
'layer': layer_fixed,
'neuron': neuron})
results.append(results_single)
# we have result for a specific neuron and layer
else:
c1_prob, c2_prob = candidate1_probs, candidate2_probs
results_single = deepcopy(results_base)
results_single.update({# strings
# intervention probs
'candidate1_prob': float(c1_prob),
'candidate2_prob': float(c2_prob),
'layer': layer_fixed,
'neuron': neuron_fixed})
results.append(results_single)
return pd.DataFrame(results)
| 3,316 | 43.226667 | 101 |
py
|
lm-intervention
|
lm-intervention-master/attention_intervention_structural.py
|
"""Performs attention intervention on Winobias samples and saves results to JSON file."""
import json
import os
import random
import sys
from pandas import DataFrame
from transformers import (
GPT2Tokenizer, TransfoXLTokenizer, XLNetTokenizer
)
from attention_utils import perform_interventions, get_odds_ratio
from experiment_num_agreement import Model, Intervention
from vocab_utils import get_nouns, get_nouns2, get_verbs, get_verbs2, get_prepositions, \
get_preposition_nouns, get_adv1s, get_adv2s
import vocab_utils as vocab
def construct_templates(attractor):
templates = []
if attractor in ['singular', 'plural']:
for p in get_prepositions():
for ppns, ppnp in get_preposition_nouns():
ppn = ppns if attractor == 'singular' else ppnp
template = ' '.join(['The', '{}', p, 'the', ppn])
templates.append(template)
elif attractor in ('rc_singular', 'rc_plural', 'rc_singular_no_that', 'rc_plural_no_that'):
for noun2s, noun2p in get_nouns2():
noun2 = noun2s if attractor.startswith('rc_singular') else noun2p
for verb2s, verb2p in get_verbs2():
verb2 = verb2s if attractor.startswith('rc_singular') else verb2p
if attractor.endswith('no_that'):
template = ' '.join(['The', '{}', 'the', noun2, verb2])
else:
template = ' '.join(['The', '{}', 'that', 'the', noun2, verb2])
# templates.append(' '.join(['The', '{}', 'that', 'the', noun2s, verb2s]))
# templates.append(' '.join(['The', '{}', 'that', 'the', noun2p, verb2p]))
templates.append(template)
elif attractor in ('within_rc_singular', 'within_rc_plural', 'within_rc_singular_no_that', 'within_rc_plural_no_that'):
for ns, np in vocab.get_nouns():
noun = ns if attractor.startswith('within_rc_singular') else np
if attractor.endswith('no_that'):
template = ' '.join(['The', noun, 'the', '{}'])
else:
template = ' '.join(['The', noun, 'that', 'the', '{}'])
# templates.append(' '.join(['The', ns, 'that', 'the', '{}']))
# templates.append(' '.join(['The', np, 'that', 'the', '{}']))
templates.append(template)
elif attractor == 'distractor':
for adv1 in get_adv1s():
for adv2 in get_adv2s():
templates.append(' '.join(['The', '{}', adv1, 'and', adv2]))
elif attractor == 'distractor_1':
for adv1 in get_adv1s():
templates.append(' '.join(['The', '{}', adv1]))
else:
templates = ['The {}']
return templates
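# For illustration (the exact nouns/prepositions/adverbs come from vocab_utils, so these strings
# are examples only): a 'singular'/'plural' attractor yields templates of the form
# 'The {} <preposition> the <pp-noun>', 'rc_*' yields 'The {} that the <noun2> <verb2>', and
# 'distractor_1' yields 'The {} <adverb>'. The '{}' slot is filled later by Intervention with the
# singular or plural subject noun.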
def load_structural_interventions(tokenizer, device, attractor, seed, examples):
# build list of interventions
interventions = {}
all_word_count = 0
used_word_count = 0
templates = construct_templates(attractor)
for temp in templates:
if attractor.startswith('within_rc'):
for noun2s, noun2p in get_nouns2():
for v_singular, v_plural in vocab.get_verbs():
all_word_count += 1
try:
intervention_name = '_'.join([temp, noun2s, v_singular])
interventions[intervention_name] = Intervention(
tokenizer,
temp,
[noun2s, noun2p],
[v_singular, v_plural],
device=device)
used_word_count += 1
except Exception as e:
pass
else:
for ns, np in vocab.get_nouns():
for v_singular, v_plural in vocab.get_verbs():
all_word_count += 1
try:
intervention_name = '_'.join([temp, ns, v_singular])
interventions[intervention_name] = Intervention(
tokenizer,
temp,
[ns, np],
[v_singular, v_plural],
device=device)
used_word_count += 1
except Exception as e:
pass
print(f"\t Only used {used_word_count}/{all_word_count} nouns due to tokenizer")
if examples > 0 and len(interventions) >= examples:
random.seed(seed)
interventions = {k: v
for k, v in random.sample(interventions.items(), examples)}
return interventions
def get_interventions_structural(gpt2_version, do_filter, model, tokenizer,
device='cuda', filter_quantile=0.25, seed=3, attractor=None, examples=100):
interventions = load_structural_interventions(tokenizer, device, attractor, seed, examples)
intervention_list = [intervention for intervention in interventions.values()]
interventions = intervention_list
json_data = {'model_version': gpt2_version,
'do_filter': do_filter,
'num_examples_loaded': len(interventions)}
if do_filter:
df = DataFrame({'odds_ratio': [get_odds_ratio(intervention, model) for intervention in intervention_list]})
df_expected = df[df.odds_ratio > 1]
threshold = df_expected.odds_ratio.quantile(filter_quantile)
filtered_interventions = []
assert len(intervention_list) == len(df)
for i in range(len(intervention_list)):
intervention = intervention_list[i]
odds_ratio = df.iloc[i].odds_ratio
if odds_ratio > threshold:
filtered_interventions.append(intervention)
print(f'Num examples with odds ratio > 1: {len(df_expected)} / {len(intervention_list)}')
print(
f'Num examples with odds ratio > {threshold:.4f} ({filter_quantile} quantile): {len(filtered_interventions)} / {len(intervention_list)}')
json_data['num_examples_aligned'] = len(df_expected)
json_data['filter_quantile'] = filter_quantile
json_data['threshold'] = threshold
interventions = filtered_interventions
json_data['num_examples_analyzed'] = len(interventions)
return interventions, json_data
def intervene_attention(gpt2_version, do_filter, attractor, device='cuda', filter_quantile=0.25, examples=100,\
seed=3, random_weights=False):
model = Model(output_attentions=True, gpt2_version=gpt2_version,
device=device, random_weights=random_weights)
tokenizer = (GPT2Tokenizer if model.is_gpt2 else
TransfoXLTokenizer if model.is_txl else
# XLNetTokenizer if model.is_xlnet
XLNetTokenizer
).from_pretrained(gpt2_version)
interventions, json_data = get_interventions_structural(gpt2_version, do_filter,
model, tokenizer,
device, filter_quantile,
seed=seed, attractor=attractor,
examples=examples)
results = perform_interventions(interventions, model)
json_data['mean_total_effect'] = DataFrame(results).total_effect.mean()
json_data['mean_model_indirect_effect'] = DataFrame(results).indirect_effect_model.mean()
json_data['mean_model_direct_effect'] = DataFrame(results).direct_effect_model.mean()
filter_name = 'filtered' if do_filter else 'unfiltered'
if random_weights:
gpt2_version += '_random'
fname = f"attention_results/{attractor}/attention_intervention_{gpt2_version}_{filter_name}.json"
base_path = '/'.join(fname.split('/')[:-1])
if not os.path.exists(base_path):
os.makedirs(base_path)
json_data['results'] = results
with open(fname, 'w') as f:
json.dump(json_data, f)
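# Example invocation (values are illustrative):
#   python attention_intervention_structural.py gpt2 cuda 0.25 trained singular 3 100
# i.e. model, device, filter_quantile, 'random' or anything else for trained weights,
# attractor, seed, number of examples.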
if __name__ == "__main__":
model = sys.argv[1]
device = sys.argv[2]
filter_quantile = float(sys.argv[3])
random_weights = sys.argv[4] == 'random'
attractor = sys.argv[5]
seed = int(sys.argv[6])
examples = int(sys.argv[7])
#intervene_attention(model, True, attractor, device=device, filter_quantile=filter_quantile, examples=examples, \
# seed=seed, random_weights=random_weights)
intervene_attention(model, False, attractor, device=device, filter_quantile=0.0, examples=examples, seed=seed, \
random_weights=random_weights)
| 8,648 | 46.262295 | 149 |
py
|
lm-intervention
|
lm-intervention-master/attention_figures2.py
|
"""
Creates figures for attention intervention analysis from JSON files:
- Stacked bar chart with direct/indirect/total effects
- Heatmap for head-level effects
- Barplot for layer-level effects
- Combined heatmap/barplot for head- and layer-level effects
"""
import json
import os
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from mpl_toolkits.axes_grid1.colorbar import colorbar
from attention_utils import topk_indices
structure_to_title = {'simple': 'simple agreement',
'distractor': 'distractor',
'distractor_1': 'distractor (1)',
'singular': 'pp (singular)',
'plural': 'pp (plural)',
'pp': 'pp',
'rc_singular': 'across relative clause (singular)',
'rc_plural': 'across relative clause (plural)',
'rc': 'across relative clause',
'within_rc_singular': 'within relative clause (singular)',
'within_rc_plural': 'within relative clause (plural)',
'within_rc': 'within relative clause'}
def save_figures(data, source, model_version, filter, suffix=None, k=10):
# Load data from json obj
if source in ('rc', 'within_rc', 'pp'):
results = data[0]['results']
results.extend(data[1]['results'])
else:
results = data['results']
df = pd.DataFrame(results)
# Aggregate by head
# Convert column to 3d ndarray (num_examples x num_layers x num_heads)
indirect_by_head = np.stack(df['indirect_effect_head'].to_numpy())
direct_by_head = np.stack(df['direct_effect_head'].to_numpy())
# Average by head
mean_indirect_by_head = indirect_by_head.mean(axis=0)
mean_direct_by_head = direct_by_head.mean(axis=0)
# Select top k heads by indirect effect
topk_inds = topk_indices(mean_indirect_by_head, k)
# Aggregate by layer
# Convert column to 2d ndarray (num_examples x num_layers)
indirect_by_layer = np.stack(df['indirect_effect_layer'].to_numpy())
direct_by_layer = np.stack(df['direct_effect_layer'].to_numpy())
mean_indirect_by_layer = indirect_by_layer.mean(axis=0)
# std_indirect_by_layer = indirect_by_layer.std(axis=0)
std_indirect_by_layer = stats.sem(indirect_by_layer, axis=0)
mean_direct_by_layer = direct_by_layer.mean(axis=0)
# std_direct_by_layer = direct_by_layer.std(axis=0)
std_direct_by_layer = stats.sem(direct_by_layer, axis=0)
n_layers = indirect_by_layer.shape[1]
plt.rc('figure', titlesize=20)
'''
# Plot stacked bar chart
palette = sns.color_palette()#('muted')
plt.figure(num=1, figsize=(5, 2))
topk_direct = []
topk_indirect = []
labels = []
for ind in topk_inds:
layer, head = np.unravel_index(ind, mean_indirect_by_head.shape)
topk_indirect.append(mean_indirect_by_head[layer, head])
topk_direct.append(mean_direct_by_head[layer, head])
labels.append(f'{layer}-{head}')
width = 0.6
inds = range(k)
p1 = plt.bar(inds, topk_indirect, width, linewidth=0, color=palette[1])
p2 = plt.bar(inds, topk_direct, width, bottom=topk_indirect, linewidth=0, color=palette[0])
plt.ylabel('Effect', size=11)
plt.title('Effects of top heads', fontsize=11)
plt.xticks(inds, labels, size=10)
plt.yticks(size=10)
if source in ("rc", "within_rc", "pp"):
p3 = plt.axhline((data[0]['mean_total_effect'] + data[1]['mean_total_effect']) / 2, linestyle='--')
else:
p3 = plt.axhline(data['mean_total_effect'], linestyle='--')
plt.legend((p3, p2[0], p1[0]), ('Total', 'Direct', 'Indirect'), loc='upper right', fontsize=11,
bbox_to_anchor=(.99, 0.90))
sns.despine()
path = 'attention_figures/stacked_bar_charts'
if not os.path.exists(path):
os.makedirs(path)
plt.savefig(f'{path}/{source}_{model_version}_{filter}.pdf', format='pdf')
plt.close()
annot = False
'''
annot = False
'''
# Plot heatmap for direct and indirect effect
for effect_type in ('indirect', 'direct'):
if effect_type == 'indirect':
mean_effect = mean_indirect_by_head
else:
mean_effect = mean_direct_by_head
ax = sns.heatmap(mean_effect, rasterized=True, annot=annot, annot_kws={"size": 9}, fmt=".2f", square=True, \
vmin=-.016, vmax=.016, cmap = LinearSegmentedColormap.from_list('rg', ["#F14100", "white", "#3D4FC4"], N=256))
ax.set(xlabel='Head', ylabel='Layer', title=f'Mean {effect_type.capitalize()} Effect')
plt.figure(num=1, figsize=(7, 5))
path = f'attention_figures/heat_maps_{effect_type}_limit'
if not os.path.exists(path):
os.makedirs(path)
plt.savefig(f'{path}/{source}_{model_version}_{filter}.pdf', format='pdf')
plt.close()
# Plot layer-level bar chart for indirect and direct effects
for effect_type in ('indirect', 'direct'):
if effect_type == 'indirect':
mean_effect = mean_indirect_by_layer
else:
mean_effect = mean_direct_by_layer
plt.figure(num=1, figsize=(5, 5))
ax = sns.barplot(x=mean_effect, y=list(range(n_layers)), orient="h", color="#4472C4")
ax.set(ylabel='Layer', title=f'Mean {effect_type.capitalize()} Effect')
path = f'attention_figures/layer_{effect_type}_limit'
if not os.path.exists(path):
os.makedirs(path)
plt.savefig(f'{path}/{source}_{model_version}_{filter}.pdf', format='pdf')
plt.close()
'''
# Plot line graph of layer vs. mean effect across heads
for effect_type in ('indirect', 'direct'):
if effect_type == 'indirect':
mean_effect = mean_indirect_by_layer
std_effect = std_indirect_by_layer
else:
mean_effect = mean_direct_by_layer
std_effect = std_direct_by_layer
sns.set_theme(style="darkgrid")
x = list(range(n_layers))
plt.plot(x, mean_effect, 'b-')
plt.fill_between(x, mean_effect - std_effect, mean_effect + std_effect, color='b', alpha=0.15)
# ax = sns.lineplot(x=list(range(n_layers)), y=mean_effect)
# ax = sns.lineplot(y=y, ci="sd", data=df)
if effect_type == 'indirect':
plt.axhline(0, ls='--')
# plt.set(xlabel='Layer', ylabel='Mean effect across heads')
path = f'attention_figures/line_{effect_type}'
if not os.path.exists(path):
os.makedirs(path)
plt.savefig(f'{path}/{source}_{model_version}_{filter}.pdf', format='pdf')
plt.close()
# Experimental graphs
for effect_type in ('indirect', 'direct'):
if effect_type == 'indirect':
mean_effect = mean_indirect_by_head
# std_effect = std_indirect_by_layer
else:
mean_effect = mean_direct_by_head
# std_effect = std_direct_by_layer
sns.set_theme(style="darkgrid")
ax = sns.displot(mean_effect, kind='kde')
# ax.set(xlabel='Head', ylabel='Layer', title=f'Mean {effect_type.capitalize()} Effect')
# ax = sns.lineplot(y=y, ci="sd", data=df)
# ax.axhline(0, ls='--')
# ax.set(xlabel='Layer', ylabel='Mean effect across heads')
path = f'attention_figures/dist_{effect_type}'
if not os.path.exists(path):
os.makedirs(path)
plt.savefig(f'{path}/{source}_{model_version}_{filter}.pdf', format='pdf')
plt.close()
'''
for effect_type in ('indirect', 'direct'):
if effect_type == 'indirect':
mean_effect = mean_indirect_by_layer
else:
mean_effect = mean_direct_by_layer
'''
'''
# Plot combined heatmap and barchart for direct and indirect effects
for do_sort in False, True:
for effect_type in ('indirect', 'direct'):
if effect_type == 'indirect':
effect_head = mean_indirect_by_head
effect_layer = mean_indirect_by_layer
if do_sort:
effect_head = -np.sort(-effect_head) # Sort indirect effects within each layer in descending order
else:
if do_sort:
continue
effect_head = mean_direct_by_head
effect_layer = mean_direct_by_layer
fig = plt.figure(figsize=(3, 2.2))
if model_version == 'distilgpt2':
ax1 = plt.subplot2grid((100, 85), (0, 0), colspan=62, rowspan=99)
ax2 = plt.subplot2grid((100, 85), (32, 69), colspan=17, rowspan=35)
elif model_version in ('gpt2', 'gpt2_random'):
ax1 = plt.subplot2grid((100, 85), (0, 0), colspan=65, rowspan=99)
ax2 = plt.subplot2grid((100, 85), (12, 70), colspan=15, rowspan=75)
elif model_version == 'gpt2-medium':
ax1 = plt.subplot2grid((100, 85), (0, 5), colspan=55, rowspan=99)
ax2 = plt.subplot2grid((100, 85), (2, 64), colspan=17, rowspan=95)
elif model_version == 'gpt2-large':
ax1 = plt.subplot2grid((100, 85), (0, 5), colspan=55, rowspan=96)
ax2 = plt.subplot2grid((100, 85), (0, 62), colspan=17, rowspan=97)
elif model_version == 'gpt2-xl':
ax1 = plt.subplot2grid((100, 85), (0, 5), colspan=55, rowspan=96)
ax2 = plt.subplot2grid((100, 85), (0, 62), colspan=17, rowspan=97)
heatmap = sns.heatmap(effect_head, center=0.0, ax=ax1, annot=annot, annot_kws={"size": 9}, fmt=".2f", square=True, cbar=False, linewidth=0.1, linecolor='#D0D0D0',
cmap = LinearSegmentedColormap.from_list('rg', ["#F14100", "white", "#3D4FC4"], N=256))
plt.setp(heatmap.get_yticklabels(), fontsize=7)
plt.setp(heatmap.get_xticklabels(), fontsize=7)
heatmap.tick_params(axis='x', pad=1, length=2)
heatmap.tick_params(axis='y', pad=1, length=2)
heatmap.yaxis.labelpad = 2
heatmap.invert_yaxis()
if model_version != 'gpt2-xl':
for i, label in enumerate(heatmap.xaxis.get_ticklabels()):
if i%2 == 1:
label.set_visible(False)
for i, label in enumerate(heatmap.yaxis.get_ticklabels()):
if i%2 == 1:
label.set_visible(False)
if do_sort:
heatmap.axes.get_xaxis().set_ticks([])
else:
if model_version == 'gpt2-xl':
every_nth = 2
for n, label in enumerate(ax1.xaxis.get_ticklabels()):
if n % every_nth != 0:
label.set_visible(False)
for n, label in enumerate(ax1.yaxis.get_ticklabels()):
if n % every_nth != 0:
label.set_visible(False)
# split axes of heatmap to put colorbar
ax_divider = make_axes_locatable(ax1)
if model_version in ('gpt2-large', 'gpt2-xl'):
cax = ax_divider.append_axes('left', size='7%', pad='45%')
else:
cax = ax_divider.append_axes('left', size='7%', pad='33%')
# # make colorbar for heatmap.
# # Heatmap returns an axes obj but you need to get a mappable obj (get_children)
cbar = colorbar(ax1.get_children()[0], cax=cax, orientation='vertical')
cax.yaxis.set_ticks_position('left')
cbar.solids.set_edgecolor("face")
cbar.ax.tick_params(labelsize=7, length=4, pad=2)
ax1.set_title(structure_to_title[source], size=6)
ax1.set_xlabel('Head', size=6)
ax1.set_ylabel('Layer', size=6)
for _, spine in ax1.spines.items():
spine.set_visible(True)
ax2.set_title(' Layer Effect', size=6)
bp = sns.barplot(x=effect_layer, ax=ax2, y=list(range(n_layers)), color="#3D4FC4", orient="h")
plt.setp(bp.get_xticklabels(), fontsize=7)
bp.tick_params(axis='x', pad=1, length=3)
ax2.invert_yaxis()
ax2.set_yticklabels([])
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_visible(False)
ax2.xaxis.set_ticks_position('bottom')
ax2.axvline(0, linewidth=.85, color='black')
path = f'attention_figures/heat_maps_with_bar_{effect_type}{"_sorted" if do_sort else ""}_limit'
if not os.path.exists(path):
os.makedirs(path)
fname = f'{path}/{source}_{model_version}_{filter}.pdf'
plt.savefig(fname, format='pdf', bbox_inches='tight')
plt.close()
'''
def main():
sns.set_context("paper")
sns.set_style("white")
#model_versions = ['distilgpt2', 'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']
model_versions = ['gpt2']
#filters = ['filtered', 'unfiltered']
filters = ['filtered']
structures = ['distractor', 'distractor_1', 'singular', 'plural', 'rc_singular', 'rc_plural', \
'within_rc_singular', 'within_rc_plural', 'simple']
# process structural bias
for model_version in model_versions:
for filter in filters:
for structure in structures:
fname = f"attention_results/{structure}/attention_intervention_{model_version}_{filter}.json"
if not os.path.exists(fname):
print("File does not exist:", fname)
continue
with open(fname) as f:
if structure in ("rc", "within_rc", "pp"):
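                        # (hedged note) the combined-structure result files appear to contain two
                        # concatenated JSON objects (one per number condition), so they are split
                        # manually here before decoding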
file_str = f.readline()
json_strs = file_str.split("]},")
json_strs[0] += "]}"
data = [json.loads(json_str) for json_str in json_strs]
else:
data = json.load(f)
save_figures(data, structure, model_version, filter)
if __name__ == '__main__':
main()
| 14,418 | 45.067093 | 174 |
py
|
lm-intervention
|
lm-intervention-master/make_feathers.py
|
import os
import pandas as pd
from glob import glob
from tqdm import tqdm
import sys
PATH = sys.argv[1]
MODELS = ['Distil', 'Small', 'Medium', 'Large', 'XL']
CHUNKSIZE = 100000
EXAMPLE_TYPES = ['None', 'Distractor', 'Plural attractor',
'Singular attractor']
def get_size(f):
for m in MODELS:
if m.lower() in f:
return m
return 'Small'
def get_example_type(f):
for et in EXAMPLE_TYPES:
if et.lower().split()[0] in f:
return et
def compute_effects_and_save():
files = glob(PATH + '*.csv')
preloaded = glob(PATH + '*.feather')
effects_dfs = []
agg_dfs = []
for f in tqdm(files, desc='Loading files', leave=False):
df = None
feather = f.replace('csv', 'feather')
if feather in preloaded:
df = pd.read_feather(feather)
else:
df = pd.concat(tqdm(pd.read_csv(f, chunksize=CHUNKSIZE),
leave=False, desc='Loading dataframe for ' + f))
df.to_feather(feather)
df['Layer'] = df.layer
df['Neuron'] = df.neuron
df = df.set_index(['Neuron', 'Layer'])
# Compute effects/measures
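        # (hedged note) 'Yz' is the candidate2/candidate1 probability ratio under the
        # neuron intervention; 'Singular grammaticality' is the same ratio for the base
        # prompt and 'Plural grammaticality' the inverse ratio for the alternate prompt;
        # the effect columns are then relative changes of these ratios, in the spirit of
        # the causal-mediation-style measures used elsewhere in this repo.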
df['Yz'] = df.candidate2_prob / df.candidate1_prob
df['Singular grammaticality'] = df.candidate2_base_prob \
/ df.candidate1_base_prob
df['Effect'] = df['Yz'] / df['Singular grammaticality'] - 1
df['Plural grammaticality'] = df.candidate1_alt1_prob \
/ df.candidate2_alt1_prob
df['Total effect'] = 1 / (df['Plural grammaticality']
* df['Singular grammaticality']) - 1
# Averaged over examples
agg = df.groupby(['Neuron', 'Layer']).mean()
neurons_per_layer, _ = agg.index.max()
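        # flag, within each layer, the 5% of neurons with the largest mean effect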
idx = agg.sort_values('Effect').groupby('Layer')\
.tail(int(neurons_per_layer*0.05)).index
agg['Top 5 percent'] = agg.index.isin(idx)
agg['Random'] = 'random' in f
agg['Model size'] = get_size(f)
agg['Intervening tokens'] = get_example_type(f)
agg['Effect type'] = 'Indirect' if 'indirect' in f else 'Direct'
agg_dfs.append(agg)
# Not averaged
df['Random'] = 'random' in f
df['Model size'] = get_size(f)
df['Intervening tokens'] = get_example_type(f)
df['Effect type'] = 'Indirect' if 'indirect' in f else 'Direct'
effects_dfs.append(df)
pd.concat(effects_dfs).reset_index().to_feather(PATH + 'effects.feather')
pd.concat(agg_dfs).reset_index().to_feather(PATH + 'agg.feather')
if __name__ == "__main__":
compute_effects_and_save()
| 2,629 | 33.155844 | 77 |
py
|
lm-intervention
|
lm-intervention-master/featherify.py
|
import pandas as pd
import os, sys
from glob import glob
from tqdm import tqdm
PATH = sys.argv[1]
dtypes = {
'word': 'category',
'base_string': 'category',
'alt_string1': 'category',
'candidate1': 'category',
'candidate2': 'category',
'layer': 'int32',
'neuron': 'int32',
}

files = list(filter(lambda x: x.endswith('.csv'), os.listdir(PATH)))
os.makedirs(PATH + 'feathers/', exist_ok=True)  # make sure the output directory exists
for f in tqdm(files):
pd.read_csv(PATH + f, dtype=dtypes)\
.to_feather(PATH + 'feathers/' + f.replace('csv','feather'))
| 488 | 21.227273 | 68 |
py
|
lm-intervention
|
lm-intervention-master/heatmaps.py
|
import pandas as pd
import matplotlib.pyplot as plt
import sys
import seaborn as sns
sns.set()
PATH = sys.argv[1]
FIGURES_PATH = sys.argv[2]
MODELS = ['Distil', 'Small', 'Medium', 'Large', 'XL']
EXAMPLE_TYPES = ['None', 'Distractor', 'Plural attractor',
'Singular attractor']
FORMAT = '.pdf'
def draw_heatmap(data,color):
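    # 'color' is unused here but is required by FacetGrid.map_dataframe's calling convention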
pivot = data.pivot(index='Layer', columns='Neuron', values='Effect')
ax = sns.heatmap(pivot, rasterized=True)
ax.invert_yaxis()
def save_heatmaps():
print('Generating heatmaps...')
df = pd.read_feather(PATH + 'agg.feather')
data = df[(df['Effect type'] == 'Indirect') & ~df['Random']]
sns.FacetGrid(data,
col='Model size', col_order=MODELS,
row='Intervening tokens', row_order=EXAMPLE_TYPES,
margin_titles=False,
aspect=2, height=5,
sharey=False, sharex=False).map_dataframe(draw_heatmap)
title = f'Indirect effect heatmaps'
plt.suptitle(title)
plt.tight_layout(rect=[0, 0, 1, 0.95])
plt.savefig(FIGURES_PATH
+ title.lower().replace(' ', '_')
+ FORMAT)
plt.show()
if __name__ == "__main__":
save_heatmaps()
| 1,188 | 28 | 72 |
py
|
lm-intervention
|
lm-intervention-master/transformers_modified/modeling_transfo_xl.py
|
""" A copy of transformers/modeling_transfo_xl.py from the Huggingface
transformers library modified so that the attention module is called with
non-keyword arguments (to make those arguments accessible to the hook).
"""
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.configuration_transfo_xl import TransfoXLConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable
from transformers.modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax
from transformers.modeling_utils import PreTrainedModel
logger = logging.getLogger(__name__)
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP = {
"transfo-xl-wt103": "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-pytorch_model.bin",
}
def build_tf_to_pytorch_map(model, config):
""" A map of modules from TF to PyTorch.
This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, "transformer"):
# We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
tf_to_pt_map.update(
{
"transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
"transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias,
}
)
for i, (out_l, proj_l, tie_proj) in enumerate(
zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs)
):
layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
if config.tie_weight:
tf_to_pt_map.update({layer_str + "b": out_l.bias})
else:
raise NotImplementedError
# I don't think this is implemented in the TF code
tf_to_pt_map.update({layer_str + "lookup_table": out_l.weight, layer_str + "b": out_l.bias})
if not tie_proj:
tf_to_pt_map.update({layer_str + "proj": proj_l})
# Now load the rest of the transformer
model = model.transformer
# Embeddings
for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
tf_to_pt_map.update({layer_str + "lookup_table": embed_l.weight, layer_str + "proj_W": proj_l})
# Transformer blocks
for i, b in enumerate(model.layers):
layer_str = "transformer/layer_%d/" % i
tf_to_pt_map.update(
{
layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
}
)
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
for b in model.layers:
r_r_list.append(b.dec_attn.r_r_bias)
r_w_list.append(b.dec_attn.r_w_bias)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
tf_to_pt_map.update({"transformer/r_r_bias": r_r_list, "transformer/r_w_bias": r_w_list})
return tf_to_pt_map
def load_tf_weights_in_transfo_xl(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_to_pytorch_map(model, config)
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
for name, pointer in tf_to_pt_map.items():
assert name in tf_weights
array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
if "kernel" in name or "proj" in name:
array = np.transpose(array)
if ("r_r_bias" in name or "r_w_bias" in name) and len(pointer) > 1:
# Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + "/Adam", None)
tf_weights.pop(name + "/Adam_1", None)
logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
return model
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super().__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer("inv_freq", inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].expand(-1, bsz, -1)
else:
return pos_emb[:, None, :]
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):
super().__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = self.CoreNet(self.layer_norm(inp))
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = self.CoreNet(inp)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class RelPartialLearnableMultiHeadAttn(nn.Module):
def __init__(
self,
n_head,
d_model,
d_head,
dropout,
dropatt=0,
tgt_len=None,
ext_len=None,
mem_len=None,
pre_lnorm=False,
r_r_bias=None,
r_w_bias=None,
output_attentions=False,
layer_norm_epsilon=1e-5,
):
super().__init__()
self.output_attentions = output_attentions
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
else:
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
def _rel_shift(self, x):
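        # (hedged note) standard Transformer-XL relative-shift trick: zero-pad along the
        # key dimension and reshape so that each query position ends up aligned with the
        # scores for its relative positions.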
zero_pad_shape = (x.size(0), 1) + x.size()[2:]
zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)
x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
x_padded = x_padded.view(*x_padded_shape)
x = x_padded[1:].view_as(x)
return x
def forward(self, w, r, attn_mask=None, mems=None, head_mask=None):
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)  # rlen x n_head x d_head
# compute attention score
rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
AC = torch.einsum("ibnd,jbnd->ijbn", (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
rr_head_q = w_head_q + self.r_r_bias
BD = torch.einsum("ibnd,jnd->ijbn", (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
BD = self._rel_shift(BD)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
# compute attention probability
if attn_mask is not None and torch.sum(attn_mask).item():
attn_mask = attn_mask == 1 # Switch to bool
if attn_mask.dim() == 2:
if next(self.parameters()).dtype == torch.float16:
attn_score = (
attn_score.float().masked_fill(attn_mask[None, :, :, None], -65000).type_as(attn_score)
)
else:
attn_score = attn_score.float().masked_fill(attn_mask[None, :, :, None], -1e30).type_as(attn_score)
elif attn_mask.dim() == 3:
if next(self.parameters()).dtype == torch.float16:
attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -65000).type_as(attn_score)
else:
attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -1e30).type_as(attn_score)
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
# Mask heads if we want to
if head_mask is not None:
attn_prob = attn_prob * head_mask
# compute attention vector
attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
# linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
# residual connection
outputs = [w + attn_out]
else:
# residual connection + layer normalization
outputs = [self.layer_norm(w + attn_out)]
if self.output_attentions:
outputs.append(attn_prob)
return outputs
class RelPartialLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs):
super().__init__()
self.dec_attn = RelPartialLearnableMultiHeadAttn(
n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs
)
self.pos_ff = PositionwiseFF(
d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm"), layer_norm_epsilon=layer_norm_epsilon
)
def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None):
### MODIFIED ###
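        # Passing the arguments positionally (rather than as keywords) makes them all
        # visible to hooks registered on ``self.dec_attn``: PyTorch forward (pre-)hooks
        # only receive a module's positional inputs, so keyword arguments would be
        # hidden from the intervention code.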
# attn_outputs = self.dec_attn(dec_inp, r, attn_mask=dec_attn_mask, mems=mems, head_mask=head_mask)
attn_outputs = self.dec_attn(dec_inp, r, dec_attn_mask, mems, head_mask)
### MODIFIED ###
ff_output = self.pos_ff(attn_outputs[0])
outputs = [ff_output] + attn_outputs[1:]
return outputs
class AdaptiveEmbedding(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = cutoffs + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
if d_proj != d_embed:
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
emb_flat.index_copy_(0, indices_i, emb_i)
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
return embed
class TransfoXLPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = TransfoXLConfig
pretrained_model_archive_map = TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_transfo_xl
base_model_prefix = "transformer"
def _init_weight(self, weight):
if self.config.init == "uniform":
nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
elif self.config.init == "normal":
nn.init.normal_(weight, 0.0, self.config.init_std)
def _init_bias(self, bias):
nn.init.constant_(bias, 0.0)
def _init_weights(self, m):
""" Initialize the weights.
"""
classname = m.__class__.__name__
if classname.find("Linear") != -1:
if hasattr(m, "weight") and m.weight is not None:
self._init_weight(m.weight)
if hasattr(m, "bias") and m.bias is not None:
self._init_bias(m.bias)
elif classname.find("AdaptiveEmbedding") != -1:
if hasattr(m, "emb_projs"):
for i in range(len(m.emb_projs)):
if m.emb_projs[i] is not None:
nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
elif classname.find("Embedding") != -1:
if hasattr(m, "weight"):
self._init_weight(m.weight)
elif classname.find("ProjectedAdaptiveLogSoftmax") != -1:
if hasattr(m, "cluster_weight") and m.cluster_weight is not None:
self._init_weight(m.cluster_weight)
if hasattr(m, "cluster_bias") and m.cluster_bias is not None:
self._init_bias(m.cluster_bias)
if hasattr(m, "out_projs"):
for i in range(len(m.out_projs)):
if m.out_projs[i] is not None:
nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
elif classname.find("LayerNorm") != -1:
if hasattr(m, "weight"):
nn.init.normal_(m.weight, 1.0, self.config.init_std)
if hasattr(m, "bias") and m.bias is not None:
self._init_bias(m.bias)
else:
if hasattr(m, "r_emb"):
self._init_weight(m.r_emb)
if hasattr(m, "r_w_bias"):
self._init_weight(m.r_w_bias)
if hasattr(m, "r_r_bias"):
self._init_weight(m.r_r_bias)
if hasattr(m, "r_bias"):
self._init_bias(m.r_bias)
TRANSFO_XL_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.TransfoXLConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
TRANSFO_XL_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.TransfoXLTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
given to this model should not be passed as input ids as they have already been computed.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
TRANSFO_XL_START_DOCSTRING,
)
class TransfoXLModel(TransfoXLPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.n_token = config.vocab_size
self.d_embed = config.d_embed
self.d_model = config.d_model
self.n_head = config.n_head
self.d_head = config.d_head
self.word_emb = AdaptiveEmbedding(
config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
)
self.drop = nn.Dropout(config.dropout)
self.n_layer = config.n_layer
self.tgt_len = config.tgt_len
self.mem_len = config.mem_len
self.ext_len = config.ext_len
self.max_klen = config.tgt_len + config.ext_len + config.mem_len
self.attn_type = config.attn_type
if not config.untie_r:
self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.layers = nn.ModuleList()
if config.attn_type == 0: # the default attention
for i in range(config.n_layer):
self.layers.append(
RelPartialLearnableDecoderLayer(
config.n_head,
config.d_model,
config.d_head,
config.d_inner,
config.dropout,
tgt_len=config.tgt_len,
ext_len=config.ext_len,
mem_len=config.mem_len,
dropatt=config.dropatt,
pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias,
output_attentions=self.output_attentions,
layer_norm_epsilon=config.layer_norm_epsilon,
)
)
else: # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints
raise NotImplementedError # Removed them to avoid maintaining dead code
self.same_length = config.same_length
self.clamp_len = config.clamp_len
if self.attn_type == 0: # default attention
self.pos_emb = PositionalEmbedding(self.d_model)
else: # learnable embeddings and absolute embeddings
raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
self.init_weights()
def get_input_embeddings(self):
return self.word_emb
def set_input_embeddings(self, new_embeddings):
self.word_emb = new_embeddings
def backward_compatible(self):
self.sample_softmax = -1
def reset_length(self, tgt_len, ext_len, mem_len):
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
def _prune_heads(self, heads):
logger.info("Head pruning is not implemented for Transformer-XL model")
pass
def init_mems(self, bsz):
if self.mem_len > 0:
mems = []
param = next(self.parameters())
for i in range(self.n_layer):
empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device)
mems.append(empty)
return mems
else:
return None
def _update_mems(self, hids, mems, mlen, qlen):
# does not deal with None
if mems is None:
return None
# mems is not None
assert len(hids) == len(mems), "len(hids) != len(mems)"
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
with torch.no_grad():
new_mems = []
end_idx = mlen + max(0, qlen - 0 - self.ext_len)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
return new_mems
@add_start_docstrings_to_callable(TRANSFO_XL_INPUTS_DOCSTRING)
def forward(self, input_ids=None, mems=None, head_mask=None, inputs_embeds=None):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.TransfoXLConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import TransfoXLTokenizer, TransfoXLModel
import torch
tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
model = TransfoXLModel.from_pretrained('transfo-xl-wt103')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states, mems = outputs[:2]
"""
# the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
# so we transpose here from shape [bsz, len] to shape [len, bsz]
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_ids = input_ids.transpose(0, 1).contiguous()
qlen, bsz = input_ids.size()
elif inputs_embeds is not None:
inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if mems is None:
mems = self.init_mems(bsz)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
# and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.n_layer
if inputs_embeds is not None:
word_emb = inputs_embeds
else:
word_emb = self.word_emb(input_ids)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + qlen
if self.same_length:
all_ones = word_emb.new_ones((qlen, klen), dtype=torch.uint8)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None] # -1
else:
dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.uint8), diagonal=1 + mlen)[
:, :, None
]
hids = []
attentions = []
if self.attn_type == 0: # default
pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb)
pos_emb = self.drop(pos_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
layer_outputs = layer(
core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i, head_mask=head_mask[i]
)
core_out = layer_outputs[0]
if self.output_attentions:
attentions.append(layer_outputs[1])
else: # learnable embeddings and absolute embeddings
raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
core_out = self.drop(core_out)
new_mems = self._update_mems(hids, mems, mlen, qlen)
# We transpose back here to shape [bsz, len, hidden_dim]
outputs = [core_out.transpose(0, 1).contiguous(), new_mems]
if self.output_hidden_states:
# Add last layer and transpose to library standard shape [bsz, len, hidden_dim]
hids.append(core_out)
hids = list(t.transpose(0, 1).contiguous() for t in hids)
outputs.append(hids)
if self.output_attentions:
# Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
attentions = list(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
outputs.append(attentions)
return outputs # last hidden state, new_mems, (all hidden states), (all attentions)
@add_start_docstrings(
"""The Transformer-XL Model with a language modeling head on top
(adaptive softmax with weights tied to the adaptive input embeddings)""",
TRANSFO_XL_START_DOCSTRING,
)
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = TransfoXLModel(config)
self.sample_softmax = config.sample_softmax
assert (
self.sample_softmax <= 0
), "Sampling from the softmax is not implemented yet. Please look at issue: #3310: https://github.com/huggingface/transformers/issues/3310"
self.crit = ProjectedAdaptiveLogSoftmax(
config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
)
self.init_weights()
def tie_weights(self):
"""
Run this to be sure output and input (adaptive) softmax weights are tied
"""
if self.config.tie_weight:
for i in range(len(self.crit.out_layers)):
self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i])
if self.config.tie_projs:
for i, tie_proj in enumerate(self.config.tie_projs):
if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
if self.config.torchscript:
self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone())
else:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
elif tie_proj and self.config.div_val != 1:
if self.config.torchscript:
self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone())
else:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
def reset_length(self, tgt_len, ext_len, mem_len):
self.transformer.reset_length(tgt_len, ext_len, mem_len)
def init_mems(self, bsz):
return self.transformer.init_mems(bsz)
@add_start_docstrings_to_callable(TRANSFO_XL_INPUTS_DOCSTRING)
def forward(self, input_ids=None, mems=None, head_mask=None, inputs_embeds=None, labels=None):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.TransfoXLConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
Language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel
import torch
tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
prediction_scores, mems = outputs[:2]
"""
if input_ids is not None:
bsz, tgt_len = input_ids.size(0), input_ids.size(1)
elif inputs_embeds is not None:
bsz, tgt_len = inputs_embeds.size(0), inputs_embeds.size(1)
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
transformer_outputs = self.transformer(input_ids, mems=mems, head_mask=head_mask, inputs_embeds=inputs_embeds)
last_hidden = transformer_outputs[0]
pred_hid = last_hidden[:, -tgt_len:]
outputs = transformer_outputs[1:]
softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), labels)
if labels is None:
softmax_output = softmax_output.view(bsz, tgt_len, -1)
outputs = [softmax_output] + outputs
else:
softmax_output = softmax_output.view(bsz, tgt_len)
outputs = [softmax_output, None] + outputs
return outputs # (loss), logits or None if labels is not None (speed up adaptive softmax), new_mems, (all hidden states), (all attentions)
def get_output_embeddings(self):
""" Double-check if you are using adaptive softmax.
"""
if self.sample_softmax > 0:
return self.out_layer
else:
return self.crit.out_layers[-1]
def prepare_inputs_for_generation(self, input_ids, past, **model_kwargs):
inputs = {"input_ids": input_ids}
# if past is defined in model kwargs then use it for faster decoding
if past:
inputs["mems"] = past
return inputs
| 40,378 | 42.795011 | 151 |
py
|
lm-intervention
|
lm-intervention-master/transformers_modified/modeling_xlnet.py
|
""" A copy of transformers/modeling_xlnet.py from the Huggingface
transformers library modified so that the attention module is called with
non-keyword arguments (to make those arguments accessible to the hook).
"""
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLNet model.
"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from transformers.activations import gelu_new, swish
from transformers.configuration_xlnet import XLNetConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable
from transformers.modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary
logger = logging.getLogger(__name__)
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP = {
"xlnet-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-pytorch_model.bin",
"xlnet-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-pytorch_model.bin",
}
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
""" A map of modules from TF to PyTorch.
I use a map to keep the PyTorch model as
identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, "transformer"):
if hasattr(model, "lm_loss"):
# We will load also the output bias
tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights:
# We will load also the sequence summary
tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight
tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias
if (
hasattr(model, "logits_proj")
and config.finetuning_task is not None
and "model/regression_{}/logit/kernel".format(config.finetuning_task) in tf_weights
):
tf_to_pt_map["model/regression_{}/logit/kernel".format(config.finetuning_task)] = model.logits_proj.weight
tf_to_pt_map["model/regression_{}/logit/bias".format(config.finetuning_task)] = model.logits_proj.bias
# Now load the rest of the transformer
model = model.transformer
# Embeddings and output
tf_to_pt_map.update(
{
"model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
"model/transformer/mask_emb/mask_emb": model.mask_emb,
}
)
# Transformer blocks
for i, b in enumerate(model.layer):
layer_str = "model/transformer/layer_%d/" % i
tf_to_pt_map.update(
{
layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.rel_attn.o,
layer_str + "rel_attn/q/kernel": b.rel_attn.q,
layer_str + "rel_attn/k/kernel": b.rel_attn.k,
layer_str + "rel_attn/r/kernel": b.rel_attn.r,
layer_str + "rel_attn/v/kernel": b.rel_attn.v,
layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
}
)
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
r_s_list = []
seg_embed_list = []
for b in model.layer:
r_r_list.append(b.rel_attn.r_r_bias)
r_w_list.append(b.rel_attn.r_w_bias)
r_s_list.append(b.rel_attn.r_s_bias)
seg_embed_list.append(b.rel_attn.seg_embed)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
r_s_list = [model.r_s_bias]
seg_embed_list = [model.seg_embed]
tf_to_pt_map.update(
{
"model/transformer/r_r_bias": r_r_list,
"model/transformer/r_w_bias": r_w_list,
"model/transformer/r_s_bias": r_s_list,
"model/transformer/seg_embed": seg_embed_list,
}
)
return tf_to_pt_map
def load_tf_weights_in_xlnet(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)
for name, pointer in tf_to_pt_map.items():
logger.info("Importing {}".format(name))
if name not in tf_weights:
logger.info("{} not in tf pre-trained weights, skipping".format(name))
continue
array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name):
logger.info("Transposing")
array = np.transpose(array)
if isinstance(pointer, list):
# Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + "/Adam", None)
tf_weights.pop(name + "/Adam_1", None)
logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
return model
ACT2FN = {"gelu": gelu_new, "relu": torch.nn.functional.relu, "swish": swish}
XLNetLayerNorm = nn.LayerNorm
class XLNetRelativeAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.output_attentions = config.output_attentions
if config.d_model % config.n_head != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.d_model, config.n_head)
)
self.n_head = config.n_head
self.d_head = config.d_head
self.d_model = config.d_model
self.scale = 1 / (config.d_head ** 0.5)
self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))
self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.dropout)
def prune_heads(self, heads):
raise NotImplementedError
@staticmethod
def rel_shift(x, klen=-1):
"""perform relative shift to form the relative attention score."""
x_size = x.shape
x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
x = x[1:, ...]
x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
# x = x[:, 0:klen, :, :]
x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
return x
@staticmethod
def rel_shift_bnij(x, klen=-1):
x_size = x.shape
x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
x = x[:, :, 1:, :]
x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
# Note: the tensor-slice form was faster in my testing than torch.index_select
# However, tracing doesn't like the nature of the slice, and if klen changes
# during the run then it'll fail, whereas index_select will be fine.
x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
# x = x[:, :, :, :klen]
return x
def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None):
"""Core relative positional attention operations."""
# content based attention score
ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
# position based attention score
bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
# segment based attention score
if seg_mat is None:
ef = 0
else:
ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
# merge attention scores and perform masking
attn_score = (ac + bd + ef) * self.scale
if attn_mask is not None:
# attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
if attn_mask.dtype == torch.float16:
attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
else:
attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
# attention probability
attn_prob = F.softmax(attn_score, dim=3)
attn_prob = self.dropout(attn_prob)
# Mask heads if we want to
if head_mask is not None:
attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
# attention output
attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
if self.output_attentions:
return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
return attn_vec
def post_attention(self, h, attn_vec, residual=True):
"""Post-attention processing."""
# post-attention projection (back to `d_model`)
attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
attn_out = self.dropout(attn_out)
if residual:
attn_out = attn_out + h
output = self.layer_norm(attn_out)
return output
def forward(self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None):
if g is not None:
# Two-stream attention with relative positional encoding.
# content based attention score
if mems is not None and mems.dim() > 1:
cat = torch.cat([mems, h], dim=0)
else:
cat = h
# content-based key head
k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
# content-based value head
v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
# position-based key head
k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
# h-stream
# content-stream query head
q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
# core attention ops
attn_vec_h = self.rel_attn_core(
q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask
)
if self.output_attentions:
attn_vec_h, attn_prob_h = attn_vec_h
# post processing
output_h = self.post_attention(h, attn_vec_h)
# g-stream
# query-stream query head
q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
# core attention ops
if target_mapping is not None:
q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
attn_vec_g = self.rel_attn_core(
q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask
)
if self.output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
else:
attn_vec_g = self.rel_attn_core(
q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask
)
if self.output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
# post processing
output_g = self.post_attention(g, attn_vec_g)
if self.output_attentions:
attn_prob = attn_prob_h, attn_prob_g
else:
# Multi-head attention with relative positional encoding
if mems is not None and mems.dim() > 1:
cat = torch.cat([mems, h], dim=0)
else:
cat = h
# content heads
q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
# positional heads
k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
# core attention ops
attn_vec = self.rel_attn_core(
q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask
)
if self.output_attentions:
attn_vec, attn_prob = attn_vec
# post processing
output_h = self.post_attention(h, attn_vec)
output_g = None
outputs = (output_h, output_g)
if self.output_attentions:
outputs = outputs + (attn_prob,)
return outputs
class XLNetFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
self.layer_1 = nn.Linear(config.d_model, config.d_inner)
self.layer_2 = nn.Linear(config.d_inner, config.d_model)
self.dropout = nn.Dropout(config.dropout)
if isinstance(config.ff_activation, str):
self.activation_function = ACT2FN[config.ff_activation]
else:
self.activation_function = config.ff_activation
def forward(self, inp):
output = inp
output = self.layer_1(output)
output = self.activation_function(output)
output = self.dropout(output)
output = self.layer_2(output)
output = self.dropout(output)
output = self.layer_norm(output + inp)
return output
class XLNetLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.rel_attn = XLNetRelativeAttention(config)
self.ff = XLNetFeedForward(config)
self.dropout = nn.Dropout(config.dropout)
def forward(
self, output_h, output_g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None
):
### MODIFIED ###
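        # As in the Transformer-XL copy, the attention module is called with positional
        # arguments only, so that hooks registered on ``self.rel_attn`` can see (and
        # modify) every input, including the attention masks and target_mapping.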
# outputs = self.rel_attn(
# output_h,
# output_g,
# attn_mask_h,
# attn_mask_g,
# r,
# seg_mat,
# mems=mems,
# target_mapping=target_mapping,
# head_mask=head_mask,
# )
outputs = self.rel_attn(
output_h,
output_g,
attn_mask_h,
attn_mask_g,
r,
seg_mat,
mems,
target_mapping,
head_mask,
)
### MODIFIED ###
output_h, output_g = outputs[:2]
if output_g is not None:
output_g = self.ff(output_g)
output_h = self.ff(output_h)
outputs = (output_h, output_g) + outputs[2:] # Add again attentions if there are there
return outputs
class XLNetPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = XLNetConfig
pretrained_model_archive_map = XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_xlnet
base_model_prefix = "transformer"
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, XLNetLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, XLNetRelativeAttention):
for param in [
module.q,
module.k,
module.v,
module.o,
module.r,
module.r_r_bias,
module.r_s_bias,
module.r_w_bias,
module.seg_embed,
]:
param.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, XLNetModel):
module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)
XLNET_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
XLNET_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
given to this model should not be passed as input ids as they have already been computed.
perm_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
            If ``perm_mask[k, i, j] = 0``, i attends to j in batch k;
            if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
If None, each token attends to all the others (full bidirectional attention).
Only used during pretraining (to define factorization order) or for sequential decoding (generation).
target_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the output tokens to use.
            If ``target_mapping[k, i, j] = 1``, the i-th prediction in batch k is on the j-th token.
Only used during pretraining for partial prediction or for sequential decoding (generation).
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token. The classifier token should be represented by a ``2``.
`What are token type IDs? <../glossary.html#token-type-ids>`_
input_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
Kept for compatibility with the original code base.
            You can only use one of `input_mask` and `attention_mask`.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
XLNET_START_DOCSTRING,
)
class XLNetModel(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.output_past = config.output_past
self.mem_len = config.mem_len
self.reuse_len = config.reuse_len
self.d_model = config.d_model
self.same_length = config.same_length
self.attn_type = config.attn_type
self.bi_data = config.bi_data
self.clamp_len = config.clamp_len
self.n_layer = config.n_layer
self.word_embedding = nn.Embedding(config.vocab_size, config.d_model)
self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
self.dropout = nn.Dropout(config.dropout)
self.init_weights()
def get_input_embeddings(self):
return self.word_embedding
def set_input_embeddings(self, new_embeddings):
self.word_embedding = new_embeddings
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
def create_mask(self, qlen, mlen):
"""
Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
Args:
qlen: Sequence length
            mlen: Memory length (number of cached tokens prepended to the current sequence)
::
same_length=False: same_length=True:
<mlen > < qlen > <mlen > < qlen >
^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1]
[0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1]
qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1]
[0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1]
v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0]
"""
attn_mask = torch.ones([qlen, qlen])
mask_up = torch.triu(attn_mask, diagonal=1)
attn_mask_pad = torch.zeros([qlen, mlen])
ret = torch.cat([attn_mask_pad, mask_up], dim=1)
if self.same_length:
mask_lo = torch.tril(attn_mask, diagonal=-1)
ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)
ret = ret.to(next(self.parameters()))
return ret
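    # Worked example (illustrative, not executed): with qlen=3, mlen=2 and
    # same_length=False, create_mask(3, 2) returns
    #   [[0., 0., 0., 1., 1.],
    #    [0., 0., 0., 0., 1.],
    #    [0., 0., 0., 0., 0.]]
    # i.e. the two memory positions are always visible and each query position may
    # attend to itself and to earlier positions only (1.0 marks a masked position).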
def cache_mem(self, curr_out, prev_mem):
# cache hidden states into memory.
if self.reuse_len is not None and self.reuse_len > 0:
curr_out = curr_out[: self.reuse_len]
if prev_mem is None:
new_mem = curr_out[-self.mem_len :]
else:
new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len :]
return new_mem.detach()
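    # Sketch of the memory update (illustrative, assuming reuse_len is None): with
    # mem_len=4, prev_mem caching hidden states for tokens [t0, t1, t2, t3] and
    # curr_out holding [t4, t5], the new memory keeps the last four entries of the
    # concatenation, i.e. [t2, t3, t4, t5], detached from the autograd graph.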
@staticmethod
def positional_embedding(pos_seq, inv_freq, bsz=None):
sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq)
pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
pos_emb = pos_emb[:, None, :]
if bsz is not None:
pos_emb = pos_emb.expand(-1, bsz, -1)
return pos_emb
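    # Shape sketch (illustrative): with pos_seq of length L and inv_freq of length
    # d_model/2, the einsum yields sinusoid_inp of shape [L, d_model/2]; after the
    # sin/cos concatenation and the None-indexing, the returned pos_emb has shape
    # [L, 1, d_model] (or [L, bsz, d_model] when bsz is given).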
def relative_positional_encoding(self, qlen, klen, bsz=None):
# create relative positional encoding.
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))
if self.attn_type == "bi":
# beg, end = klen - 1, -qlen
beg, end = klen, -qlen
elif self.attn_type == "uni":
# beg, end = klen - 1, -1
beg, end = klen, -1
else:
raise ValueError("Unknown `attn_type` {}.".format(self.attn_type))
if self.bi_data:
fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
if bsz is not None:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
else:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
else:
fwd_pos_seq = torch.arange(beg, end, -1.0)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
pos_emb = pos_emb.to(next(self.parameters()))
return pos_emb
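    # Illustrative note: with attn_type="bi", fwd_pos_seq = arange(klen, -qlen, -1)
    # enumerates the klen + qlen possible relative distances (klen down to -qlen + 1),
    # so pos_emb has klen + qlen positions; when bi_data is True, the forward and
    # backward encodings are concatenated along the batch dimension.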
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetModel
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = XLNetModel.from_pretrained('xlnet-large-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=False)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
        # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
        # but we want a unified interface in the library with the batch size on the first dimension,
        # so here we move the first dimension (batch) to the end
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_ids = input_ids.transpose(0, 1).contiguous()
qlen, bsz = input_ids.shape[0], input_ids.shape[1]
elif inputs_embeds is not None:
inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
klen = mlen + qlen
dtype_float = next(self.parameters()).dtype
device = next(self.parameters()).device
# Attention mask
# causal attention mask
if self.attn_type == "uni":
attn_mask = self.create_mask(qlen, mlen)
attn_mask = attn_mask[:, :, None, None]
elif self.attn_type == "bi":
attn_mask = None
else:
raise ValueError("Unsupported attention type: {}".format(self.attn_type))
# data mask: input mask & perm mask
        assert input_mask is None or attention_mask is None, (
            "You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one."
        )
if input_mask is None and attention_mask is not None:
input_mask = 1.0 - attention_mask
if input_mask is not None and perm_mask is not None:
data_mask = input_mask[None] + perm_mask
elif input_mask is not None and perm_mask is None:
data_mask = input_mask[None]
elif input_mask is None and perm_mask is not None:
data_mask = perm_mask
else:
data_mask = None
if data_mask is not None:
# all mems can be attended to
if mlen > 0:
mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
data_mask = torch.cat([mems_mask, data_mask], dim=1)
if attn_mask is None:
attn_mask = data_mask[:, :, :, None]
else:
attn_mask += data_mask[:, :, :, None]
if attn_mask is not None:
attn_mask = (attn_mask > 0).to(dtype_float)
if attn_mask is not None:
non_tgt_mask = -torch.eye(qlen).to(attn_mask)
if mlen > 0:
non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
else:
non_tgt_mask = None
# Word embeddings and prepare h & g hidden states
if inputs_embeds is not None:
word_emb_k = inputs_embeds
else:
word_emb_k = self.word_embedding(input_ids)
output_h = self.dropout(word_emb_k)
if target_mapping is not None:
word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
# else: # We removed the inp_q input which was same as target mapping
# inp_q_ext = inp_q[:, :, None]
# word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
output_g = self.dropout(word_emb_q)
else:
output_g = None
# Segment embedding
if token_type_ids is not None:
# Convert `token_type_ids` to one-hot `seg_mat`
if mlen > 0:
mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
else:
cat_ids = token_type_ids
# `1` indicates not in the same segment [qlen x klen x bsz]
seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
else:
seg_mat = None
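        # Illustrative example: for a batch column of token_type_ids [0, 0, 1, 1] and
        # mlen=0, the seg_mat row for the first token is [0, 0, 1, 1] before the
        # one-hot encoding (1 marks "different segment"); after F.one_hot the tensor
        # has shape [qlen, klen, bsz, 2].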
# Positional encoding
pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
pos_emb = self.dropout(pos_emb)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is expanded to shape [num_hidden_layers x 1 x 1 x 1 x n_head], which broadcasts over qlen, klen and bsz inside each layer
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.n_layer
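        # Illustrative shape note: a 1-D head_mask of shape [n_head] and a 2-D mask of
        # shape [n_layer, n_head] both end up stored as [n_layer, 1, 1, 1, n_head];
        # the singleton dimensions broadcast against the attention probabilities of
        # shape [qlen, klen, bsz, n_head] inside each layer.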
new_mems = ()
if mems is None:
mems = [None] * len(self.layer)
attentions = []
hidden_states = []
for i, layer_module in enumerate(self.layer):
if self.mem_len is not None and self.mem_len > 0 and self.output_past:
# cache new mems
new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
if self.output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
outputs = layer_module(
output_h,
output_g,
attn_mask_h=non_tgt_mask,
attn_mask_g=attn_mask,
r=pos_emb,
seg_mat=seg_mat,
mems=mems[i],
target_mapping=target_mapping,
head_mask=head_mask[i],
)
output_h, output_g = outputs[:2]
if self.output_attentions:
attentions.append(outputs[2])
# Add last hidden state
if self.output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
output = self.dropout(output_g if output_g is not None else output_h)
# Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
outputs = (output.permute(1, 0, 2).contiguous(),)
if self.mem_len is not None and self.mem_len > 0 and self.output_past:
outputs = outputs + (new_mems,)
if self.output_hidden_states:
if output_g is not None:
hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
else:
hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
outputs = outputs + (hidden_states,)
if self.output_attentions:
if target_mapping is not None:
# when target_mapping is provided, there are 2-tuple of attentions
attentions = tuple(
tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions
)
else:
attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
outputs = outputs + (attentions,)
return outputs # outputs, (new_mems), (hidden_states), (attentions)
@add_start_docstrings(
"""XLNet Model with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
XLNET_START_DOCSTRING,
)
class XLNetLMHeadModel(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.attn_type = config.attn_type
self.same_length = config.same_length
self.transformer = XLNetModel(config)
self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True)
self.init_weights()
def get_output_embeddings(self):
return self.lm_loss
def prepare_inputs_for_generation(self, input_ids, past, **model_kwargs):
# Add dummy token at the end (no attention on this one)
effective_batch_size = input_ids.shape[0]
dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
# Build permutation mask so that previous tokens don't see last token
sequence_length = input_ids.shape[1]
perm_mask = torch.zeros(
(effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device
)
perm_mask[:, :, -1] = 1.0
# We'll only predict the last token
target_mapping = torch.zeros(
(effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device
)
target_mapping[0, 0, -1] = 1.0
inputs = {"input_ids": input_ids, "perm_mask": perm_mask, "target_mapping": target_mapping}
# if past is defined in model kwargs then use it for faster decoding
if past:
inputs["mems"] = past
return inputs
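    # Illustrative example: for input_ids of shape [1, 4], the method appends a dummy
    # token (shape becomes [1, 5]), builds perm_mask of shape [1, 5, 5] whose last
    # column is 1.0 so that no position attends to the dummy token, and builds
    # target_mapping of shape [1, 1, 5] selecting only that last position for
    # prediction.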
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_predict)`, `optional`, defaults to :obj:`None`):
Labels for masked language modeling.
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
            The labels should correspond to the masked input words that should be predicted and depend on `target_mapping`. Note that in order to perform standard auto-regressive language modeling a `<mask>` token has to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below)
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored, the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
Language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetLMHeadModel
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased')
# We show how to setup inputs to predict a next token using a bi-directional context.
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token
target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
next_token_logits = outputs[0] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
            # XLNetLMHeadModel can be trained in the same way, using standard auto-regressive language modeling.
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token
labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
assert labels.shape[0] == 1, 'only one word will be predicted'
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token
target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
loss, next_token_logits = outputs[:2] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
logits = self.lm_loss(transformer_outputs[0])
        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states and attentions if they are there
if labels is not None:
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForSequenceClassification(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.sequence_summary = SequenceSummary(config)
self.logits_proj = nn.Linear(config.d_model, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetForSequenceClassification
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = XLNetForSequenceClassification.from_pretrained('xlnet-large-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
logits = self.logits_proj(output)
        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states and attentions if they are there
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForTokenClassification(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetForTokenClassification
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = XLNetForTokenClassification.from_pretrained('xlnet-large-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
scores = outputs[0]
"""
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
        outputs = (logits,) + outputs[1:]  # Keep mems, hidden states and attentions if they are there
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RACE/SWAG tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForMultipleChoice(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLNetModel(config)
self.sequence_summary = SequenceSummary(config)
self.logits_proj = nn.Linear(config.d_model, 1)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
token_type_ids=None,
input_mask=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
labels=None,
head_mask=None,
inputs_embeds=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices - 1]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
`num_choices` is the second dimension of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetForMultipleChoice
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
model = XLNetForMultipleChoice.from_pretrained('xlnet-base-cased')
choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
labels = torch.tensor(1).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, classification_scores = outputs[:2]
"""
num_choices = input_ids.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
transformer_outputs = self.transformer(
flat_input_ids,
token_type_ids=flat_token_type_ids,
input_mask=flat_input_mask,
attention_mask=flat_attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
logits = self.logits_proj(output)
reshaped_logits = logits.view(-1, num_choices)
        outputs = (reshaped_logits,) + transformer_outputs[1:]  # Keep mems, hidden states and attentions if they are there
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-start scores (before SoftMax).
end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-end scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetForQuestionAnsweringSimple
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
model = XLNetForQuestionAnsweringSimple.from_pretrained('xlnet-base-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
start_positions = torch.tensor([1])
end_positions = torch.tensor([3])
outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
loss = outputs[0]
"""
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, batch splitting may have added an extra dimension; remove it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (mems), (hidden_states), (attentions)
@add_start_docstrings(
"""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.transformer = XLNetModel(config)
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
is_impossible=None,
cls_index=None,
p_mask=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels whether a question has an answer or no answer (SQuAD 2.0)
cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
            1.0 means the token should be masked, 0.0 means the token is not masked.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top config.start_n_top start token possibilities (beam-search).
end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the ``is_impossible`` label of the answers.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetForQuestionAnswering
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
model = XLNetForQuestionAnswering.from_pretrained('xlnet-base-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
start_positions = torch.tensor([1])
end_positions = torch.tensor([3])
outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
loss = outputs[0]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_states = transformer_outputs[0]
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
        outputs = transformer_outputs[1:]  # Keep mems, hidden states and attentions if they are there
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
outputs = (total_loss,) + outputs
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum(
"blh,bl->bh", hidden_states, start_log_probs
) # get the representation of START as weighted sum of hidden states
cls_logits = self.answer_class(
hidden_states, start_states=start_states, cls_index=cls_index
) # Shape (batch size,): one single `cls_logits` for each sample
outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
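            # Illustrative count: with start_n_top=5 and end_n_top=5, the beam search
            # above scores 5 start candidates and, for each, 5 end candidates, i.e.
            # 25 candidate spans per example, returned as flattened tensors of shape
            # [bsz, 25] alongside a single answerability logit per example.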
# return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
# or (if labels are provided) (total_loss,)
return outputs
| 79,781 | 47.946012 | 304 |
py
|
icml2022-mips
|
icml2022-mips-main/src/synthetic/main_n_val.py
|
from logging import getLogger
from pathlib import Path
from time import time
import warnings
import hydra
import numpy as np
from obp.dataset import linear_reward_function
from obp.dataset import SyntheticBanditDatasetWithActionEmbeds
from obp.ope import RegressionModel
from omegaconf import DictConfig
from ope import run_ope
import pandas as pd
from pandas import DataFrame
from plots import plot_line
from policy import gen_eps_greedy
from sklearn.ensemble import RandomForestRegressor
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
logger = getLogger(__name__)
@hydra.main(config_path="./conf", config_name="config")
def main(cfg: DictConfig) -> None:
print(cfg)
logger.info(f"The current working directory is {Path().cwd()}")
start_time = time()
# log path
log_path = Path("./varying_n_val_data")
df_path = log_path / "df"
df_path.mkdir(exist_ok=True, parents=True)
random_state = cfg.setting.random_state
elapsed_prev = 0.0
result_df_list = []
for n_val_data in cfg.setting.n_val_data_list:
estimated_policy_value_list = []
## define a dataset class
dataset = SyntheticBanditDatasetWithActionEmbeds(
n_actions=cfg.setting.n_actions,
dim_context=cfg.setting.dim_context,
beta=cfg.setting.beta,
reward_type="continuous",
n_cat_per_dim=cfg.setting.n_cat_per_dim,
latent_param_mat_dim=cfg.setting.latent_param_mat_dim,
n_cat_dim=cfg.setting.n_cat_dim,
n_unobserved_cat_dim=cfg.setting.n_unobserved_cat_dim,
n_deficient_actions=int(cfg.setting.n_actions * cfg.setting.n_def_actions),
reward_function=linear_reward_function,
reward_std=cfg.setting.reward_std,
random_state=random_state,
)
### test bandit data is used to approximate the ground-truth policy value
test_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_test_data
)
action_dist_test = gen_eps_greedy(
expected_reward=test_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=cfg.setting.eps,
)
policy_value = dataset.calc_ground_truth_policy_value(
expected_reward=test_bandit_data["expected_reward"],
action_dist=action_dist_test,
)
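        # Sketch of the evaluated policy (an assumption based on the helper's name and
        # arguments; gen_eps_greedy is defined in this repo's policy.py): an
        # epsilon-greedy policy of roughly the form
        #   pi(a|x) = (1 - eps) * 1{a == argmax_a' q(x, a')} + eps / n_actions,
        # evaluated on the large test sample so that `policy_value` approximates its
        # ground-truth value.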
for _ in range(cfg.setting.n_seeds):
## generate validation data
val_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=n_val_data,
)
## make decisions on validation data
action_dist_val = gen_eps_greedy(
expected_reward=val_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=cfg.setting.eps,
)
## OPE using validation data
if cfg.setting.embed_selection is False:
reg_model = RegressionModel(
n_actions=dataset.n_actions,
action_context=val_bandit_data["action_context"],
base_model=RandomForestRegressor(
n_estimators=10,
max_samples=0.8,
random_state=random_state + _,
),
)
estimated_rewards = reg_model.fit_predict(
context=val_bandit_data["context"], # context; x
action=val_bandit_data["action"], # action; a
reward=val_bandit_data["reward"], # reward; r
n_folds=2,
random_state=random_state + _,
)
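                # With n_folds=2, fit_predict performs cross-fitting: the regression
                # model is fit on one half of the log data and predicts q(x, a) on the
                # other half (and vice versa), so the model-based estimators below do
                # not evaluate on the same samples they were fit on.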
estimated_policy_values = run_ope(
val_bandit_data=val_bandit_data,
action_dist_val=action_dist_val,
estimated_rewards=estimated_rewards,
embed_selection=cfg.setting.embed_selection,
)
else:
estimated_policy_values = run_ope(
val_bandit_data=val_bandit_data,
action_dist_val=action_dist_val,
embed_selection=cfg.setting.embed_selection,
)
estimated_policy_value_list.append(estimated_policy_values)
## summarize results
result_df = (
DataFrame(DataFrame(estimated_policy_value_list).stack())
.reset_index(1)
.rename(columns={"level_1": "est", 0: "value"})
)
result_df["n_val_data"] = n_val_data
result_df["se"] = (result_df.value - policy_value) ** 2
result_df["bias"] = 0
result_df["variance"] = 0
sample_mean = DataFrame(result_df.groupby(["est"]).mean().value).reset_index()
for est_ in sample_mean["est"]:
estimates = result_df.loc[result_df["est"] == est_, "value"].values
mean_estimates = sample_mean.loc[sample_mean["est"] == est_, "value"].values
mean_estimates = np.ones_like(estimates) * mean_estimates
result_df.loc[result_df["est"] == est_, "bias"] = (
policy_value - mean_estimates
) ** 2
result_df.loc[result_df["est"] == est_, "variance"] = (
estimates - mean_estimates
) ** 2
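        # Note on the columns above: per estimator, "se" is the squared error of each
        # estimate against the ground-truth policy value, "bias" is the squared gap
        # between the ground truth and the across-seed mean estimate, and "variance"
        # is the squared deviation of each estimate from that mean, so that
        #   mean(se) = bias + mean(variance)
        # reproduces the usual bias-variance decomposition of the MSE.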
result_df_list.append(result_df)
elapsed = np.round((time() - start_time) / 60, 2)
diff = np.round(elapsed - elapsed_prev, 2)
logger.info(f"n_val_data={n_val_data}: {elapsed}min (diff {diff}min)")
elapsed_prev = elapsed
# aggregate all results
result_df = pd.concat(result_df_list).reset_index(level=0)
result_df.to_csv(df_path / "result_df.csv")
plot_line(
result_df=result_df,
log_path=log_path,
embed_selection=cfg.setting.embed_selection,
x="n_val_data",
xlabel="number of samples in the log data",
xticklabels=cfg.setting.n_val_data_list,
)
if __name__ == "__main__":
main()
| 6,247 | 37.331288 | 88 |
py
|
icml2022-mips
|
icml2022-mips-main/src/synthetic/main_n_actions.py
|
from logging import getLogger
from pathlib import Path
from time import time
import warnings
import hydra
import numpy as np
from obp.dataset import linear_reward_function
from obp.dataset import SyntheticBanditDatasetWithActionEmbeds
from obp.ope import RegressionModel
from omegaconf import DictConfig
from ope import run_ope
import pandas as pd
from pandas import DataFrame
from plots import plot_line
from policy import gen_eps_greedy
from sklearn.ensemble import RandomForestRegressor
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
logger = getLogger(__name__)
@hydra.main(config_path="./conf", config_name="config")
def main(cfg: DictConfig) -> None:
print(cfg)
logger.info(f"The current working directory is {Path().cwd()}")
start_time = time()
# log path
log_path = Path("./varying_n_action")
df_path = log_path / "df"
df_path.mkdir(exist_ok=True, parents=True)
random_state = cfg.setting.random_state
elapsed_prev = 0.0
result_df_list = []
for n_action in cfg.setting.n_actions_list:
estimated_policy_value_list = []
## define a dataset class
dataset = SyntheticBanditDatasetWithActionEmbeds(
n_actions=n_action,
dim_context=cfg.setting.dim_context,
beta=cfg.setting.beta,
reward_type="continuous",
n_cat_per_dim=cfg.setting.n_cat_per_dim,
latent_param_mat_dim=cfg.setting.latent_param_mat_dim,
n_cat_dim=cfg.setting.n_cat_dim,
n_unobserved_cat_dim=cfg.setting.n_unobserved_cat_dim,
n_deficient_actions=int(n_action * cfg.setting.n_def_actions),
reward_function=linear_reward_function,
reward_std=cfg.setting.reward_std,
random_state=random_state,
)
### test bandit data is used to approximate the ground-truth policy value
test_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_test_data
)
action_dist_test = gen_eps_greedy(
expected_reward=test_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=cfg.setting.eps,
)
policy_value = dataset.calc_ground_truth_policy_value(
expected_reward=test_bandit_data["expected_reward"],
action_dist=action_dist_test,
)
for _ in range(cfg.setting.n_seeds):
## generate validation data
val_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_val_data,
)
## make decisions on validation data
action_dist_val = gen_eps_greedy(
expected_reward=val_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=cfg.setting.eps,
)
## OPE using validation data
reg_model = RegressionModel(
n_actions=dataset.n_actions,
action_context=val_bandit_data["action_context"],
base_model=RandomForestRegressor(
n_estimators=10,
max_samples=0.8,
random_state=random_state + _,
),
)
estimated_rewards = reg_model.fit_predict(
context=val_bandit_data["context"], # context; x
action=val_bandit_data["action"], # action; a
reward=val_bandit_data["reward"], # reward; r
n_folds=2,
random_state=random_state + _,
)
estimated_policy_values = run_ope(
val_bandit_data=val_bandit_data,
action_dist_val=action_dist_val,
estimated_rewards=estimated_rewards,
embed_selection=cfg.setting.embed_selection,
)
estimated_policy_value_list.append(estimated_policy_values)
## summarize results
result_df = (
DataFrame(DataFrame(estimated_policy_value_list).stack())
.reset_index(1)
.rename(columns={"level_1": "est", 0: "value"})
)
result_df["n_action"] = n_action
result_df["se"] = (result_df.value - policy_value) ** 2
result_df["bias"] = 0
result_df["variance"] = 0
sample_mean = DataFrame(result_df.groupby(["est"]).mean().value).reset_index()
for est_ in sample_mean["est"]:
estimates = result_df.loc[result_df["est"] == est_, "value"].values
mean_estimates = sample_mean.loc[sample_mean["est"] == est_, "value"].values
mean_estimates = np.ones_like(estimates) * mean_estimates
result_df.loc[result_df["est"] == est_, "bias"] = (
policy_value - mean_estimates
) ** 2
result_df.loc[result_df["est"] == est_, "variance"] = (
estimates - mean_estimates
) ** 2
result_df_list.append(result_df)
elapsed = np.round((time() - start_time) / 60, 2)
diff = np.round(elapsed - elapsed_prev, 2)
logger.info(f"n_action={n_action}: {elapsed}min (diff {diff}min)")
elapsed_prev = elapsed
# aggregate all results
result_df = pd.concat(result_df_list).reset_index(level=0)
result_df.to_csv(df_path / "result_df.csv")
plot_line(
result_df=result_df,
log_path=log_path,
embed_selection=cfg.setting.embed_selection,
x="n_action",
xlabel="number of actions",
xticklabels=cfg.setting.n_actions_list,
)
if __name__ == "__main__":
main()
| 5,801 | 36.432258 | 88 |
py
|
icml2022-mips
|
icml2022-mips-main/src/synthetic/main_n_def_actions.py
|
from logging import getLogger
from pathlib import Path
from time import time
import warnings
import hydra
import numpy as np
from obp.dataset import linear_reward_function
from obp.dataset import SyntheticBanditDatasetWithActionEmbeds
from obp.ope import RegressionModel
from omegaconf import DictConfig
from ope import run_ope
import pandas as pd
from pandas import DataFrame
from plots import plot_line
from policy import gen_eps_greedy
from sklearn.ensemble import RandomForestRegressor
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
logger = getLogger(__name__)
@hydra.main(config_path="./conf", config_name="config")
def main(cfg: DictConfig) -> None:
print(cfg)
logger.info(f"The current working directory is {Path().cwd()}")
start_time = time()
# log path
log_path = Path("./varying_n_def_actions")
df_path = log_path / "df"
df_path.mkdir(exist_ok=True, parents=True)
random_state = cfg.setting.random_state
elapsed_prev = 0.0
result_df_list = []
for n_def_action in cfg.setting.n_def_actions_list:
estimated_policy_value_list = []
## define a dataset class
dataset = SyntheticBanditDatasetWithActionEmbeds(
n_actions=cfg.setting.n_actions,
dim_context=cfg.setting.dim_context,
beta=cfg.setting.beta,
reward_type="continuous",
n_cat_per_dim=cfg.setting.n_cat_per_dim,
latent_param_mat_dim=cfg.setting.latent_param_mat_dim,
n_cat_dim=cfg.setting.n_cat_dim,
n_unobserved_cat_dim=cfg.setting.n_unobserved_cat_dim,
n_irrelevant_cat_dim=cfg.setting.n_irrelevant_cat_dim,
n_deficient_actions=int(cfg.setting.n_actions * n_def_action),
reward_function=linear_reward_function,
reward_std=cfg.setting.reward_std,
random_state=random_state,
)
### test bandit data is used to approximate the ground-truth policy value
test_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_test_data
)
action_dist_test = gen_eps_greedy(
expected_reward=test_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=cfg.setting.eps,
)
policy_value = dataset.calc_ground_truth_policy_value(
expected_reward=test_bandit_data["expected_reward"],
action_dist=action_dist_test,
)
for _ in range(cfg.setting.n_seeds):
## generate validation data
val_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_val_data,
)
## make decisions on validation data
action_dist_val = gen_eps_greedy(
expected_reward=val_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=cfg.setting.eps,
)
## OPE using validation data
reg_model = RegressionModel(
n_actions=dataset.n_actions,
action_context=val_bandit_data["action_context"],
base_model=RandomForestRegressor(
n_estimators=10,
max_samples=0.8,
random_state=random_state + _,
),
)
estimated_rewards = reg_model.fit_predict(
context=val_bandit_data["context"], # context; x
action=val_bandit_data["action"], # action; a
reward=val_bandit_data["reward"], # reward; r
n_folds=2,
random_state=random_state + _,
)
estimated_policy_values = run_ope(
val_bandit_data=val_bandit_data,
action_dist_val=action_dist_val,
estimated_rewards=estimated_rewards,
)
estimated_policy_value_list.append(estimated_policy_values)
## summarize results
result_df = (
DataFrame(DataFrame(estimated_policy_value_list).stack())
.reset_index(1)
.rename(columns={"level_1": "est", 0: "value"})
)
result_df["n_def_actions"] = int(cfg.setting.n_actions * n_def_action)
result_df["se"] = (result_df.value - policy_value) ** 2
result_df["bias"] = 0
result_df["variance"] = 0
sample_mean = DataFrame(result_df.groupby(["est"]).mean().value).reset_index()
for est_ in sample_mean["est"]:
estimates = result_df.loc[result_df["est"] == est_, "value"].values
mean_estimates = sample_mean.loc[sample_mean["est"] == est_, "value"].values
mean_estimates = np.ones_like(estimates) * mean_estimates
result_df.loc[result_df["est"] == est_, "bias"] = (
policy_value - mean_estimates
) ** 2
result_df.loc[result_df["est"] == est_, "variance"] = (
estimates - mean_estimates
) ** 2
result_df_list.append(result_df)
elapsed = np.round((time() - start_time) / 60, 2)
diff = np.round(elapsed - elapsed_prev, 2)
logger.info(f"n_def_action={n_def_action}: {elapsed}min (diff {diff}min)")
elapsed_prev = elapsed
# aggregate all results
result_df = pd.concat(result_df_list).reset_index(level=0)
result_df.to_csv(df_path / "result_df.csv")
plot_line(
result_df=result_df,
log_path=log_path,
embed_selection=cfg.setting.embed_selection,
x="n_def_actions",
xlabel=r"number of deficient actions",
xticklabels=cfg.setting.n_def_actions_list,
)
if __name__ == "__main__":
main()
| 5,899 | 37.064516 | 88 |
py
|
icml2022-mips
|
icml2022-mips-main/src/synthetic/main_eps.py
|
from logging import getLogger
from pathlib import Path
from time import time
import warnings
import hydra
import numpy as np
from obp.dataset import linear_reward_function
from obp.dataset import SyntheticBanditDatasetWithActionEmbeds
from obp.ope import RegressionModel
from omegaconf import DictConfig
from ope import run_ope
import pandas as pd
from pandas import DataFrame
from plots import plot_line
from policy import gen_eps_greedy
from sklearn.ensemble import RandomForestRegressor
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
logger = getLogger(__name__)
@hydra.main(config_path="./conf", config_name="config")
def main(cfg: DictConfig) -> None:
print(cfg)
logger.info(f"The current working directory is {Path().cwd()}")
start_time = time()
# log path
log_path = Path("./varying_eps")
df_path = log_path / "df"
df_path.mkdir(exist_ok=True, parents=True)
random_state = cfg.setting.random_state
elapsed_prev = 0.0
result_df_list = []
for eps in cfg.setting.eps_list:
estimated_policy_value_list = []
## define a dataset class
dataset = SyntheticBanditDatasetWithActionEmbeds(
n_actions=cfg.setting.n_actions,
dim_context=cfg.setting.dim_context,
beta=cfg.setting.beta,
reward_type="continuous",
n_cat_per_dim=cfg.setting.n_cat_per_dim,
latent_param_mat_dim=cfg.setting.latent_param_mat_dim,
n_cat_dim=cfg.setting.n_cat_dim,
n_unobserved_cat_dim=cfg.setting.n_unobserved_cat_dim,
n_deficient_actions=int(cfg.setting.n_actions * cfg.setting.n_def_actions),
reward_function=linear_reward_function,
reward_std=cfg.setting.reward_std,
random_state=random_state,
)
### test bandit data is used to approximate the ground-truth policy value
test_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_test_data
)
action_dist_test = gen_eps_greedy(
expected_reward=test_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=eps,
)
policy_value = dataset.calc_ground_truth_policy_value(
expected_reward=test_bandit_data["expected_reward"],
action_dist=action_dist_test,
)
for _ in range(cfg.setting.n_seeds):
## generate validation data
val_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_val_data,
)
## make decisions on validation data
action_dist_val = gen_eps_greedy(
expected_reward=val_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=eps,
)
## OPE using validation data
reg_model = RegressionModel(
n_actions=dataset.n_actions,
action_context=val_bandit_data["action_context"],
base_model=RandomForestRegressor(
n_estimators=10,
max_samples=0.8,
random_state=random_state + _,
),
)
estimated_rewards = reg_model.fit_predict(
context=val_bandit_data["context"], # context; x
action=val_bandit_data["action"], # action; a
reward=val_bandit_data["reward"], # reward; r
n_folds=2,
random_state=random_state + _,
)
estimated_policy_values = run_ope(
val_bandit_data=val_bandit_data,
action_dist_val=action_dist_val,
estimated_rewards=estimated_rewards,
embed_selection=cfg.setting.embed_selection,
)
estimated_policy_value_list.append(estimated_policy_values)
## summarize results
result_df = (
DataFrame(DataFrame(estimated_policy_value_list).stack())
.reset_index(1)
.rename(columns={"level_1": "est", 0: "value"})
)
result_df["eps"] = eps
result_df["se"] = (result_df.value - policy_value) ** 2
result_df["bias"] = 0
result_df["variance"] = 0
sample_mean = DataFrame(result_df.groupby(["est"]).mean().value).reset_index()
for est_ in sample_mean["est"]:
estimates = result_df.loc[result_df["est"] == est_, "value"].values
mean_estimates = sample_mean.loc[sample_mean["est"] == est_, "value"].values
mean_estimates = np.ones_like(estimates) * mean_estimates
result_df.loc[result_df["est"] == est_, "bias"] = (
policy_value - mean_estimates
) ** 2
result_df.loc[result_df["est"] == est_, "variance"] = (
estimates - mean_estimates
) ** 2
result_df_list.append(result_df)
elapsed = np.round((time() - start_time) / 60, 2)
diff = np.round(elapsed - elapsed_prev, 2)
logger.info(f"epsilon={eps}: {elapsed}min (diff {diff}min)")
elapsed_prev = elapsed
# aggregate all results
result_df = pd.concat(result_df_list).reset_index(level=0)
result_df.to_csv(df_path / "result_df.csv")
plot_line(
result_df=result_df,
log_path=log_path,
embed_selection=cfg.setting.embed_selection,
x="eps",
xlabel="epsilon",
xticklabels=cfg.setting.eps_list,
)
if __name__ == "__main__":
main()
| 5,750 | 36.103226 | 88 |
py
|
icml2022-mips
|
icml2022-mips-main/src/synthetic/ope.py
|
from typing import Dict
from typing import Optional
import numpy as np
from obp.ope import DirectMethod as DM
from obp.ope import DoublyRobust as DR
from obp.ope import DoublyRobustWithShrinkageTuning as DRos
from obp.ope import InverseProbabilityWeighting as IPS
from obp.ope import MarginalizedInverseProbabilityWeighting as MIPS
from obp.ope import OffPolicyEvaluation
from obp.ope import SubGaussianDoublyRobustTuning as SGDR
from obp.ope import SwitchDoublyRobustTuning as SwitchDR
def run_ope(
val_bandit_data: Dict,
action_dist_val: np.ndarray,
estimated_rewards: Optional[np.ndarray] = None,
estimated_rewards_mrdr: Optional[np.ndarray] = None,
embed_selection: bool = False,
) -> np.ndarray:
if embed_selection is False:
lambdas = [10, 50, 100, 500, 1e3, 5e3, 1e4, 5e4, 1e5, 5e5, np.inf]
lambdas_sg = [1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 5e-2, 0.1, 0.5, 1.0]
ope_estimators = [
IPS(estimator_name="IPS"),
DR(estimator_name="DR"),
DM(estimator_name="DM"),
SwitchDR(lambdas=lambdas, tuning_method="slope", estimator_name="SwitchDR"),
DRos(lambdas=lambdas, tuning_method="slope", estimator_name="DRos"),
SGDR(lambdas=lambdas_sg, tuning_method="slope", estimator_name="SGDR"),
MIPS(
n_actions=val_bandit_data["n_actions"],
embedding_selection_method=None,
estimator_name="MIPS",
),
MIPS(
n_actions=val_bandit_data["n_actions"],
embedding_selection_method=None,
estimator_name="MIPS (true)",
),
]
else:
ope_estimators = [
MIPS(
n_actions=val_bandit_data["n_actions"],
embedding_selection_method=None,
estimator_name="MIPS (true)",
),
MIPS(
n_actions=val_bandit_data["n_actions"],
embedding_selection_method="greedy",
min_emb_dim=5,
estimator_name="MIPS (slope)",
),
]
ope = OffPolicyEvaluation(
bandit_feedback=val_bandit_data,
ope_estimators=ope_estimators,
)
if embed_selection is False:
estimated_policy_values = ope.estimate_policy_values(
action_dist=action_dist_val,
estimated_rewards_by_reg_model=estimated_rewards,
action_embed=val_bandit_data["action_embed"],
pi_b=val_bandit_data["pi_b"],
p_e_a={"MIPS (true)": val_bandit_data["p_e_a"]},
)
else:
estimated_policy_values = ope.estimate_policy_values(
action_dist=action_dist_val,
estimated_rewards_by_reg_model=estimated_rewards,
action_embed=val_bandit_data["action_embed"],
pi_b=val_bandit_data["pi_b"],
p_e_a={
"MIPS (true)": val_bandit_data["p_e_a"],
"MIPS (slope)": val_bandit_data["p_e_a"],
},
)
return estimated_policy_values
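# Minimal usage sketch (illustrative only, not part of the original module); it
# mirrors how the main_*.py scripts in this directory call run_ope:
#   val_bandit_data = dataset.obtain_batch_bandit_feedback(n_rounds=10000)
#   action_dist_val = gen_eps_greedy(
#       expected_reward=val_bandit_data["expected_reward"], eps=0.1
#   )
#   estimated_policy_values = run_ope(
#       val_bandit_data=val_bandit_data,
#       action_dist_val=action_dist_val,
#       estimated_rewards=estimated_rewards,  # from RegressionModel.fit_predict
#   )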
| 3,080 | 35.678571 | 88 |
py
|
icml2022-mips
|
icml2022-mips-main/src/synthetic/main_beta.py
|
from logging import getLogger
from pathlib import Path
from time import time
import warnings
import hydra
import numpy as np
from obp.dataset import linear_reward_function
from obp.dataset import SyntheticBanditDatasetWithActionEmbeds
from obp.ope import RegressionModel
from omegaconf import DictConfig
from ope import run_ope
import pandas as pd
from pandas import DataFrame
from plots import plot_line
from policy import gen_eps_greedy
from sklearn.ensemble import RandomForestRegressor
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
logger = getLogger(__name__)
@hydra.main(config_path="./conf", config_name="config")
def main(cfg: DictConfig) -> None:
print(cfg)
logger.info(f"The current working directory is {Path().cwd()}")
start_time = time()
# log path
log_path = Path("./varying_beta")
df_path = log_path / "df"
df_path.mkdir(exist_ok=True, parents=True)
random_state = cfg.setting.random_state
elapsed_prev = 0.0
result_df_list = []
for beta in cfg.setting.beta_list:
estimated_policy_value_list = []
## define a dataset class
dataset = SyntheticBanditDatasetWithActionEmbeds(
n_actions=cfg.setting.n_actions,
dim_context=cfg.setting.dim_context,
beta=beta,
reward_type="continuous",
n_cat_per_dim=cfg.setting.n_cat_per_dim,
latent_param_mat_dim=cfg.setting.latent_param_mat_dim,
n_cat_dim=cfg.setting.n_cat_dim,
n_unobserved_cat_dim=cfg.setting.n_unobserved_cat_dim,
n_deficient_actions=int(cfg.setting.n_actions * cfg.setting.n_def_actions),
reward_function=linear_reward_function,
reward_std=cfg.setting.reward_std,
random_state=random_state,
)
### test bandit data is used to approximate the ground-truth policy value
test_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_test_data
)
action_dist_test = gen_eps_greedy(
expected_reward=test_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=cfg.setting.eps,
)
policy_value = dataset.calc_ground_truth_policy_value(
expected_reward=test_bandit_data["expected_reward"],
action_dist=action_dist_test,
)
for _ in range(cfg.setting.n_seeds):
## generate validation data
val_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_val_data,
)
## make decisions on validation data
action_dist_val = gen_eps_greedy(
expected_reward=val_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=cfg.setting.eps,
)
## OPE using validation data
reg_model = RegressionModel(
n_actions=dataset.n_actions,
action_context=val_bandit_data["action_context"],
base_model=RandomForestRegressor(
n_estimators=10,
max_samples=0.8,
random_state=random_state + _,
),
)
estimated_rewards = reg_model.fit_predict(
context=val_bandit_data["context"], # context; x
action=val_bandit_data["action"], # action; a
reward=val_bandit_data["reward"], # reward; r
n_folds=2,
random_state=random_state + _,
)
estimated_policy_values = run_ope(
val_bandit_data=val_bandit_data,
action_dist_val=action_dist_val,
estimated_rewards=estimated_rewards,
embed_selection=cfg.setting.embed_selection,
)
estimated_policy_value_list.append(estimated_policy_values)
## summarize results
result_df = (
DataFrame(DataFrame(estimated_policy_value_list).stack())
.reset_index(1)
.rename(columns={"level_1": "est", 0: "value"})
)
result_df["beta"] = beta
result_df["se"] = (result_df.value - policy_value) ** 2
result_df["bias"] = 0
result_df["variance"] = 0
sample_mean = DataFrame(result_df.groupby(["est"]).mean().value).reset_index()
for est_ in sample_mean["est"]:
estimates = result_df.loc[result_df["est"] == est_, "value"].values
mean_estimates = sample_mean.loc[sample_mean["est"] == est_, "value"].values
mean_estimates = np.ones_like(estimates) * mean_estimates
result_df.loc[result_df["est"] == est_, "bias"] = (
policy_value - mean_estimates
) ** 2
result_df.loc[result_df["est"] == est_, "variance"] = (
estimates - mean_estimates
) ** 2
result_df_list.append(result_df)
elapsed = np.round((time() - start_time) / 60, 2)
diff = np.round(elapsed - elapsed_prev, 2)
logger.info(f"beta={beta}: {elapsed}min (diff {diff}min)")
elapsed_prev = elapsed
# aggregate all results
result_df = pd.concat(result_df_list).reset_index(level=0)
result_df.to_csv(df_path / "result_df.csv")
plot_line(
result_df=result_df,
log_path=log_path,
embed_selection=cfg.setting.embed_selection,
x="beta",
xlabel="beta",
xticklabels=cfg.setting.beta_list,
)
if __name__ == "__main__":
main()
| 5,764 | 36.193548 | 88 |
py
|
icml2022-mips
|
icml2022-mips-main/src/synthetic/policy.py
|
import numpy as np
def gen_eps_greedy(
expected_reward: np.ndarray,
is_optimal: bool = True,
eps: float = 0.0,
) -> np.ndarray:
"Generate an evaluation policy via the epsilon-greedy rule."
base_pol = np.zeros_like(expected_reward)
if is_optimal:
a = np.argmax(expected_reward, axis=1)
else:
a = np.argmin(expected_reward, axis=1)
base_pol[
np.arange(expected_reward.shape[0]),
a,
] = 1
pol = (1.0 - eps) * base_pol
pol += eps / expected_reward.shape[1]
return pol[:, :, np.newaxis]
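if __name__ == "__main__":
    # Illustrative example (not part of the original module): with eps=0.2 and
    # is_optimal=True, each row puts 0.8 + 0.2/3 probability on its argmax and
    # 0.2/3 on the remaining actions, with a trailing position axis of size 1.
    q = np.array([[1.0, 0.2, 0.5], [0.1, 0.9, 0.3]])
    pi = gen_eps_greedy(q, eps=0.2)
    assert pi.shape == (2, 3, 1)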
| 565 | 23.608696 | 64 |
py
|
icml2022-mips
|
icml2022-mips-main/src/synthetic/plots.py
|
import matplotlib.pyplot as plt
import seaborn as sns
registered_colors = {
"MIPS": "tab:gray",
"MIPS (true)": "tab:orange",
"MIPS (w/ SLOPE)": "tab:green",
"IPS": "tab:red",
"DR": "tab:blue",
"DM": "tab:purple",
"SwitchDR": "tab:brown",
r"DR-$\lambda$": "tab:olive",
"DRos": "tab:pink",
}
def plot_line(
result_df,
log_path,
embed_selection,
x,
xlabel,
xticklabels,
) -> None:
plt.style.use("ggplot")
query_list = [
"(est == 'IPS' or est == 'DR' or est == 'DM' or est == 'MIPS' or est == 'MIPS (true)')",
"(est != 'MIPS (slope)')",
]
legend_list = [
["IPS", "DR", "DM", "MIPS", "MIPS (true)"],
[
"IPS",
"DR",
"DM",
"SwitchDR",
"DRos",
r"DR-$\lambda$",
"MIPS",
"MIPS (true)",
],
]
suffix_list = ["main", "all"]
if embed_selection is True:
query_list = [
"(est == 'MIPS (true)' or est == 'MIPS (slope)')",
]
legend_list = [
["MIPS (true)", "MIPS (w/ SLOPE)"],
]
suffix_list = ["slope"]
for query, legend, dir_ in zip(query_list, legend_list, suffix_list):
line_path = log_path / "fig" / dir_
line_path.mkdir(exist_ok=True, parents=True)
palette = [registered_colors[est] for est in legend]
### MSE ###
fig, ax = plt.subplots(figsize=(11, 7), tight_layout=True)
sns.lineplot(
linewidth=5,
marker="o",
markersize=8,
markers=True,
x=x,
y="se",
hue="est",
ax=ax,
palette=palette,
data=result_df.query(query),
)
# title and legend
ax.legend(
legend,
fontsize=25,
)
# yaxis
ax.set_yscale("log")
ax.set_ylabel("mean squared error (MSE)", fontsize=25)
ax.tick_params(axis="y", labelsize=18)
ax.yaxis.set_label_coords(-0.08, 0.5)
# xaxis
if x in ["n_action", "n_val_data"]:
ax.set_xscale("log")
ax.set_xlabel(xlabel, fontsize=25)
ax.set_xticks(xticklabels)
ax.set_xticklabels(xticklabels, fontsize=18)
ax.xaxis.set_label_coords(0.5, -0.1)
plt.savefig(line_path / "mse.png")
### MSE ###
fig, ax = plt.subplots(figsize=(11, 7), tight_layout=True)
sns.lineplot(
linewidth=5,
legend=False,
marker="o",
markersize=8,
markers=True,
x=x,
y="se",
hue="est",
ax=ax,
palette=palette,
data=result_df.query(query),
)
# yaxis
ax.set_yscale("log")
ax.set_ylabel("mean squared error (MSE)", fontsize=25)
ax.tick_params(axis="y", labelsize=18)
ax.yaxis.set_label_coords(-0.08, 0.5)
# xaxis
if x in ["n_action", "n_val_data"]:
ax.set_xscale("log")
ax.set_xlabel(xlabel, fontsize=25)
ax.set_xticks(xticklabels)
ax.set_xticklabels(xticklabels, fontsize=18)
ax.xaxis.set_label_coords(0.5, -0.1)
plt.savefig(line_path / "mse_no_legend.png")
### MSE ###
fig, ax = plt.subplots(figsize=(11, 7), tight_layout=True)
sns.lineplot(
linewidth=5,
marker="o",
markersize=8,
markers=True,
x=x,
y="se",
hue="est",
palette=palette,
ax=ax,
data=result_df.query(query),
)
# title and legend
ax.legend(
legend,
fontsize=25,
)
# yaxis
ax.set_ylabel("mean squared error (MSE)", fontsize=25)
ax.tick_params(axis="y", labelsize=18)
ax.yaxis.set_label_coords(-0.08, 0.5)
# xaxis
if x in ["n_action", "n_val_data"]:
ax.set_xscale("log")
ax.set_xlabel(xlabel, fontsize=25)
ax.set_xticks(xticklabels)
ax.set_xticklabels(xticklabels, fontsize=18)
ax.xaxis.set_label_coords(0.5, -0.1)
plt.savefig(line_path / "mse_no_log.png")
### MSE ###
fig, ax = plt.subplots(figsize=(11, 7), tight_layout=True)
sns.lineplot(
linewidth=5,
marker="o",
markersize=8,
markers=True,
x=x,
y="se",
hue="est",
palette=palette,
ax=ax,
legend=False,
data=result_df.query(query),
)
# yaxis
ax.set_ylabel("mean squared error (MSE)", fontsize=25)
ax.tick_params(axis="y", labelsize=18)
ax.yaxis.set_label_coords(-0.08, 0.5)
# xaxis
if x in ["n_action", "n_val_data"]:
ax.set_xscale("log")
ax.set_xlabel(xlabel, fontsize=25)
ax.set_xticks(xticklabels)
ax.set_xticklabels(xticklabels, fontsize=18)
ax.xaxis.set_label_coords(0.5, -0.1)
plt.savefig(line_path / "mse_no_log_no_legend.png")
### Bias ###
fig, ax = plt.subplots(figsize=(11, 7), tight_layout=True)
sns.lineplot(
linewidth=6,
marker="o",
markersize=8,
markers=True,
x=x,
y="bias",
hue="est",
palette=palette,
ax=ax,
ci=None,
data=result_df.query(query),
)
# title and legend
ax.legend(
legend,
fontsize=25,
)
# yaxis
ax.set_ylabel("squared bias", fontsize=25)
ax.tick_params(axis="y", labelsize=14)
ax.yaxis.set_label_coords(-0.08, 0.5)
# xaxis
if x in ["n_action", "n_val_data"]:
ax.set_xscale("log")
ax.set_xlabel(xlabel, fontsize=25)
ax.set_xticks(xticklabels)
ax.set_xticklabels(xticklabels, fontsize=18)
ax.xaxis.set_label_coords(0.5, -0.1)
plt.savefig(line_path / "bias_no_log.png")
### Variance ###
fig, ax = plt.subplots(figsize=(11, 7), tight_layout=True)
sns.lineplot(
linewidth=6,
marker="o",
markersize=8,
markers=True,
x=x,
y="variance",
hue="est",
palette=palette,
ax=ax,
ci=None,
data=result_df.query(query),
)
# title and legend
ax.legend(
legend,
fontsize=25,
)
# yaxis
ax.set_yscale("log")
ax.set_ylabel("variance", fontsize=25)
ax.tick_params(axis="y", labelsize=18)
ax.yaxis.set_label_coords(-0.08, 0.5)
# xaxis
if x in ["n_action", "n_val_data"]:
ax.set_xscale("log")
ax.set_xlabel(xlabel, fontsize=25)
ax.set_xticks(xticklabels)
ax.set_xticklabels(xticklabels, fontsize=18)
ax.xaxis.set_label_coords(0.5, -0.1)
plt.savefig(line_path / "variance.png")
### Variance ###
fig, ax = plt.subplots(figsize=(11, 7), tight_layout=True)
sns.lineplot(
linewidth=6,
legend=False,
marker="o",
markersize=8,
markers=True,
x=x,
y="variance",
hue="est",
palette=palette,
ax=ax,
ci=None,
data=result_df.query(query),
)
# yaxis
ax.set_yscale("log")
ax.set_ylabel("variance", fontsize=25)
ax.tick_params(axis="y", labelsize=18)
ax.yaxis.set_label_coords(-0.08, 0.5)
# xaxis
if x in ["n_action", "n_val_data"]:
ax.set_xscale("log")
ax.set_xlabel(xlabel, fontsize=25)
ax.set_xticks(xticklabels)
ax.set_xticklabels(xticklabels, fontsize=18)
ax.xaxis.set_label_coords(0.5, -0.1)
plt.savefig(line_path / "variance_no_legend.png")
### Variance ###
fig, ax = plt.subplots(figsize=(11, 7), tight_layout=True)
sns.lineplot(
linewidth=6,
marker="o",
markersize=8,
markers=True,
x=x,
y="variance",
hue="est",
palette=palette,
ax=ax,
ci=None,
data=result_df.query(query),
)
# title and legend
ax.legend(
legend,
fontsize=25,
)
# yaxis
ax.set_ylabel("variance", fontsize=25)
ax.tick_params(axis="y", labelsize=18)
ax.yaxis.set_label_coords(-0.08, 0.5)
# xaxis
if x in ["n_action", "n_val_data"]:
ax.set_xscale("log")
ax.set_xlabel(xlabel, fontsize=25)
ax.set_xticks(xticklabels)
ax.set_xticklabels(xticklabels, fontsize=18)
ax.xaxis.set_label_coords(0.5, -0.1)
plt.savefig(line_path / "variance_no_log.png")
### Variance ###
fig, ax = plt.subplots(figsize=(11, 7), tight_layout=True)
sns.lineplot(
linewidth=6,
marker="o",
markersize=8,
markers=True,
x=x,
y="variance",
hue="est",
palette=palette,
ax=ax,
ci=None,
legend=False,
data=result_df.query(query),
)
# yaxis
ax.set_ylabel("variance", fontsize=25)
ax.tick_params(axis="y", labelsize=18)
ax.yaxis.set_label_coords(-0.08, 0.5)
# xaxis
if x in ["n_action", "n_val_data"]:
ax.set_xscale("log")
ax.set_xlabel(xlabel, fontsize=25)
ax.set_xticks(xticklabels)
ax.set_xticklabels(xticklabels, fontsize=18)
ax.xaxis.set_label_coords(0.5, -0.1)
plt.savefig(line_path / "variance_no_log_no_legend.png")
| 9,993 | 28.480826 | 96 |
py
|
icml2022-mips
|
icml2022-mips-main/src/synthetic/main_noise.py
|
from logging import getLogger
from pathlib import Path
from time import time
import warnings
import hydra
import numpy as np
from obp.dataset import linear_reward_function
from obp.dataset import SyntheticBanditDatasetWithActionEmbeds
from obp.ope import RegressionModel
from omegaconf import DictConfig
from ope import run_ope
import pandas as pd
from pandas import DataFrame
from plots import plot_line
from policy import gen_eps_greedy
from sklearn.ensemble import RandomForestRegressor
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
logger = getLogger(__name__)
@hydra.main(config_path="./conf", config_name="config")
def main(cfg: DictConfig) -> None:
print(cfg)
logger.info(f"The current working directory is {Path().cwd()}")
start_time = time()
# log path
log_path = Path("./varying_noise")
df_path = log_path / "df"
df_path.mkdir(exist_ok=True, parents=True)
random_state = cfg.setting.random_state
elapsed_prev = 0.0
result_df_list = []
for noise in cfg.setting.noise_list:
estimated_policy_value_list = []
## define a dataset class
dataset = SyntheticBanditDatasetWithActionEmbeds(
n_actions=cfg.setting.n_actions,
dim_context=cfg.setting.dim_context,
beta=cfg.setting.beta,
reward_type="continuous",
n_cat_per_dim=cfg.setting.n_cat_per_dim,
latent_param_mat_dim=cfg.setting.latent_param_mat_dim,
n_cat_dim=cfg.setting.n_cat_dim,
n_unobserved_cat_dim=cfg.setting.n_unobserved_cat_dim,
n_deficient_actions=int(cfg.setting.n_actions * cfg.setting.n_def_actions),
reward_function=linear_reward_function,
reward_std=noise,
random_state=random_state,
)
### test bandit data is used to approximate the ground-truth policy value
test_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_test_data
)
action_dist_test = gen_eps_greedy(
expected_reward=test_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=cfg.setting.eps,
)
policy_value = dataset.calc_ground_truth_policy_value(
expected_reward=test_bandit_data["expected_reward"],
action_dist=action_dist_test,
)
for _ in range(cfg.setting.n_seeds):
## generate validation data
val_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_val_data,
)
## make decisions on validation data
action_dist_val = gen_eps_greedy(
expected_reward=val_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=cfg.setting.eps,
)
## OPE using validation data
reg_model = RegressionModel(
n_actions=dataset.n_actions,
action_context=val_bandit_data["action_context"],
base_model=RandomForestRegressor(
n_estimators=10,
max_samples=0.8,
random_state=random_state + _,
),
)
estimated_rewards = reg_model.fit_predict(
context=val_bandit_data["context"], # context; x
action=val_bandit_data["action"], # action; a
reward=val_bandit_data["reward"], # reward; r
n_folds=2,
random_state=random_state + _,
)
estimated_policy_values = run_ope(
val_bandit_data=val_bandit_data,
action_dist_val=action_dist_val,
estimated_rewards=estimated_rewards,
embed_selection=cfg.setting.embed_selection,
)
estimated_policy_value_list.append(estimated_policy_values)
## summarize results
result_df = (
DataFrame(DataFrame(estimated_policy_value_list).stack())
.reset_index(1)
.rename(columns={"level_1": "est", 0: "value"})
)
result_df["noise"] = noise
result_df["se"] = (result_df.value - policy_value) ** 2
result_df["bias"] = 0
result_df["variance"] = 0
sample_mean = DataFrame(result_df.groupby(["est"]).mean().value).reset_index()
for est_ in sample_mean["est"]:
estimates = result_df.loc[result_df["est"] == est_, "value"].values
mean_estimates = sample_mean.loc[sample_mean["est"] == est_, "value"].values
mean_estimates = np.ones_like(estimates) * mean_estimates
result_df.loc[result_df["est"] == est_, "bias"] = (
policy_value - mean_estimates
) ** 2
result_df.loc[result_df["est"] == est_, "variance"] = (
estimates - mean_estimates
) ** 2
result_df_list.append(result_df)
elapsed = np.round((time() - start_time) / 60, 2)
diff = np.round(elapsed - elapsed_prev, 2)
logger.info(f"noise={noise}: {elapsed}min (diff {diff}min)")
elapsed_prev = elapsed
# aggregate all results
result_df = pd.concat(result_df_list).reset_index(level=0)
result_df.to_csv(df_path / "result_df.csv")
plot_line(
result_df=result_df,
log_path=log_path,
embed_selection=cfg.setting.embed_selection,
x="noise",
xlabel="noise level",
xticklabels=cfg.setting.noise_list,
)
if __name__ == "__main__":
main()
| 5,775 | 36.264516 | 88 |
py
|
icml2022-mips
|
icml2022-mips-main/src/synthetic/main_n_unobs_cat_dim.py
|
from logging import getLogger
from pathlib import Path
from time import time
import warnings
import hydra
import numpy as np
from obp.dataset import linear_reward_function
from obp.dataset import SyntheticBanditDatasetWithActionEmbeds
from obp.ope import RegressionModel
from omegaconf import DictConfig
from ope import run_ope
import pandas as pd
from pandas import DataFrame
from plots import plot_line
from policy import gen_eps_greedy
from sklearn.ensemble import RandomForestRegressor
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
logger = getLogger(__name__)
@hydra.main(config_path="./conf", config_name="config")
def main(cfg: DictConfig) -> None:
print(cfg)
logger.info(f"The current working directory is {Path().cwd()}")
start_time = time()
# log path
log_path = Path("./varying_n_unobserved_cat_dim")
df_path = log_path / "df"
df_path.mkdir(exist_ok=True, parents=True)
random_state = cfg.setting.random_state
elapsed_prev = 0.0
result_df_list = []
for n_unobserved_cat_dim in cfg.setting.n_unobserved_cat_dim_list:
estimated_policy_value_list = []
## define a dataset class
dataset = SyntheticBanditDatasetWithActionEmbeds(
n_actions=cfg.setting.n_actions,
dim_context=cfg.setting.dim_context,
beta=cfg.setting.beta,
reward_type="continuous",
n_cat_per_dim=cfg.setting.n_cat_per_dim,
latent_param_mat_dim=cfg.setting.latent_param_mat_dim,
n_cat_dim=cfg.setting.n_cat_dim,
n_unobserved_cat_dim=n_unobserved_cat_dim,
n_deficient_actions=int(cfg.setting.n_actions * cfg.setting.n_def_actions),
reward_function=linear_reward_function,
reward_std=cfg.setting.reward_std,
random_state=random_state,
)
### test bandit data is used to approximate the ground-truth policy value
test_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_test_data
)
action_dist_test = gen_eps_greedy(
expected_reward=test_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=cfg.setting.eps,
)
policy_value = dataset.calc_ground_truth_policy_value(
expected_reward=test_bandit_data["expected_reward"],
action_dist=action_dist_test,
)
for _ in range(cfg.setting.n_seeds):
## generate validation data
val_bandit_data = dataset.obtain_batch_bandit_feedback(
n_rounds=cfg.setting.n_val_data,
)
## make decisions on validation data
action_dist_val = gen_eps_greedy(
expected_reward=val_bandit_data["expected_reward"],
is_optimal=cfg.setting.is_optimal,
eps=cfg.setting.eps,
)
## OPE using validation data
reg_model = RegressionModel(
n_actions=dataset.n_actions,
action_context=val_bandit_data["action_context"],
base_model=RandomForestRegressor(
n_estimators=10,
max_samples=0.8,
random_state=random_state + _,
),
)
estimated_rewards = reg_model.fit_predict(
context=val_bandit_data["context"], # context; x
action=val_bandit_data["action"], # action; a
reward=val_bandit_data["reward"], # reward; r
n_folds=2,
random_state=random_state + _,
)
estimated_policy_values = run_ope(
val_bandit_data=val_bandit_data,
action_dist_val=action_dist_val,
estimated_rewards=estimated_rewards,
embed_selection=cfg.setting.embed_selection,
)
estimated_policy_value_list.append(estimated_policy_values)
## summarize results
result_df = (
DataFrame(DataFrame(estimated_policy_value_list).stack())
.reset_index(1)
.rename(columns={"level_1": "est", 0: "value"})
)
result_df["n_unobserved_cat_dim"] = n_unobserved_cat_dim
result_df["se"] = (result_df.value - policy_value) ** 2
result_df["bias"] = 0
result_df["variance"] = 0
sample_mean = DataFrame(result_df.groupby(["est"]).mean().value).reset_index()
for est_ in sample_mean["est"]:
estimates = result_df.loc[result_df["est"] == est_, "value"].values
mean_estimates = sample_mean.loc[sample_mean["est"] == est_, "value"].values
mean_estimates = np.ones_like(estimates) * mean_estimates
result_df.loc[result_df["est"] == est_, "bias"] = (
policy_value - mean_estimates
) ** 2
result_df.loc[result_df["est"] == est_, "variance"] = (
estimates - mean_estimates
) ** 2
result_df_list.append(result_df)
elapsed = np.round((time() - start_time) / 60, 2)
diff = np.round(elapsed - elapsed_prev, 2)
logger.info(
f"n_unobs_emb_dim={n_unobserved_cat_dim}: {elapsed}min (diff {diff}min)"
)
elapsed_prev = elapsed
# aggregate all results
result_df = pd.concat(result_df_list).reset_index(level=0)
result_df.to_csv(df_path / "result_df.csv")
plot_line(
result_df=result_df,
log_path=log_path,
embed_selection=cfg.setting.embed_selection,
x="n_unobserved_cat_dim",
xlabel="number of unobserved embedding dimensions",
xticklabels=cfg.setting.n_unobserved_cat_dim_list,
)
if __name__ == "__main__":
main()
| 5,962 | 36.980892 | 88 |
py
|
icml2022-mips
|
icml2022-mips-main/src/real/main.py
|
from dataclasses import dataclass
from logging import getLogger
from pathlib import Path
from time import time
from typing import Optional
import warnings
import hydra
import matplotlib.pyplot as plt
import numpy as np
from obp.dataset import OpenBanditDataset
from obp.ope import RegressionModel
from obp.policy import BernoulliTS
from obp.policy import Random
from obp.types import BanditFeedback
from omegaconf import DictConfig
from ope import run_ope
import pandas as pd
from pandas import DataFrame
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.exceptions import ConvergenceWarning
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_random_state
from sklearn.utils import check_scalar
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
logger = getLogger(__name__)
registered_colors = {
"MIPS (w/o SLOPE)": "tab:gray",
"MIPS (w/ SLOPE)": "tab:green",
"IPS": "tab:red",
"DR": "tab:blue",
"DM": "tab:purple",
"SwitchDR": "tab:brown",
"MRDR": "tab:cyan",
r"DR-$\lambda$": "tab:olive",
"DRos": "tab:pink",
}
@dataclass
class ModifiedOpenBanditDataset(OpenBanditDataset):
@property
def n_actions(self) -> int:
"""Number of actions."""
return int(self.action.max() + 1)
def pre_process(self) -> None:
"""Preprocess raw open bandit dataset."""
user_cols = self.data.columns.str.contains("user_feature")
self.context = pd.get_dummies(
self.data.loc[:, user_cols], drop_first=True
).values
pos = DataFrame(self.position)
self.action_context = (
self.item_context.drop(columns=["item_id", "item_feature_0"], axis=1)
.apply(LabelEncoder().fit_transform)
.values
)
self.action_context = self.action_context[self.action]
self.action_context = np.c_[self.action_context, pos]
self.action = self.position * self.n_actions + self.action
self.position = np.zeros_like(self.position)
self.pscore /= 3
def sample_bootstrap_bandit_feedback(
self,
sample_size: Optional[int] = None,
test_size: float = 0.3,
is_timeseries_split: bool = False,
random_state: Optional[int] = None,
) -> BanditFeedback:
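        # Nonparametric bootstrap: resample the logged feedback with replacement so
        # that repeated seeds yield independent replicates of size sample_size.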
if is_timeseries_split:
bandit_feedback = self.obtain_batch_bandit_feedback(
test_size=test_size, is_timeseries_split=is_timeseries_split
)[0]
else:
bandit_feedback = self.obtain_batch_bandit_feedback(
test_size=test_size, is_timeseries_split=is_timeseries_split
)
n_rounds = bandit_feedback["n_rounds"]
if sample_size is None:
sample_size = bandit_feedback["n_rounds"]
else:
check_scalar(
sample_size,
name="sample_size",
target_type=(int),
min_val=0,
max_val=n_rounds,
)
random_ = check_random_state(random_state)
bootstrap_idx = random_.choice(
np.arange(n_rounds), size=sample_size, replace=True
)
for key_ in [
"action",
"position",
"reward",
"pscore",
"context",
"action_context",
]:
bandit_feedback[key_] = bandit_feedback[key_][bootstrap_idx]
bandit_feedback["n_rounds"] = sample_size
return bandit_feedback
@hydra.main(config_path="./conf", config_name="config")
def main(cfg: DictConfig) -> None:
print(cfg)
logger.info(f"The current working directory is {Path().cwd()}")
start_time = time()
# log path
log_path = Path("./all")
df_path = log_path / "df"
df_path.mkdir(exist_ok=True, parents=True)
fig_path = log_path / "fig"
fig_path.mkdir(exist_ok=True, parents=True)
# configurations
sample_size = cfg.setting.sample_size
random_state = cfg.setting.random_state
obd_path = Path().cwd().parents[1] / "open_bandit_dataset"
# define policies
policy_ur = Random(
n_actions=80,
len_list=3,
random_state=random_state,
)
policy_ts = BernoulliTS(
n_actions=80,
len_list=3,
random_state=random_state,
is_zozotown_prior=True,
campaign="all",
)
# calc ground-truth policy value (on-policy)
policy_value = ModifiedOpenBanditDataset.calc_on_policy_policy_value_estimate(
behavior_policy="bts", campaign="all", data_path=obd_path
)
# define a dataset class
dataset = ModifiedOpenBanditDataset(
behavior_policy="random",
data_path=obd_path,
campaign="all",
)
elapsed_prev = 0.0
squared_error_list = []
relative_squared_error_list = []
for t in np.arange(cfg.setting.n_seeds):
pi_b = policy_ur.compute_batch_action_dist(n_rounds=sample_size)
pi_e = policy_ts.compute_batch_action_dist(n_rounds=sample_size)
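        # compute_batch_action_dist returns a per-position distribution over the 80
        # items; flatten it into the 240 position-aware actions and divide by 3 so
        # each round's probabilities again sum to one.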
pi_e = pi_e.reshape(sample_size, 240, 1) / 3
val_bandit_data = dataset.sample_bootstrap_bandit_feedback(
sample_size=sample_size,
random_state=t,
)
val_bandit_data["pi_b"] = pi_b.reshape(sample_size, 240, 1) / 3
regression_model = RegressionModel(
n_actions=dataset.n_actions,
base_model=RandomForestClassifier(
n_estimators=10, max_samples=0.8, random_state=12345
),
)
estimated_rewards = regression_model.fit_predict(
context=val_bandit_data["context"], # context; x
action=val_bandit_data["action"], # action; a
reward=val_bandit_data["reward"], # reward; r
n_folds=2,
random_state=12345,
)
squared_errors, relative_squared_errors = run_ope(
val_bandit_data=val_bandit_data,
action_dist_val=pi_e,
estimated_rewards=estimated_rewards,
estimated_rewards_mrdr=estimated_rewards,
policy_value=policy_value,
)
squared_error_list.append(squared_errors)
relative_squared_error_list.append(relative_squared_errors)
elapsed = np.round((time() - start_time) / 60, 2)
diff = np.round(elapsed - elapsed_prev, 2)
logger.info(f"t={t}: {elapsed}min (diff {diff}min)")
elapsed_prev = elapsed
# aggregate all results
result_df = (
DataFrame(DataFrame(squared_error_list).stack())
.reset_index(1)
.rename(columns={"level_1": "est", 0: "se"})
)
result_df.reset_index(inplace=True, drop=True)
result_df.to_csv(df_path / "result_df.csv")
rel_result_df = (
DataFrame(DataFrame(relative_squared_error_list).stack())
.reset_index(1)
.rename(columns={"level_1": "est", 0: "se"})
)
rel_result_df.reset_index(inplace=True, drop=True)
rel_result_df.to_csv(df_path / "rel_result_df.csv")
# plot CDFs
estimators = result_df.est.unique().tolist()
palette = [registered_colors[est] for est in estimators[::-1]]
### CDF of relative SE ###
fig, ax = plt.subplots(figsize=(12, 7), tight_layout=True)
sns.ecdfplot(
linewidth=4,
palette=palette,
data=rel_result_df,
x="se",
hue="est",
hue_order=estimators[::-1],
ax=ax,
)
# title and legend
ax.legend(estimators, loc="upper left", fontsize=22)
# yaxis
ax.set_ylabel("probability", fontsize=25)
ax.tick_params(axis="y", labelsize=18)
ax.yaxis.set_label_coords(-0.08, 0.5)
# xaxis
ax.set_xscale("log")
ax.set_xlabel("relative squared errors w.r.t. IPS", fontsize=25)
ax.tick_params(axis="x", labelsize=18)
ax.xaxis.set_label_coords(0.5, -0.1)
plt.savefig(fig_path / "relative_cdf.png")
### CDF of relative SE zoom ###
fig, ax = plt.subplots(figsize=(12, 7), tight_layout=True)
sns.ecdfplot(
linewidth=4,
palette=palette,
data=rel_result_df,
x="se",
hue="est",
hue_order=estimators[::-1],
ax=ax,
)
# title and legend
ax.legend(estimators, loc="upper left", fontsize=22)
# yaxis
ax.set_ylabel("probability", fontsize=25)
ax.tick_params(axis="y", labelsize=18)
ax.yaxis.set_label_coords(-0.08, 0.5)
# xaxis
ax.set_xscale("log")
ax.set_xlim(0.09, 10)
ax.set_xlabel("relative squared errors w.r.t. IPS", fontsize=25)
ax.tick_params(axis="x", labelsize=18)
ax.xaxis.set_label_coords(0.5, -0.1)
plt.savefig(fig_path / "relative_cdf_zoom.png")
if __name__ == "__main__":
main()
| 8,837 | 30.791367 | 82 |
py
|
icml2022-mips
|
icml2022-mips-main/src/real/ope.py
|
import itertools
from typing import Dict
from typing import Optional
import numpy as np
from obp.ope import BaseOffPolicyEstimator
from obp.ope import DirectMethod as DM
from obp.ope import DoublyRobust as DR
from obp.ope import DoublyRobustWithShrinkageTuning as DRos
from obp.ope import InverseProbabilityWeighting as IPS
from obp.ope import OffPolicyEvaluation
from obp.ope import SubGaussianDoublyRobustTuning as SGDR
from obp.ope import SwitchDoublyRobustTuning as SwitchDR
from obp.utils import check_array
from scipy import stats
from sklearn.naive_bayes import CategoricalNB
class MIPS(BaseOffPolicyEstimator):
def _estimate_round_rewards(
self,
context: np.ndarray,
reward: np.ndarray,
action: np.ndarray,
action_emb: np.ndarray,
action_dist_b: np.ndarray,
action_dist_e: np.ndarray,
position: Optional[np.ndarray] = None,
n_actions: Optional[int] = None,
delta: float = 0.05,
with_cnf: bool = False,
**kwargs,
) -> np.ndarray:
n = reward.shape[0]
w_x_e = self._estimate_w_x_e(
context=context,
action=action,
action_emb=action_emb,
pi_e=action_dist_e[:, :, 0],
pi_b=action_dist_b[:, :, 0],
n_actions=n_actions,
)
if with_cnf:
r_hat = reward * w_x_e
cnf = np.sqrt(np.var(r_hat) / (n - 1))
cnf *= stats.t.ppf(1.0 - (delta / 2), n - 1)
return r_hat.mean(), cnf
return reward * w_x_e
def _estimate_w_x_e(
self,
context: np.ndarray,
action: np.ndarray,
action_emb: np.ndarray,
pi_b: np.ndarray,
pi_e: np.ndarray,
n_actions: int,
) -> np.ndarray:
n = action.shape[0]
realized_actions = np.unique(action)
w_x_a = pi_e / pi_b
w_x_a = np.where(w_x_a < np.inf, w_x_a, 0)
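        # Marginalized importance weight: w(x, e) = sum_a [pi_e(a|x) / pi_b(a|x)] * p(a|e),
        # where p(a|e) is estimated below with a CategoricalNB classifier fit on the
        # observed (action embedding, action) pairs.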
p_a_e_model = CategoricalNB()
p_a_e_model.fit(action_emb, action)
p_a_e = np.zeros((n, n_actions))
p_a_e[:, realized_actions] = p_a_e_model.predict_proba(action_emb)
w_x_e = (w_x_a * p_a_e).sum(1)
return w_x_e
def estimate_policy_value(
self,
context: np.ndarray,
reward: np.ndarray,
action: np.ndarray,
action_emb: np.ndarray,
action_dist_b: np.ndarray,
action_dist_e: np.ndarray,
n_actions: int,
position: Optional[np.ndarray] = None,
min_emb_dim: int = 1,
feature_pruning: Optional[str] = None,
**kwargs,
) -> np.ndarray:
check_array(array=reward, name="reward", expected_dim=1)
check_array(array=action_emb, name="action_emb", expected_dim=2)
check_array(array=action_dist_b, name="action_dist_b", expected_dim=3)
check_array(array=action_dist_e, name="action_dist_e", expected_dim=3)
if feature_pruning == "exact":
return self._estimate_with_exact_pruning(
context=context,
reward=reward,
action=action,
action_emb=action_emb,
action_dist_b=action_dist_b,
action_dist_e=action_dist_e,
n_actions=n_actions,
position=position,
min_emb_dim=min_emb_dim,
)
else:
return self._estimate_round_rewards(
context=context,
reward=reward,
action=action,
action_emb=action_emb,
action_dist_b=action_dist_b,
action_dist_e=action_dist_e,
n_actions=n_actions,
position=position,
).mean()
def _estimate_with_exact_pruning(
self,
context: np.ndarray,
reward: np.ndarray,
action: np.ndarray,
action_emb: np.ndarray,
action_dist_b: np.ndarray,
action_dist_e: np.ndarray,
n_actions: int,
position: Optional[np.ndarray] = None,
min_emb_dim: int = 1,
) -> float:
n_emb_dim = action_emb.shape[1]
min_emb_dim = np.int32(np.minimum(n_emb_dim, min_emb_dim))
theta_list, cnf_list = [], []
feat_list, C = np.arange(n_emb_dim), np.sqrt(6) - 1
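        # SLOPE-style data-driven pruning: scan embedding subsets from largest to
        # smallest and keep accepting estimates while each new confidence interval
        # (widened by C = sqrt(6) - 1) overlaps all previously accepted ones;
        # otherwise return the last accepted estimate.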
for i in np.arange(n_emb_dim, min_emb_dim - 1, -1):
comb_list = list(itertools.combinations(feat_list, i))
theta_list_, cnf_list_ = [], []
for comb in comb_list:
theta, cnf = self._estimate_round_rewards(
context=context,
reward=reward,
action=action,
action_emb=action_emb[:, comb],
action_dist_b=action_dist_b,
action_dist_e=action_dist_e,
n_actions=n_actions,
with_cnf=True,
)
if len(theta_list) > 0:
theta_list_.append(theta), cnf_list_.append(cnf)
else:
theta_list.append(theta), cnf_list.append(cnf)
continue
idx_list = np.argsort(cnf_list_)[::-1]
for idx in idx_list:
theta_i, cnf_i = theta_list_[idx], cnf_list_[idx]
theta_j, cnf_j = np.array(theta_list), np.array(cnf_list)
if (np.abs(theta_j - theta_i) <= cnf_i + C * cnf_j).all():
theta_list.append(theta_i), cnf_list.append(cnf_i)
else:
return theta_j[-1]
return theta_j[-1]
def estimate_interval(self) -> Dict[str, float]:
"""Estimate confidence interval of policy value by nonparametric bootstrap procedure."""
        raise NotImplementedError
def run_ope(
val_bandit_data: Dict,
action_dist_val: np.ndarray,
estimated_rewards: np.ndarray,
estimated_rewards_mrdr: np.ndarray,
policy_value: float,
) -> np.ndarray:
lambdas = [10, 50, 100, 500, 1e3, 5e3, 1e4, 5e4, 1e5, 5e5, np.inf]
lambdas_sg = [1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 5e-2, 0.1, 0.5, 1.0]
ope = OffPolicyEvaluation(
bandit_feedback=val_bandit_data,
ope_estimators=[
IPS(estimator_name="IPS"),
DR(estimator_name="DR"),
DM(estimator_name="DM"),
SwitchDR(lambdas=lambdas, tuning_method="slope", estimator_name="SwitchDR"),
DR(estimator_name="MRDR"),
DRos(lambdas=lambdas, tuning_method="slope", estimator_name="DRos"),
SGDR(
lambdas=lambdas_sg,
tuning_method="slope",
estimator_name=r"DR-$\lambda$",
),
],
)
estimated_rewards_dict = {
"DR": estimated_rewards,
"DM": estimated_rewards,
"SwitchDR": estimated_rewards,
"MRDR": estimated_rewards_mrdr,
"DRos": estimated_rewards,
r"DR-$\lambda$": estimated_rewards,
}
squared_errors = ope.evaluate_performance_of_estimators(
ground_truth_policy_value=policy_value,
action_dist=action_dist_val,
estimated_rewards_by_reg_model=estimated_rewards_dict,
metric="se",
)
mips_estimate = MIPS().estimate_policy_value(
context=val_bandit_data["context"],
reward=val_bandit_data["reward"],
action=val_bandit_data["action"],
action_emb=val_bandit_data["action_context"],
action_dist_b=val_bandit_data["pi_b"],
action_dist_e=action_dist_val,
n_actions=val_bandit_data["n_actions"],
feature_pruning="no",
)
squared_errors["MIPS (w/o SLOPE)"] = (policy_value - mips_estimate) ** 2
mips_estimate_slope = MIPS().estimate_policy_value(
context=val_bandit_data["context"],
reward=val_bandit_data["reward"],
action=val_bandit_data["action"],
action_emb=val_bandit_data["action_context"],
action_dist_b=val_bandit_data["pi_b"],
action_dist_e=action_dist_val,
n_actions=val_bandit_data["n_actions"],
feature_pruning="exact",
)
squared_errors["MIPS (w/ SLOPE)"] = (policy_value - mips_estimate_slope) ** 2
relative_squared_errors = {}
baseline = squared_errors["IPS"]
for key, value in squared_errors.items():
relative_squared_errors[key] = value / baseline
return squared_errors, relative_squared_errors
| 8,411 | 33.904564 | 96 |
py
|
litemangle
|
litemangle-master/litemangle.py
|
#!/usr/bin/env python
#
import numpy as N
import os
import sys
import re
import string
class LiteMangle:
"""
LiteMangle:
A Python class to implement some very basic mangle routines which
allow us to manipulate a mangle mask.
This is a subset of the more complicated Python-based Mangle package,
though it contains more methods than strictly necessary.
    The class is initialized with 1 argument, the name of an ascii
    file (in Mangle polygon format) containing the mask.
"""
__author__ = "Martin White"
__version__ = "1.1"
__email__ = "[email protected]"
def incap_spam(self,cap,x0,y0,z0):
"""
incap_spam(self,cap,x0,y0,z0):
This is an internal routine which you shouldn't need to use.
        Returns True for each unit vector (x0,y0,z0) that lies in the cap specified
        by the 4-vector "cap" containing (x,y,z,cm), and False for the rest.
"""
cd = 1.0-cap[0]*x0-cap[1]*y0-cap[2]*z0
return(((cap[3]<0.)&(cd>N.fabs(cap[3])))|((cap[3]>0.)&(cd<cap[3])))
def inpoly_spam(self,polygon,x0,y0,z0):
"""
        inpoly_spam(self,polygon,x0,y0,z0):
        This is an internal routine which you shouldn't need to use.
        Returns True if the unit vector (x0,y0,z0) is in the polygon, i.e. if it is
        within all of the caps in the polygon.
A polygon is a list giving the polygon number, the weight and
then all of the caps (each cap is a 4-vector/4-list).
"""
test = N.ones(len(x0),dtype=bool)
for x in polygon[3:]:
test &= self.incap_spam(x,x0,y0,z0)
return(test)
def get_polyids(self,ra,dec):
"""
        get_polyids(self,ra,dec):
Return the ID number of polygons given arrays of RA and DEC
in decimal degrees (assumed to be numpy arrays).
"""
theta = N.pi/180. * (90.0-dec)
phi = N.pi/180. * ra
sintheta = N.sin(theta)
x0 = sintheta*N.cos(phi)
y0 = sintheta*N.sin(phi)
z0 = N.cos(theta)
goodpolys = -N.ones(len(ra),dtype='i8')
for poly in self.polylist:
test = self.inpoly_spam(poly,x0,y0,z0)
goodpolys[test] = poly[0]
return(goodpolys)
def get_areas(self,ra,dec):
"""
get_areas(self,ra,dec):
Return the areas of the polygons containing each RA/Dec pair.
Result is in steradians.
"""
theta = N.pi/180. * (90.0-dec)
phi = N.pi/180. * ra
sintheta = N.sin(theta)
x0 = sintheta*N.cos(phi)
y0 = sintheta*N.sin(phi)
z0 = N.cos(theta)
goodpolys = -N.ones(len(ra))
for poly in self.polylist:
test = self.inpoly_spam(poly,x0,y0,z0)
goodpolys[test] = poly[2]
return(goodpolys)
def get_all_areas(self):
"""
get_all_areas(self):
Return an array containing the areas of all polygons, indexed
by polyid.
Result is in steradians.
"""
area = N.zeros(self.npoly)
for poly in self.polylist:
area[poly[0]] = poly[2]
return(area)
def get_weights(self,ra,dec):
"""
get_weights(self,ra,dec):
Return the weights of the polygons containing each RA/Dec pair,
for arrays of RA and DEC in decimal degrees.
"""
theta = N.pi/180. * (90.0-dec)
phi = N.pi/180. * ra
sintheta = N.sin(theta)
x0 = sintheta*N.cos(phi)
y0 = sintheta*N.sin(phi)
z0 = N.cos(theta)
goodpolys = -N.ones(len(ra))
for poly in self.polylist:
test = self.inpoly_spam(poly,x0,y0,z0)
goodpolys[test] = poly[1]
return(goodpolys)
def get_all_weights(self):
"""
get_all_weights(self):
Return an array containing the weights of all polygons, indexed
by polyid.
"""
weight = N.zeros(self.npoly)
for poly in self.polylist:
weight[poly[0]] = poly[1]
return(weight)
def total_area(self):
"""
total_area(self):
Returns the total area in the mask (i.e. the sum of the areas of
each polygon) and the total "effective" area (i.e. the area weighted
by the completeness).
Returns (tot_area,eff_area).
"""
tot_area,eff_area = 0.0,0.0
for poly in self.polylist:
tot_area += poly[2]
eff_area += poly[2]*poly[1]
return((tot_area,eff_area))
def set_weights(self,weight):
"""
set_weights(self,weight):
Sets the weight entries of the polygons to the weight array.
The weight array should be of length Npoly.
"""
for i,poly in enumerate(self.polylist):
self.polylist[i][1]=weight[poly[0]]
def set_one_weight(self,polyid,weight):
"""
set_one_weight(self,polyid,weight):
Sets the weight entry of the single polygon "polyid" to weight.
"""
for i,poly in enumerate(self.polylist):
if poly[0]==polyid:
self.polylist[i][1]=weight
break
def set_all_weights(self,weight=0.0):
"""
set_all_weights(self,weight=0.0):
Sets the weight entry of all polygons to "weight", a scalar,
typically used to set everything to 0.
"""
for i in range(len(self.polylist)):
self.polylist[i][1]=weight
def write_ply(self,fn):
"""
write_ply(self,fn):
Writes a Mangle-formatted polygon file containing the information
in the class.
This does not include pixelization information. Any mask written
in this manner should be pixelized after the fact using the Mangle
command line tools, if necessary.
"""
ff = open(fn,"w")
ff.write("%d polygons\n"%len(self.polylist))
for poly in self.polylist:
str = "polygon %10d ( %d caps,"%(poly[0],len(poly[3:]))
str+= " %.8f weight, %.15f str):\n"%(poly[1],poly[2])
ff.write(str)
for cap in poly[3:]:
ff.write("%25.20f %25.20f %25.20f %25.20f\n"%\
(cap[0],cap[1],cap[2],cap[3]))
ff.close()
def __init__(self,fn):
"""
__init__(self,fn):
The class is initialized with the name of an ascii file containing
the Mangle mask.
"""
if not os.path.exists(fn):
raise RuntimeError,"Can not find %s"%fn
#
# It's useful to pre-compile a regular expression for a mangle line
# defining a polygon.
ex1 = re.compile(r"polygon\s+(\d+)\s+\(\s*(\d+)\s+caps")
ex2 = re.compile(r"(\d*\.?\d+)\s+weight")
ex3 = re.compile(r"(\d*\.?\d+)\s+str")
#
ff = open(fn,"r")
self.npoly = 0
line = ff.readline()
ss = re.match(r"(\d+)\s+polygons",line)
if ss==None:
raise RuntimeError,"Can not parse 1st line of %s"%fn
else:
self.npoly = int( ss.group(1) )
#
self.polylist = []
#
ss = ex1.match(line)
while len(line)>0:
while (ss==None)&(len(line)>0):
line = ff.readline()
ss = ex1.match(line)
if len(line)>0:
ipoly= int(ss.group(1))
ncap = int(ss.group(2))
# Check to see if we have a weight.
ss = ex2.search(line)
if ss==None:
weight=0.0
else:
weight=float(ss.group(1))
# Check to see if we have an area.
ss = ex3.search(line)
if ss==None:
area= -1.0
else:
area=float(ss.group(1))
polyg= [ipoly,weight,area]
for i in range(ncap):
line = ff.readline()
                    cap = [float(x) for x in line.split()]
polyg.append(cap)
self.polylist.append(polyg)
ss=None
ff.close()
# Check whether the polyids are sequential and range from 0 to npoly
# If they don't, then there may be a problem with the file.
# NOTE: this should always be correct for current_boss_geometry.
badcounter = 0
for i,poly in enumerate(self.polylist):
if i != poly[0]:
badcounter += 1
if badcounter > 0:
print "WARNING!!!!"
print "Found",badcounter,"polygons out of order."
if len(self.polylist) != self.npoly:
print "Got %d polygons, expecting %d."%\
(len(self.polylist),self.npoly)
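# Minimal usage sketch (illustrative only; assumes "mask.ply" is a Mangle-format
# polygon file on disk):
#   mask = LiteMangle("mask.ply")
#   ra = N.array([150.1, 210.5])
#   dec = N.array([2.2, 5.4])
#   ids = mask.get_polyids(ra, dec)  # polygon id per point (-1 if outside the mask)
#   wts = mask.get_weights(ra, dec)  # completeness weight per point
#   tot, eff = mask.total_area()     # total and weighted areas in steradians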
| 8,796 | 33.229572 | 79 |
py
|
randCache
|
randCache-main/Type-III/main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 16:54:52 2021
@author: anirban
"""
import argparse
from simulator import Simulator
from collections import OrderedDict
import configparser
import random
def parse_cli_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--cache-size',
type=int,
required=True,
help='the size of the cache in words')
parser.add_argument(
'--num-blocks-per-set',
type=int,
default=1,
help='the number of blocks per set')
parser.add_argument(
'--num-words-per-block',
type=int,
default=2,
help='the number of words per block')
parser.add_argument(
'--num-partitions',
type=int,
default=1,
help='the number of partitions')
parser.add_argument(
'--word-addrs',
nargs='+',
type=int,
required=True,
help='one or more base-10 word addresses')
parser.add_argument(
'--num-addr-bits',
type=int,
default=32,
help='the number of bits in each given word address')
parser.add_argument(
'--replacement-policy',
choices=('lru', 'rand'),
default='rand',
# Ignore argument case (e.g. "lru" and "LRU" are equivalent)
type=str.lower,
help='the cache replacement policy (LRU or RAND)')
return parser.parse_args()
class Configs(dict):
def __init__(self, configs):
for params in configs:
if params == 'cache-size':
self.cache_size = int(configs[params])
if params == 'num-blocks-per-set':
self.num_blocks_per_set = int(configs[params])
if params == 'num-words-per-block':
self.num_words_per_block = int(configs[params])
if params == 'num-partitions':
self.num_partitions = int(configs[params])
if params == 'num-addr-bits':
self.num_addr_bits = int(configs[params])
if params == 'replacement-policy':
self.replacement_policy = configs[params]
def main(address):
# cli_args = parse_cli_args()
parser = configparser.ConfigParser()
parser.read('config.ini')
sections = parser.sections()
cli_args = Configs(parser[sections[0]])
# vars(cli_args)['replacement_policy'] = 'lru'
vars(cli_args)['word_addrs'] = address
sim = Simulator()
timing_vals = OrderedDict()
timing_vals = sim.run_simulation(**vars(cli_args))
timing_list = []
for word, timing in timing_vals.items():
timing_list.append(timing)
return timing_list
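# Note: main() returns one latency value per word address, using the constants
# assigned in Simulator.emulate_timing (200 for a cache hit, 600 for a miss).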
# if __name__ == '__main__':
# main([3, 80, 41, 786, 874, 875, 198, 456, 675, 325, 81, 142, 712, 564, 560, 345])
| 2,865 | 24.81982 | 88 |
py
|
randCache
|
randCache-main/Type-III/word_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:57:06 2021
@author: anirban
"""
class WordAddress(int):
def get_consecutive_words(self, num_words_per_block):
offset = self % num_words_per_block
return [(self - offset + i) for i in range(num_words_per_block)]
| 309 | 22.846154 | 72 |
py
|
randCache
|
randCache-main/Type-III/getHM.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 28 14:29:38 2021
@author: anirban
"""
import matplotlib.pyplot as plt
import numpy as np
with open("timing_nopart_rand_cache.txt", 'r') as f1:
file = f1.readlines()
timing = list(filter(None, list(map(lambda each:each.strip("\n"), file))))
count_hits = [0] * len(timing)
for i, rows in enumerate(timing):
rows = list(map(int, rows.split()))
count_hits[i] = 0
for item in rows:
count_hits[i] += 1
timing[i] = rows
count_hits = list(filter(lambda x: x > 0, count_hits))
index = []
for i in range(1, len(count_hits) + 1):
index.append(i)
fig=plt.figure(figsize=(8, 2))
ax = plt.gca()
plt.bar(index, count_hits, width=0.5)
plt.xlabel("Number of ways (associativity)", fontweight='bold', fontsize=12)
plt.ylabel("Number of trials \n per collision", fontweight='bold', fontsize=12)
ax.xaxis.set_tick_params(labelsize=11)
ax.yaxis.set_tick_params(labelsize=11)
plt.xticks(weight = 'bold')
plt.yticks(weight = 'bold')
plt.savefig("Type-III cache.pdf", dpi=1200, bbox_inches = 'tight')
plt.show()
| 1,109 | 23.666667 | 79 |
py
|
randCache
|
randCache-main/Type-III/table.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 15:26:46 2021
@author: anirban
"""
class Table(object):
alignment_symbols = {
'left': '<',
'center': '^',
'right': '>'
}
def __init__(self, num_cols, width, alignment='left', title=None):
self.title = title
self.width = width
self.num_cols = num_cols
self.alignment = alignment
self.header = []
self.rows = []
def get_separator(self):
return '-' * self.width
def __str__(self):
table_strs = []
if self.title:
table_strs.append(self.title.center(self.width))
table_strs.append(self.get_separator())
cell_format_str = ''.join('{{:{}{}}}'.format(Table.alignment_symbols[self.alignment], self.width // self.num_cols) for i in range(self.num_cols))
if self.header:
table_strs.append(cell_format_str.format(*self.header))
table_strs.append(self.get_separator())
for row in self.rows:
table_strs.append(cell_format_str.format(*map(str, row)))
return '\n'.join(table_strs)
| 1,235 | 27.744186 | 153 |
py
|
randCache
|
randCache-main/Type-III/bin_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:44:28 2021
@author: anirban
"""
import random
from present import Present
class BinaryAddress(str):
partition = None
def __new__(cls, bin_addr=None, word_addr=None, num_addr_bits=0):
if word_addr is not None:
return super().__new__(cls, bin(word_addr)[2:].zfill(num_addr_bits))
else:
return super().__new__(cls, bin_addr)
@classmethod
def prettify(cls, bin_addr, min_bits_per_group):
mid = len(bin_addr) // 2
if mid < min_bits_per_group:
return bin_addr
else:
left = cls.prettify(bin_addr[:mid], min_bits_per_group)
right = cls.prettify(bin_addr[mid:], min_bits_per_group)
return ' '.join((left, right))
def get_tag(self, num_tag_bits):
end = num_tag_bits
tag = self[:end]
if (len(tag) != 0):
return tag
else:
return None
def get_partition(self, num_partitions, ways_per_partition):
global partition
total_ways = num_partitions * ways_per_partition
if (num_partitions > total_ways):
num_partitions = total_ways
partition = random.randint(0, num_partitions - 1)
return partition
def get_index(self, num_offset_bits, num_index_bits, num_partitions):
global partition
plaintext = bin(int(self[:-(num_offset_bits)], 2))[2:].zfill(64)
key = bin(int('00000000000000000011', 16))[2:].zfill(80)
cipher = Present(key)
ciphertext = cipher.encrypt(plaintext)
ciphertext = str(bin(int(ciphertext, 16))[2:].zfill(64))
start = len(ciphertext) - num_offset_bits - (num_partitions * num_index_bits)
end = len(ciphertext) - num_offset_bits
index = ciphertext[start:end]
if (len(index) != 0):
return index
else:
return None
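    # Note on get_index() above: the block address (the word address with its offset
    # bits stripped) is encrypted with PRESENT under a fixed key, and a slice of
    # num_partitions * num_index_bits ciphertext bits, taken num_offset_bits from the
    # low end, is returned; each partition later cuts its own num_index_bits-wide
    # piece out of that slice.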
def get_offset(self, num_offset_bits):
start = len(self) - num_offset_bits
offset = self[start:]
if (len(offset) != 0):
return offset
else:
return None
| 2,227 | 31.289855 | 85 |
py
|
randCache
|
randCache-main/Type-III/simulator.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:00:37 2021
@author: anirban
"""
import math
import shutil
from collections import OrderedDict
from cache import Cache
from bin_addr import BinaryAddress
from reference import Reference
from table import Table
REF_COL_NAMES = ('WordAddr', 'BinAddr', 'Tag', 'Partition', 'Index', 'Offset', 'Hit/Miss')
MIN_BITS_PER_GROUP = 4
DEFAULT_TABLE_WIDTH = 200
class Simulator(object):
def get_addr_refs(self, word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
return [Reference(word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition) for word_addr in word_addrs]
def set_index(self, num_partitions, num_index_bits, refs):
for ref in refs:
if (len(ref.index) > num_index_bits):
start = len(ref.index) - ((ref.partition + 1) * num_index_bits)
end = len(ref.index) - (ref.partition * num_index_bits)
ref.index = ref.index[start:end]
return refs
def display_addr_refs(self, refs, table_width):
table = Table(num_cols=len(REF_COL_NAMES), width = table_width, alignment = 'center')
table.header[:] = REF_COL_NAMES
for ref in refs:
if ref.tag is not None:
ref_tag = ref.tag
else:
ref_tag = 'n/a'
if ref.index is not None:
ref_index = ref.index
else:
ref_index = 'n/a'
if ref.offset is not None:
ref_offset = ref.offset
else:
ref_offset = 'n/a'
table.rows.append((
ref.word_addr,
BinaryAddress.prettify(ref.bin_addr, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_tag, MIN_BITS_PER_GROUP),
ref.partition,
BinaryAddress.prettify(ref_index, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_offset, MIN_BITS_PER_GROUP),
ref.cache_status))
print(table)
def display_cache(self, cache, table_width, refs):
table = Table(num_cols=len(cache), width = table_width, alignment = 'center')
table.title = 'Cache'
cache_set_names = sorted(cache.keys())
if len(cache) != 1:
table.header[:] = cache_set_names
table.rows.append([])
for index in cache_set_names:
blocks = cache[index]
table.rows[0].append("("+str(' '.join(','.join(map(str, entry['data'])) for entry in blocks))+")")
print(table)
def emulate_timing(self, refs):
timing_vals = OrderedDict()
for ref in refs:
if (ref.cache_status.name == 'hit'):
timing_vals[str(ref.word_addr)] = 200
else:
timing_vals[str(ref.word_addr)] = 600
return timing_vals
def run_simulation(self, num_blocks_per_set, num_words_per_block, cache_size, num_partitions, replacement_policy, num_addr_bits, word_addrs):
num_blocks = cache_size // num_words_per_block
num_sets = num_blocks // num_blocks_per_set
ways_per_partition = num_blocks_per_set // num_partitions
num_addr_bits = max(num_addr_bits, int(math.log2(max(word_addrs))) + 1)
num_offset_bits = int(math.log2(num_words_per_block))
num_index_bits = int(math.log2(num_sets))
num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits
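        # Worked example (assumed parameters, not taken from config.ini): a 64-word
        # cache with 2 words/block, 4 blocks/set and 2 partitions gives num_blocks = 32,
        # num_sets = 8, ways_per_partition = 2, num_offset_bits = 1, num_index_bits = 3
        # and, with 32 address bits, num_tag_bits = 28.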
refs = self.get_addr_refs(word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition)
cache = Cache(num_sets = num_sets, num_index_bits = num_index_bits, num_partitions = num_partitions, ways_per_partition = ways_per_partition)
cache.read_refs(num_blocks_per_set, num_words_per_block, num_partitions, replacement_policy, refs)
timing_vals = self.emulate_timing(refs)
refs = self.set_index(num_partitions, num_index_bits, refs)
table_width = max((shutil.get_terminal_size((DEFAULT_TABLE_WIDTH, None)).columns, DEFAULT_TABLE_WIDTH))
# print()
# self.display_addr_refs(refs, table_width)
# print()
# self.display_cache(cache, table_width, refs)
# print()
return timing_vals
| 4,679 | 35.850394 | 163 |
py
|
randCache
|
randCache-main/Type-III/cache.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:53:27 2021
@author: anirban
"""
from bin_addr import BinaryAddress
from word_addr import WordAddress
from reference import ReferenceCacheStatus
import random
class Cache(dict):
partition = None
cal_index = None
def __init__(self, cache=None, num_sets=None, num_index_bits=None, num_partitions = None, ways_per_partition = None):
self.recently_used_addrs = []
if cache is not None:
self.update(cache)
else:
for j in range(num_partitions):
for i in range(num_sets):
index = BinaryAddress(word_addr = WordAddress(i), num_addr_bits = num_index_bits)
self[str(j)+str(index)] = []
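    # Note: the cache dict is keyed by the string "<partition><index bits>",
    # e.g. '0' + '0101'; each value is the list of blocks currently resident in
    # that set of that partition.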
def mark_ref_as_last_seen(self, ref):
addr_id = (ref.index, ref.tag)
if addr_id in self.recently_used_addrs:
self.recently_used_addrs.remove(addr_id)
self.recently_used_addrs.append(addr_id)
def is_hit(self, addr_partition, addr_index, addr_tag, num_partitions):
global partition
global cal_index
num_index_bits = int(len(addr_index) / num_partitions)
blocks = []
if addr_index is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
num_index_bits = int(len(addr_index) / num_partitions)
for i in range(num_partitions):
start = len(addr_index) - ((i + 1) * num_index_bits)
end = len(addr_index) - (i * num_index_bits)
actual_index = addr_index[start:end]
if (str(i)+str(actual_index)) in self:
if self[(str(i)+str(actual_index))] == []:
continue
else:
blocks = self[str(i)+str(actual_index)]
for block in blocks:
if (block['tag'] == addr_tag):
partition = i
cal_index = actual_index
return True
else:
return False
return False
def replace_block(self, blocks, replacement_policy, num_blocks_per_set, addr_partition, num_partition, addr_index, new_entry):
if (replacement_policy == 'rand'):
repl_block_index = random.randint(0, (num_blocks_per_set // num_partition) - 1)
for (i, block) in enumerate(blocks):
if (i == repl_block_index):
blocks[i] = new_entry
return
if (replacement_policy == 'lru'):
recently_used_addrs = self.recently_used_addrs
for recent_index, recent_tag in recently_used_addrs:
for i, block in enumerate(blocks):
if (recent_index == addr_index and block['tag'] == recent_tag):
blocks[i] = new_entry
return
def set_block(self, replacement_policy, num_blocks_per_set, addr_partition, num_partition, addr_index, new_entry):
num_index_bits = int(len(addr_index) / num_partition)
if addr_index is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
start = len(addr_index) - ((addr_partition + 1) * num_index_bits)
end = len(addr_index) - (addr_partition * num_index_bits)
addr_index = addr_index[start:end]
blocks = self[str(addr_partition)+ (str(addr_index).zfill(num_index_bits))]
if (len(blocks) == (num_blocks_per_set // num_partition)):
self.replace_block(blocks, replacement_policy, num_blocks_per_set, addr_partition, num_partition, addr_index, new_entry)
else:
blocks.append(new_entry)
def read_refs(self, num_blocks_per_set, num_words_per_block, num_partitions, replacement_policy, refs):
for ref in refs:
self.mark_ref_as_last_seen(ref)
if self.is_hit(ref.partition, ref.index, ref.tag, num_partitions):
ref.cache_status = ReferenceCacheStatus.hit
ref.partition = partition
ref.index = cal_index
else:
ref.cache_status = ReferenceCacheStatus.miss
self.set_block(
replacement_policy = replacement_policy,
num_blocks_per_set = num_blocks_per_set,
addr_partition = ref.partition,
num_partition = num_partitions,
addr_index = ref.index,
new_entry = ref.get_cache_entry(num_words_per_block)
)
| 5,073 | 36.865672 | 132 |
py
|
randCache
|
randCache-main/Type-III/present.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 23:59:35 2021
@author: anirban
"""
class Present:
def __init__(self,key,rounds=32):
"""Create a PRESENT cipher object
key: the key as a 128-bit or 80-bit rawstring
rounds: the number of rounds as an integer, 32 by default
"""
self.rounds = rounds
if (len(key) == 80):
self.roundkeys = generateRoundkeys80(string2number(key),self.rounds)
elif (len(key) == 128):
self.roundkeys = generateRoundkeys128(string2number(key),self.rounds)
else:
raise ValueError("Key must be a 128-bit or 80-bit rawstring")
def encrypt(self,block):
"""Encrypt 1 block (8 bytes)
Input: plaintext block as raw string
Output: ciphertext block as raw string
"""
state = string2number(block)
for i in range (self.rounds-1):
state = addRoundKey(state,self.roundkeys[i])
state = sBoxLayer(state)
state = pLayer(state)
cipher = addRoundKey(state,self.roundkeys[-1])
return number2string_N(cipher,8)
def decrypt(self,block):
"""Decrypt 1 block (8 bytes)
Input: ciphertext block as raw string
Output: plaintext block as raw string
"""
state = string2number(block)
for i in range (self.rounds-1):
state = addRoundKey(state,self.roundkeys[-i-1])
state = pLayer_dec(state)
state = sBoxLayer_dec(state)
decipher = addRoundKey(state,self.roundkeys[0])
return number2string_N(decipher,8)
def get_block_size(self):
return 8
# 0 1 2 3 4 5 6 7 8 9 a b c d e f
Sbox= [0xc,0x5,0x6,0xb,0x9,0x0,0xa,0xd,0x3,0xe,0xf,0x8,0x4,0x7,0x1,0x2]
Sbox_inv = [Sbox.index(x) for x in range(16)]
PBox = [0,16,32,48,1,17,33,49,2,18,34,50,3,19,35,51,
4,20,36,52,5,21,37,53,6,22,38,54,7,23,39,55,
8,24,40,56,9,25,41,57,10,26,42,58,11,27,43,59,
12,28,44,60,13,29,45,61,14,30,46,62,15,31,47,63]
PBox_inv = [PBox.index(x) for x in range(64)]
def generateRoundkeys80(key,rounds):
"""Generate the roundkeys for a 80-bit key
Input:
key: the key as a 80-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in range(1,rounds+1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
# rawKey[0:64]
roundkeys.append(key >>16)
#1. Shift
#rawKey[19:len(rawKey)]+rawKey[0:19]
key = ((key & (2**19-1)) << 61) + (key >> 19)
#2. SBox
#rawKey[76:80] = S(rawKey[76:80])
key = (Sbox[key >> 76] << 76)+(key & (2**76-1))
#3. Salt
#rawKey[15:20] ^ i
key ^= i << 15
return roundkeys
def generateRoundkeys128(key,rounds):
"""Generate the roundkeys for a 128-bit key
Input:
key: the key as a 128-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in range(1,rounds+1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
roundkeys.append(key >>64)
#1. Shift
key = ((key & (2**67-1)) << 61) + (key >> 67)
#2. SBox
key = (Sbox[key >> 124] << 124)+(Sbox[(key >> 120) & 0xF] << 120)+(key & (2**120-1))
#3. Salt
#rawKey[62:67] ^ i
key ^= i << 62
return roundkeys
def addRoundKey(state,roundkey):
return state ^ roundkey
def sBoxLayer(state):
"""SBox function for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(16):
output += Sbox[( state >> (i*4)) & 0xF] << (i*4)
return output
def sBoxLayer_dec(state):
"""Inverse SBox function for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(16):
output += Sbox_inv[( state >> (i*4)) & 0xF] << (i*4)
return output
def pLayer(state):
"""Permutation layer for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(64):
output += ((state >> i) & 0x01) << PBox[i]
return output
def pLayer_dec(state):
"""Permutation layer for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(64):
output += ((state >> i) & 0x01) << PBox_inv[i]
return output
def string2number(i):
""" Convert a string to a number
Input: string (big-endian)
Output: long or integer
"""
return int(i, 16)
def number2string_N(i, N):
"""Convert a number to a string of fixed size
i: long or integer
N: length of string
Output: string (big-endian)
"""
s = '%0*x' % (N*2, i)
return s
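# Illustrative self-test (added sketch, not part of the original module). The length
# checks in Present.__init__ count characters, so the all-zero "80-bit" key below is
# an 80-character string that string2number() then parses as hex; the values used
# here are hypothetical.
if __name__ == '__main__':
    _key = '0' * 80   # hypothetical all-zero key
    _pt = '0' * 16    # one 64-bit block as 16 hex characters
    _ct = Present(_key).encrypt(_pt)
    assert Present(_key).decrypt(_ct) == _pt, "PRESENT round-trip failed"
    print("PRESENT round-trip OK, ciphertext:", _ct)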
| 5,630 | 34.415094 | 100 |
py
|
randCache
|
randCache-main/Type-III/reference.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 21:12:52 2021
@author: anirban
"""
from collections import OrderedDict
from enum import Enum
from bin_addr import BinaryAddress
from word_addr import WordAddress
class Reference(object):
def __init__(self, word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
self.word_addr = WordAddress(word_addr)
self.bin_addr = BinaryAddress(word_addr = self.word_addr, num_addr_bits = num_addr_bits)
self.offset = self.bin_addr.get_offset(num_offset_bits)
self.partition = self.bin_addr.get_partition(num_partitions, ways_per_partition)
self.index = self.bin_addr.get_index(num_offset_bits, num_index_bits, num_partitions)
self.tag = self.bin_addr.get_tag(num_tag_bits)
self.cache_status = None
def __str__(self):
return str(OrderedDict(sorted(self.__dict__.items())))
__repr__ = __str__
def get_cache_entry(self, num_words_per_block):
return {
'tag': self.tag,
'data': self.word_addr.get_consecutive_words(num_words_per_block)
}
class ReferenceCacheStatus(Enum):
miss = 0
hit = 1
def __str__(self):
if self.value == ReferenceCacheStatus.hit.value:
return 'HIT'
else:
return 'miss'
__repr__ = __str__
| 1,499 | 27.301887 | 132 |
py
|
randCache
|
randCache-main/Type-I/main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 16:54:52 2021
@author: anirban
"""
import argparse
from simulator import Simulator
from collections import OrderedDict
import configparser
import random
def parse_cli_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--cache-size',
type=int,
required=True,
help='the size of the cache in words')
parser.add_argument(
'--num-blocks-per-set',
type=int,
default=1,
help='the number of blocks per set')
parser.add_argument(
'--num-words-per-block',
type=int,
default=2,
help='the number of words per block')
parser.add_argument(
'--num-partitions',
type=int,
default=1,
help='the number of partitions')
parser.add_argument(
'--word-addrs',
nargs='+',
type=int,
required=True,
help='one or more base-10 word addresses')
parser.add_argument(
'--num-addr-bits',
type=int,
default=32,
help='the number of bits in each given word address')
parser.add_argument(
'--replacement-policy',
choices=('lru', 'rand'),
default='rand',
# Ignore argument case (e.g. "lru" and "LRU" are equivalent)
type=str.lower,
help='the cache replacement policy (LRU or RAND)')
return parser.parse_args()
class Configs(dict):
def __init__(self, configs):
for params in configs:
if params == 'cache-size':
self.cache_size = int(configs[params])
if params == 'num-blocks-per-set':
self.num_blocks_per_set = int(configs[params])
if params == 'num-words-per-block':
self.num_words_per_block = int(configs[params])
if params == 'num-partitions':
self.num_partitions = int(configs[params])
if params == 'num-addr-bits':
self.num_addr_bits = int(configs[params])
if params == 'replacement-policy':
self.replacement_policy = configs[params]
def main(address):
# cli_args = parse_cli_args()
parser = configparser.ConfigParser()
parser.read('config.ini')
sections = parser.sections()
cli_args = Configs(parser[sections[0]])
# vars(cli_args)['replacement_policy'] = 'lru'
vars(cli_args)['word_addrs'] = address
sim = Simulator()
timing_vals = OrderedDict()
timing_vals = sim.run_simulation(**vars(cli_args))
timing_list = []
for word, timing in timing_vals.items():
timing_list.append(timing)
return timing_list
# if __name__ == '__main__':
# main([3, 80, 41, 786, 874, 875, 198, 456, 675, 325, 81, 142, 712, 564, 560, 345])
| 2,865 | 24.81982 | 88 |
py
|
randCache
|
randCache-main/Type-I/word_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:57:06 2021
@author: anirban
"""
class WordAddress(int):
def get_consecutive_words(self, num_words_per_block):
offset = self % num_words_per_block
return [(self - offset + i) for i in range(num_words_per_block)]
| 309 | 22.846154 | 72 |
py
|
randCache
|
randCache-main/Type-I/getHM.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 28 14:29:38 2021
@author: anirban
"""
import matplotlib.pyplot as plt
import numpy as np
with open("timing_nopart_norand.txt", 'r') as f1:
file = f1.readlines()
timing = list(filter(None, list(map(lambda each:each.strip("\n"), file))))
count_hits = [0] * len(timing)
for i, rows in enumerate(timing):
rows = list(map(int, rows.split()))
count_hits[i] = 0
for item in rows:
count_hits[i] += 1
timing[i] = rows
count_hits = list(filter(lambda x: x > 0, count_hits))
index = []
for i in range(1, len(count_hits) + 1):
index.append(i)
fig=plt.figure(figsize=(8, 2))
ax = plt.gca()
plt.bar(index, count_hits, width=0.5)
plt.xlabel("probing locations", fontweight='bold', fontsize=12)
plt.ylabel("Number of missed", fontweight='bold', fontsize=12)
ax.xaxis.set_tick_params(labelsize=11)
ax.yaxis.set_tick_params(labelsize=11)
plt.xticks(weight = 'bold')
plt.yticks(weight = 'bold')
plt.savefig("Type-I cache.pdf", dpi=1200, bbox_inches = 'tight')
plt.show()
| 1,073 | 22.866667 | 74 |
py
|
randCache
|
randCache-main/Type-I/table.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 15:26:46 2021
@author: anirban
"""
class Table(object):
alignment_symbols = {
'left': '<',
'center': '^',
'right': '>'
}
def __init__(self, num_cols, width, alignment='left', title=None):
self.title = title
self.width = width
self.num_cols = num_cols
self.alignment = alignment
self.header = []
self.rows = []
def get_separator(self):
return '-' * self.width
def __str__(self):
table_strs = []
if self.title:
table_strs.append(self.title.center(self.width))
table_strs.append(self.get_separator())
cell_format_str = ''.join('{{:{}{}}}'.format(Table.alignment_symbols[self.alignment], self.width // self.num_cols) for i in range(self.num_cols))
if self.header:
table_strs.append(cell_format_str.format(*self.header))
table_strs.append(self.get_separator())
for row in self.rows:
table_strs.append(cell_format_str.format(*map(str, row)))
return '\n'.join(table_strs)
| 1,235 | 27.744186 | 153 |
py
|
randCache
|
randCache-main/Type-I/bin_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:44:28 2021
@author: anirban
"""
import random
class BinaryAddress(str):
partition = None
def __new__(cls, bin_addr=None, word_addr=None, num_addr_bits=0):
if word_addr is not None:
return super().__new__(cls, bin(word_addr)[2:].zfill(num_addr_bits))
else:
return super().__new__(cls, bin_addr)
@classmethod
def prettify(cls, bin_addr, min_bits_per_group):
mid = len(bin_addr) // 2
if mid < min_bits_per_group:
return bin_addr
else:
left = cls.prettify(bin_addr[:mid], min_bits_per_group)
right = cls.prettify(bin_addr[mid:], min_bits_per_group)
return ' '.join((left, right))
def get_tag(self, num_tag_bits):
end = num_tag_bits
tag = self[:end]
if (len(tag) != 0):
return tag
else:
return None
def get_partition(self, num_partitions, ways_per_partition):
global partition
total_ways = num_partitions * ways_per_partition
if (num_partitions > total_ways):
num_partitions = total_ways
partition = random.randint(0, num_partitions - 1)
return partition
def get_index(self, num_offset_bits, num_index_bits, num_partitions):
global partition
ciphertext = self
start = len(ciphertext) - num_offset_bits - (num_partitions * num_index_bits)
end = len(ciphertext) - num_offset_bits
index = ciphertext[start:end]
if (len(index) != 0):
return index
else:
return None
def get_offset(self, num_offset_bits):
start = len(self) - num_offset_bits
offset = self[start:]
if (len(offset) != 0):
return offset
else:
return None
| 1,945 | 29.40625 | 85 |
py
|
randCache
|
randCache-main/Type-I/simulator.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:00:37 2021
@author: anirban
"""
import math
import shutil
from collections import OrderedDict
from cache import Cache
from bin_addr import BinaryAddress
from reference import Reference
from table import Table
REF_COL_NAMES = ('WordAddr', 'BinAddr', 'Tag', 'Partition', 'Index', 'Offset', 'Hit/Miss')
MIN_BITS_PER_GROUP = 4
DEFAULT_TABLE_WIDTH = 200
class Simulator(object):
def get_addr_refs(self, word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
return [Reference(word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition) for word_addr in word_addrs]
def set_index(self, num_partitions, num_index_bits, refs):
for ref in refs:
if (len(ref.index) > num_index_bits):
start = len(ref.index) - ((ref.partition + 1) * num_index_bits)
end = len(ref.index) - (ref.partition * num_index_bits)
ref.index = ref.index[start:end]
return refs
def display_addr_refs(self, refs, table_width):
table = Table(num_cols=len(REF_COL_NAMES), width = table_width, alignment = 'center')
table.header[:] = REF_COL_NAMES
for ref in refs:
if ref.tag is not None:
ref_tag = ref.tag
else:
ref_tag = 'n/a'
if ref.index is not None:
ref_index = ref.index
else:
ref_index = 'n/a'
if ref.offset is not None:
ref_offset = ref.offset
else:
ref_offset = 'n/a'
table.rows.append((
ref.word_addr,
BinaryAddress.prettify(ref.bin_addr, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_tag, MIN_BITS_PER_GROUP),
ref.partition,
BinaryAddress.prettify(ref_index, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_offset, MIN_BITS_PER_GROUP),
ref.cache_status))
print(table)
def display_cache(self, cache, table_width, refs):
table = Table(num_cols=len(cache), width = table_width, alignment = 'center')
table.title = 'Cache'
cache_set_names = sorted(cache.keys())
if len(cache) != 1:
table.header[:] = cache_set_names
table.rows.append([])
for index in cache_set_names:
blocks = cache[index]
table.rows[0].append("("+str(' '.join(','.join(map(str, entry['data'])) for entry in blocks))+")")
print(table)
def emulate_timing(self, refs):
timing_vals = OrderedDict()
for ref in refs:
if (ref.cache_status.name == 'hit'):
timing_vals[str(ref.word_addr)] = 200
else:
timing_vals[str(ref.word_addr)] = 600
return timing_vals
def run_simulation(self, num_blocks_per_set, num_words_per_block, cache_size, num_partitions, replacement_policy, num_addr_bits, word_addrs):
num_blocks = cache_size // num_words_per_block
num_sets = num_blocks // num_blocks_per_set
ways_per_partition = num_blocks_per_set // num_partitions
num_addr_bits = max(num_addr_bits, int(math.log2(max(word_addrs))) + 1)
num_offset_bits = int(math.log2(num_words_per_block))
num_index_bits = int(math.log2(num_sets))
num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits
refs = self.get_addr_refs(word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition)
cache = Cache(num_sets = num_sets, num_index_bits = num_index_bits, num_partitions = num_partitions, ways_per_partition = ways_per_partition)
cache.read_refs(num_blocks_per_set, num_words_per_block, num_partitions, replacement_policy, refs)
timing_vals = self.emulate_timing(refs)
refs = self.set_index(num_partitions, num_index_bits, refs)
table_width = max((shutil.get_terminal_size((DEFAULT_TABLE_WIDTH, None)).columns, DEFAULT_TABLE_WIDTH))
# print()
# self.display_addr_refs(refs, table_width)
# print()
# self.display_cache(cache, table_width, refs)
# print()
return timing_vals
| 4,679 | 35.850394 | 163 |
py
|
randCache
|
randCache-main/Type-I/cache.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:53:27 2021
@author: anirban
"""
from bin_addr import BinaryAddress
from word_addr import WordAddress
from reference import ReferenceCacheStatus
import random
class Cache(dict):
partition = None
cal_index = None
def __init__(self, cache=None, num_sets=None, num_index_bits=None, num_partitions = None, ways_per_partition = None):
self.recently_used_addrs = []
if cache is not None:
self.update(cache)
else:
for j in range(num_partitions):
for i in range(num_sets):
index = BinaryAddress(word_addr = WordAddress(i), num_addr_bits = num_index_bits)
self[str(j)+str(index)] = []
def mark_ref_as_last_seen(self, ref):
addr_id = (ref.index, ref.tag)
if addr_id in self.recently_used_addrs:
self.recently_used_addrs.remove(addr_id)
self.recently_used_addrs.append(addr_id)
def is_hit(self, addr_partition, addr_index, addr_tag, num_partitions):
global partition
global cal_index
num_index_bits = int(len(addr_index) / num_partitions)
blocks = []
if addr_index is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
num_index_bits = int(len(addr_index) / num_partitions)
for i in range(num_partitions):
start = len(addr_index) - ((i + 1) * num_index_bits)
end = len(addr_index) - (i * num_index_bits)
actual_index = addr_index[start:end]
if (str(i)+str(actual_index)) in self:
if self[(str(i)+str(actual_index))] == []:
continue
else:
blocks = self[str(i)+str(actual_index)]
for block in blocks:
if (block['tag'] == addr_tag):
partition = i
cal_index = actual_index
return True
else:
return False
return False
def replace_block(self, blocks, replacement_policy, num_blocks_per_set, addr_partition, num_partition, addr_index, new_entry):
if (replacement_policy == 'rand'):
repl_block_index = random.randint(0, (num_blocks_per_set // num_partition) - 1)
for (i, block) in enumerate(blocks):
if (i == repl_block_index):
blocks[i] = new_entry
return
if (replacement_policy == 'lru'):
recently_used_addrs = self.recently_used_addrs
for recent_index, recent_tag in recently_used_addrs:
for i, block in enumerate(blocks):
if (recent_index == addr_index and block['tag'] == recent_tag):
blocks[i] = new_entry
return
def set_block(self, replacement_policy, num_blocks_per_set, addr_partition, num_partition, addr_index, new_entry):
num_index_bits = int(len(addr_index) / num_partition)
if addr_index is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
start = len(addr_index) - ((addr_partition + 1) * num_index_bits)
end = len(addr_index) - (addr_partition * num_index_bits)
addr_index = addr_index[start:end]
blocks = self[str(addr_partition)+ (str(addr_index).zfill(num_index_bits))]
if (len(blocks) == (num_blocks_per_set // num_partition)):
self.replace_block(blocks, replacement_policy, num_blocks_per_set, addr_partition, num_partition, addr_index, new_entry)
else:
blocks.append(new_entry)
def read_refs(self, num_blocks_per_set, num_words_per_block, num_partitions, replacement_policy, refs):
for ref in refs:
self.mark_ref_as_last_seen(ref)
if self.is_hit(ref.partition, ref.index, ref.tag, num_partitions):
ref.cache_status = ReferenceCacheStatus.hit
ref.partition = partition
ref.index = cal_index
else:
ref.cache_status = ReferenceCacheStatus.miss
self.set_block(
replacement_policy = replacement_policy,
num_blocks_per_set = num_blocks_per_set,
addr_partition = ref.partition,
num_partition = num_partitions,
addr_index = ref.index,
new_entry = ref.get_cache_entry(num_words_per_block)
)
| 5,073 | 36.865672 | 132 |
py
|
randCache
|
randCache-main/Type-I/reference.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 21:12:52 2021
@author: anirban
"""
from collections import OrderedDict
from enum import Enum
from bin_addr import BinaryAddress
from word_addr import WordAddress
class Reference(object):
def __init__(self, word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
self.word_addr = WordAddress(word_addr)
self.bin_addr = BinaryAddress(word_addr = self.word_addr, num_addr_bits = num_addr_bits)
self.offset = self.bin_addr.get_offset(num_offset_bits)
self.partition = self.bin_addr.get_partition(num_partitions, ways_per_partition)
self.index = self.bin_addr.get_index(num_offset_bits, num_index_bits, num_partitions)
self.tag = self.bin_addr.get_tag(num_tag_bits)
self.cache_status = None
def __str__(self):
return str(OrderedDict(sorted(self.__dict__.items())))
__repr__ = __str__
def get_cache_entry(self, num_words_per_block):
return {
'tag': self.tag,
'data': self.word_addr.get_consecutive_words(num_words_per_block)
}
class ReferenceCacheStatus(Enum):
miss = 0
hit = 1
def __str__(self):
if self.value == ReferenceCacheStatus.hit.value:
return 'HIT'
else:
return 'miss'
__repr__ = __str__
| 1,499 | 27.301887 | 132 |
py
|
randCache
|
randCache-main/Variant B (SAE)/main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 16:54:52 2021
@author: anirban
"""
import argparse
from simulator import Simulator
from collections import OrderedDict
import configparser
from ast import literal_eval
import random
def parse_cli_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--cache-size',
type=int,
required=True,
help='the size of the cache in words')
parser.add_argument(
'--num-blocks-per-set',
type=int,
default=1,
help='the number of blocks per set')
parser.add_argument(
'--num-words-per-block',
type=int,
default=2,
help='the number of words per block')
parser.add_argument(
'--num-partitions',
type=int,
default=1,
help='the number of partitions')
parser.add_argument(
'--word-addrs',
nargs='+',
type=int,
required=True,
help='one or more base-10 word addresses')
parser.add_argument(
'--num-addr-bits',
type=int,
default=32,
help='the number of bits in each given word address')
parser.add_argument(
'--replacement-policy',
choices=('lru', 'rand'),
default='rand',
# Ignore argument case (e.g. "lru" and "LRU" are equivalent)
type=str.lower,
help='the cache replacement policy (LRU or RAND)')
return parser.parse_args()
class Configs(dict):
def __init__(self, configs):
for params in configs:
if params == 'cache-size':
self.cache_size = int(configs[params])
if params == 'num-blocks-per-set':
self.num_blocks_per_set = int(configs[params])
if params == 'num-words-per-block':
self.num_words_per_block = int(configs[params])
if params == 'num-partitions':
self.num_partitions = int(configs[params])
if params == 'num-addr-bits':
self.num_addr_bits = int(configs[params])
if params == 'num-additional-tags':
self.num_additional_tags = int(configs[params])
if params == 'replacement-policy':
self.replacement_policy = configs[params]
def main(address):
parser = configparser.ConfigParser()
parser.read('config.ini')
sections = parser.sections()
cli_args = Configs(parser[sections[0]])
vars(cli_args)['word_addrs'] = address
sim = Simulator()
timing_vals = OrderedDict()
timing_vals = sim.run_simulation(**vars(cli_args))
if __name__ == '__main__':
random.seed(10)
address_list = []
with open('address_list.txt', 'r') as f:
file = f.read()
tup = literal_eval(file)
main(tup)
print("")
| 2,869 | 24.855856 | 73 |
py
|
randCache
|
randCache-main/Variant B (SAE)/word_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:57:06 2021
@author: anirban
"""
class WordAddress(int):
def get_consecutive_words(self, num_words_per_block):
offset = self % num_words_per_block
return [(self - offset + i) for i in range(num_words_per_block)]
| 309 | 22.846154 | 72 |
py
|
randCache
|
randCache-main/Variant B (SAE)/table.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 15:26:46 2021
@author: anirban
"""
class Table(object):
alignment_symbols = {
'left': '<',
'center': '^',
'right': '>'
}
def __init__(self, num_cols, width, alignment='left', title=None):
self.title = title
self.width = width
self.num_cols = num_cols
self.alignment = alignment
self.header = []
self.rows = []
def get_separator(self):
return '-' * self.width
def __str__(self):
table_strs = []
if self.title:
table_strs.append(self.title.center(self.width))
table_strs.append(self.get_separator())
cell_format_str = ''.join('{{:{}{}}}'.format(Table.alignment_symbols[self.alignment], self.width // self.num_cols) for i in range(self.num_cols))
if self.header:
table_strs.append(cell_format_str.format(*self.header))
table_strs.append(self.get_separator())
for row in self.rows:
table_strs.append(cell_format_str.format(*map(str, row)))
return '\n'.join(table_strs)
| 1,235 | 27.744186 | 153 |
py
|
randCache
|
randCache-main/Variant B (SAE)/bin_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:44:28 2021
@author: anirban
"""
import random
from present import Present
class BinaryAddress(str):
def __new__(cls, bin_addr=None, word_addr=None, num_addr_bits=0):
if word_addr is not None:
return super().__new__(cls, bin(word_addr)[2:].zfill(num_addr_bits))
else:
return super().__new__(cls, bin_addr)
@classmethod
def prettify(cls, bin_addr, min_bits_per_group):
mid = len(bin_addr) // 2
if mid < min_bits_per_group:
return bin_addr
else:
left = cls.prettify(bin_addr[:mid], min_bits_per_group)
right = cls.prettify(bin_addr[mid:], min_bits_per_group)
return ' '.join((left, right))
def get_tag(self, num_tag_bits):
end = num_tag_bits
tag = self[:end]
if (len(tag) != 0):
return tag
else:
return None
def get_partition(self, num_partitions, ways_per_partition):
partition = (0, 1)
return partition
def get_index(self, num_offset_bits, num_index_bits, num_partitions):
index1 = None; index2 = None;
plaintext = bin(int(self[:-(num_offset_bits)], 2))[2:].zfill(64)
key = bin(int('00000000000000000000', 16))[2:].zfill(80)
cipher = Present(key)
ciphertext = cipher.encrypt(plaintext)
ciphertext = str(bin(int(ciphertext, 16))[2:].zfill(64))
start = len(ciphertext) - num_offset_bits - num_index_bits
end = len(ciphertext) - num_offset_bits
index1 = ciphertext[start:end]
key = bin(int('00000000000000001111', 16))[2:].zfill(80)
cipher = Present(key)
ciphertext = cipher.encrypt(plaintext)
ciphertext = str(bin(int(ciphertext, 16))[2:].zfill(64))
start = len(ciphertext) - num_offset_bits - num_index_bits
end = len(ciphertext) - num_offset_bits
index2 = ciphertext[start:end]
return (index1, index2)
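    # Note: unlike the Type-I/Type-III variants, get_index() returns one candidate
    # index per skew, produced by encrypting the block address under two different
    # hard-coded PRESENT keys; Cache.load_balancing() then chooses between the skews.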
def get_offset(self, num_offset_bits):
start = len(self) - num_offset_bits
offset = self[start:]
if (len(offset) != 0):
return offset
else:
return None
| 2,351 | 31.666667 | 80 |
py
|
randCache
|
randCache-main/Variant B (SAE)/simulator.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:00:37 2021
@author: anirban
"""
import math
import shutil
from collections import OrderedDict
from cache import Cache
#from cache import DataStore
from bin_addr import BinaryAddress
from reference import Reference
from table import Table
REF_COL_NAMES = ('WordAddr', 'BinAddr', 'Tag', 'Partition', 'Index', 'Offset', 'Hit/Miss', 'SAE/GL')
MIN_BITS_PER_GROUP = 4
DEFAULT_TABLE_WIDTH = 180
class Simulator(object):
def get_addr_refs(self, word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
return [Reference(word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition) for word_addr in word_addrs]
def set_index(self, num_partitions, num_index_bits, refs):
for ref in refs:
if (len(ref.index) > num_index_bits):
start = len(ref.index) - num_index_bits
end = len(ref.index)
ref.index = ref.index[start:end]
return refs
def display_addr_refs(self, refs, table_width):
table = Table(num_cols=len(REF_COL_NAMES), width = table_width, alignment = 'center')
table.header[:] = REF_COL_NAMES
for ref in refs:
if ref.tag is not None:
ref_tag = ref.tag
else:
ref_tag = 'n/a'
if ref.index is not None:
ref_index = ref.index
else:
ref_index = 'n/a'
if ref.offset is not None:
ref_offset = ref.offset
else:
ref_offset = 'n/a'
table.rows.append((
ref.word_addr,
BinaryAddress.prettify(ref.bin_addr, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_tag, MIN_BITS_PER_GROUP),
ref.partition,
BinaryAddress.prettify(ref_index, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_offset, MIN_BITS_PER_GROUP),
ref.cache_status,
ref.valid))
print(table)
def display_cache(self, cache, table_width, refs):
table = Table(num_cols=len(cache), width = table_width, alignment = 'center')
table.title = 'Cache'
cache_set_names = sorted(cache.keys())
if len(cache) != 1:
table.header[:] = cache_set_names
table.rows.append([])
for index in cache_set_names:
blocks = cache[index]
print (blocks)
table.rows[0].append("("+str(' '.join(','.join(map(str, entry['data'])) for entry in blocks if 'data' in entry.keys()))+")")
print(table)
def emulate_timing(self, refs):
timing_vals = OrderedDict()
for ref in refs:
if (ref.cache_status.name == 'hit'):
timing_vals[str(ref.word_addr)] = 200
else:
timing_vals[str(ref.word_addr)] = 600
return timing_vals
def run_simulation(self, num_blocks_per_set, num_words_per_block, cache_size, num_partitions, replacement_policy, num_addr_bits, num_additional_tags, word_addrs):
num_data_blocks = (cache_size//32) // num_words_per_block
num_sets_per_skew = (num_data_blocks // num_partitions) // num_blocks_per_set
num_tag_blocks_per_skew = num_sets_per_skew * (num_blocks_per_set + num_additional_tags)
num_data_blocks_per_skew = num_sets_per_skew * num_blocks_per_set
num_total_ways = num_blocks_per_set + num_additional_tags
print(num_data_blocks, num_sets_per_skew, num_tag_blocks_per_skew, num_data_blocks_per_skew)
num_addr_bits = max(num_addr_bits, int(math.log2(max(word_addrs))) + 1)
num_offset_bits = int(math.log2(num_words_per_block))
num_index_bits = int(math.log2(num_sets_per_skew))
num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits
refs = self.get_addr_refs(word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, num_tag_blocks_per_skew)
# print(refs)
cache = Cache(num_data_blocks = num_data_blocks, num_sets_per_skew = num_sets_per_skew, num_index_bits = num_index_bits, num_partitions = num_partitions, num_tag_blocks_per_skew = num_tag_blocks_per_skew, num_addr_bits = num_addr_bits, num_offset_bits = num_offset_bits, num_total_ways = num_total_ways)
#print(cache)
print("")
print("... cache is initialized - tag store will some valid and some invalid; data store with all valid and remaining invalid")
print("")
cache.read_refs(num_total_ways, num_partitions, replacement_policy, num_words_per_block, refs)
# table_width = max((shutil.get_terminal_size((DEFAULT_TABLE_WIDTH, None)).columns, DEFAULT_TABLE_WIDTH))
# print()
# self.display_addr_refs(refs, table_width)
# print()
# self.display_cache(cache, table_width, refs)
# print()
| 5,288 | 35.475862 | 311 |
py
|
randCache
|
randCache-main/Variant B (SAE)/filehandler.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 12 23:58:45 2021
@author: anirban
"""
class writeFile(object):
def write_address(plaintext, ciphertext):
with open('address_list.txt', 'a') as filehandle:
filehandle.writelines(str(plaintext))
filehandle.writelines("\t")
filehandle.writelines(str(ciphertext))
filehandle.writelines("\n")
def write_cache_details(self, word_addr, partition, index, status):
with open('cache_details.txt','a') as filehandle:
filehandle.writelines(str(word_addr))
filehandle.writelines("\t")
filehandle.writelines(str(partition))
filehandle.writelines("\t")
filehandle.writelines(str(index))
filehandle.writelines("\t")
filehandle.writelines(str(status))
filehandle.writelines("\n")
def write_eviction_status():
with open('eviction_status.txt','a') as filehandle:
filehandle.writelines("valid eviction")
filehandle.writelines("\n")
| 1,112 | 31.735294 | 71 |
py
|
randCache
|
randCache-main/Variant B (SAE)/cache.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:53:27 2021
@author: anirban
"""
from bin_addr import BinaryAddress
from word_addr import WordAddress
from reference import ReferenceCacheStatus
from reference import ReferenceEvictionStatus
from filehandler import writeFile
import random
import math
class Cache(dict):
partition = None
cal_index = None
count_ref_index = 0
def __init__(self, tag_store = None, num_data_blocks = None, num_sets_per_skew = None, num_index_bits = None, num_partitions = None, num_tag_blocks_per_skew = None, num_addr_bits = None, num_offset_bits = None, num_total_ways = None):
self.recently_used_addrs = []
self.data_store = [-1 for i in range(num_data_blocks)]
if tag_store is not None:
self.update(tag_store)
else:
for j in range(num_partitions):
for i in range(num_sets_per_skew):
index = BinaryAddress(word_addr = WordAddress(i), num_addr_bits = num_addr_bits)[-int(math.log2(num_sets_per_skew)):]
self[str(j)+str(index)] = []
for k in range(num_total_ways):
if (random.randint(0, 1) == 0) :
self[str(j)+str(index)].append({'valid': 0, 'fptr': self.getDataStoreEntry(num_data_blocks = num_data_blocks, valid_flag = 0, encoded_position = str(j)+str(index)+str(k), data_status = 'invalid')})
else:
self[str(j)+str(index)].append({'valid': 1, 'fptr': self.getDataStoreEntry(num_data_blocks = num_data_blocks, valid_flag = 1, encoded_position = str(j)+str(index)+str(k), data_status = 'valid')})
data_store_filled_with_valid_entries = 0
for j in range(num_partitions):
for i in range(num_sets_per_skew):
index = BinaryAddress(word_addr = WordAddress(i), num_addr_bits = num_addr_bits)[-int(math.log2(num_sets_per_skew)):]
for k in range(num_total_ways):
if(self[str(j)+str(index)][k]['valid'] == 1):
data_store_filled_with_valid_entries += 1
empty_places = len(self.data_store) - data_store_filled_with_valid_entries
count_new_entries = 0
for j in range(num_partitions):
for i in range(num_sets_per_skew):
index = BinaryAddress(word_addr = WordAddress(i), num_addr_bits = num_addr_bits)[-int(math.log2(num_sets_per_skew)):]
for k in range(num_total_ways):
if (random.randint(0, 1) == 1 and self[str(j)+str(index)][k]['valid'] == 0 and count_new_entries <= empty_places):
self[str(j)+str(index)][k] = {'valid': 0, 'fptr': self.getDataStoreEntry(num_data_blocks = num_data_blocks, valid_flag = 1, encoded_position = str(j)+str(index)+str(k), data_status = 'invalid')}
count_new_entries += 1
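    # Note: this models a decoupled tag/data store. Each tag-store entry is
    # {'valid': 0|1, 'fptr': index into self.data_store or None}, and each occupied
    # data_store slot records which tag slot owns it plus a 'valid'/'invalid' flag,
    # so evictions can be chosen globally over the data store (do_random_GLE below).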
def getDataStoreEntry(self, num_data_blocks, valid_flag, encoded_position, data_status):
if (valid_flag):
for i in range(num_data_blocks):
dsindex = random.randint(0, num_data_blocks - 1)
if (self.data_store[dsindex] == -1):
self.data_store[dsindex] = [encoded_position, data_status]
return dsindex
else:
continue
if (valid_flag == 0):
return None
def do_random_GLE(self, new_tag_index, new_way_index):
eviction_index = random.randint(0, len(self.data_store) - 1)
self.data_store[eviction_index][0] = str(new_tag_index) + str(new_way_index)
self.data_store[eviction_index][1] = 'valid'
return eviction_index
# if (self.data_store[eviction_index][1] == 'invalid'):
# self.data_store[eviction_index][0] = str(new_tag_index) + str(new_way_index)
# self.data_store[eviction_index][1] = 'valid'
# return eviction_index
# if (self.data_store[eviction_index][1] == 'valid'):
# self.data_store[eviction_index][0] = str(new_tag_index) + str(new_way_index)
# self.data_store[eviction_index][1] = 'valid'
# return eviction_index
def mark_ref_as_last_seen(self, ref):
addr_id = (ref.index, ref.tag)
if addr_id in self.recently_used_addrs:
self.recently_used_addrs.remove(addr_id)
self.recently_used_addrs.append(addr_id)
def load_balancing(self, index_tuple, num_partitions):
block1 = self[str(0)+str(index_tuple[0])]
block2 = self[str(1)+str(index_tuple[1])]
# print(block1)
# print(block2)
block1_valid_count = 0
block2_valid_count = 0
for block in block1:
if block['valid']:
block1_valid_count += 1
for block in block2:
if block['valid']:
block2_valid_count += 1
if block1_valid_count < block2_valid_count:
return (0, block1_valid_count)
elif block1_valid_count > block2_valid_count:
return (1, block2_valid_count)
else:
randint = random.randint(0,1)
if (randint == 0):
return (0, block1_valid_count)
else:
return (1, block2_valid_count)
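    # Note: load_balancing() above is a "power of two choices" style insertion
    # policy - the incoming line goes to whichever of its two candidate sets
    # currently holds fewer valid tags, with a random tie-break.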
def is_hit(self, addr_index, addr_tag, num_partitions):
global partition
global cal_index
num_index_bits = int(len(addr_index[0]))
blocks = []
if addr_index[0] is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
for i in range(num_partitions):
actual_index = str(i)+str(addr_index[i])
empty_set = True
if (actual_index) in self:
blocks = self[actual_index]
for block in blocks:
if 'tag' in block.keys():
empty_set = False
break
if empty_set == True:
continue
else:
for block in blocks:
if ('tag' in block.keys() and block['tag'] == addr_tag):
partition = i
cal_index = addr_index[i]
return True
else:
return False
return False
def replace_block(self, blocks, replacement_policy, num_tags_per_set, skew, valid_count, num_partition, addr_index, new_entry, count_ref_index):
if (replacement_policy == 'rand'):
if (valid_count < num_tags_per_set):
repl_block_index = -1
for (index, block) in enumerate(blocks):
if block['valid'] == 0:
repl_block_index = index
tag_index = str(skew) + str(addr_index)
new_entry['fptr'] = self.do_random_GLE(tag_index, index)
break
blocks[repl_block_index] = new_entry
return
else:
print("valid eviction")
writeFile.write_eviction_status()
repl_block_index = random.randint(0, num_tags_per_set - 1)
for (index, block) in enumerate(blocks):
if (index == repl_block_index):
new_entry['fptr'] = block['fptr']
blocks[index] = new_entry
return
def set_block(self, replacement_policy, num_tags_per_set, num_partition, addr_index, new_entry, count_ref_index):
global partition
global cal_index
num_index_bits = int(len(addr_index[0]))
if addr_index[0] is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
skew, count_valid = self.load_balancing(index_tuple = addr_index, num_partitions = num_partition)
blocks = self[str(skew)+str(addr_index[skew])]
self.replace_block(blocks, replacement_policy, num_tags_per_set, skew, count_valid, num_partition, addr_index[skew], new_entry, count_ref_index)
partition = skew
cal_index = addr_index[skew]
def read_refs(self, num_total_tags_per_set, num_partitions, replacement_policy, num_words_per_block, refs):
count_ref_index = 0
for ref in refs:
if self.is_hit(ref.index, ref.tag, num_partitions):
ref.cache_status = ReferenceCacheStatus.hit
ref.partition = partition
ref.index = cal_index
ref.valid = 1
else:
ref.cache_status = ReferenceCacheStatus.miss
self.set_block(
replacement_policy = replacement_policy,
num_tags_per_set = num_total_tags_per_set,
num_partition = num_partitions,
addr_index = ref.index,
new_entry = ref.get_cache_entry(num_words_per_block),
count_ref_index = count_ref_index
)
ref.partition = partition
ref.index = cal_index
ref.valid = 1
count_ref_index += 1
| 9,582 | 44.417062 | 238 |
py
|
randCache
|
randCache-main/Variant B (SAE)/present.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 23:59:35 2021
@author: anirban
"""
class Present:
def __init__(self,key,rounds=32):
"""Create a PRESENT cipher object
key: the key as a 128-bit or 80-bit rawstring
rounds: the number of rounds as an integer, 32 by default
"""
self.rounds = rounds
if (len(key) == 80):
self.roundkeys = generateRoundkeys80(string2number(key),self.rounds)
elif (len(key) == 128):
self.roundkeys = generateRoundkeys128(string2number(key),self.rounds)
else:
raise ValueError("Key must be a 128-bit or 80-bit rawstring")
def encrypt(self,block):
"""Encrypt 1 block (8 bytes)
Input: plaintext block as raw string
Output: ciphertext block as raw string
"""
state = string2number(block)
for i in range (self.rounds-1):
state = addRoundKey(state,self.roundkeys[i])
state = sBoxLayer(state)
state = pLayer(state)
cipher = addRoundKey(state,self.roundkeys[-1])
return number2string_N(cipher,8)
def decrypt(self,block):
"""Decrypt 1 block (8 bytes)
Input: ciphertext block as raw string
Output: plaintext block as raw string
"""
state = string2number(block)
for i in range (self.rounds-1):
state = addRoundKey(state,self.roundkeys[-i-1])
state = pLayer_dec(state)
state = sBoxLayer_dec(state)
decipher = addRoundKey(state,self.roundkeys[0])
return number2string_N(decipher,8)
def get_block_size(self):
return 8
# 0 1 2 3 4 5 6 7 8 9 a b c d e f
Sbox= [0xc,0x5,0x6,0xb,0x9,0x0,0xa,0xd,0x3,0xe,0xf,0x8,0x4,0x7,0x1,0x2]
Sbox_inv = [Sbox.index(x) for x in range(16)]
PBox = [0,16,32,48,1,17,33,49,2,18,34,50,3,19,35,51,
4,20,36,52,5,21,37,53,6,22,38,54,7,23,39,55,
8,24,40,56,9,25,41,57,10,26,42,58,11,27,43,59,
12,28,44,60,13,29,45,61,14,30,46,62,15,31,47,63]
PBox_inv = [PBox.index(x) for x in range(64)]
def generateRoundkeys80(key,rounds):
"""Generate the roundkeys for a 80-bit key
Input:
key: the key as a 80-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in range(1,rounds+1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
# rawKey[0:64]
roundkeys.append(key >>16)
#1. Shift
#rawKey[19:len(rawKey)]+rawKey[0:19]
key = ((key & (2**19-1)) << 61) + (key >> 19)
#2. SBox
#rawKey[76:80] = S(rawKey[76:80])
key = (Sbox[key >> 76] << 76)+(key & (2**76-1))
#3. Salt
#rawKey[15:20] ^ i
key ^= i << 15
return roundkeys
def generateRoundkeys128(key,rounds):
"""Generate the roundkeys for a 128-bit key
Input:
key: the key as a 128-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in range(1,rounds+1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
roundkeys.append(key >>64)
#1. Shift
key = ((key & (2**67-1)) << 61) + (key >> 67)
#2. SBox
key = (Sbox[key >> 124] << 124)+(Sbox[(key >> 120) & 0xF] << 120)+(key & (2**120-1))
#3. Salt
#rawKey[62:67] ^ i
key ^= i << 62
return roundkeys
def addRoundKey(state,roundkey):
return state ^ roundkey
def sBoxLayer(state):
"""SBox function for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(16):
output += Sbox[( state >> (i*4)) & 0xF] << (i*4)
return output
def sBoxLayer_dec(state):
"""Inverse SBox function for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(16):
output += Sbox_inv[( state >> (i*4)) & 0xF] << (i*4)
return output
def pLayer(state):
"""Permutation layer for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(64):
output += ((state >> i) & 0x01) << PBox[i]
return output
def pLayer_dec(state):
"""Permutation layer for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(64):
output += ((state >> i) & 0x01) << PBox_inv[i]
return output
def string2number(i):
""" Convert a string to a number
Input: string (big-endian)
Output: long or integer
"""
return int(i, 16)
def number2string_N(i, N):
"""Convert a number to a string of fixed size
i: long or integer
N: length of string
Output: string (big-endian)
"""
s = '%0*x' % (N*2, i)
return s
| 5,630 | 34.415094 | 100 |
py
|
randCache
|
randCache-main/Variant B (SAE)/plotter.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 30 21:19:51 2022
@author: anirban
"""
import matplotlib.pyplot as plt
import numpy as np
with open("outfile.txt", 'r') as f1:
file = f1.readlines()
X = []; Y = []
for line in file:
cache_size = line.strip("\n").split(":")[0]
throw_count = line.strip("\n").split(":")[1]
X.append(cache_size)
Y.append(int(throw_count))
FIG_SIZE = (6,2) #Horizontal and vertical dimensions of the figure
GRID_ALPHA = 0.5 #Intensity of gridlines
obj = ['100K','200K','800K','1.5M'] #labelling of points
fig, ax = plt.subplots(figsize = FIG_SIZE)
#Comment this for removing gridlines
ax.grid(which = 'both', alpha = GRID_ALPHA, linestyle = 'dotted')
#Set the labelsize of the tick values
ax.tick_params(colors='black',labelsize=10)
#X labelsize and values
ax.set_xlabel('Size of Cache in MB', fontsize = 10, fontweight='bold')
#Y labelsize and values
ax.set_ylabel("Number of"+"\n"+"balls thrown", fontsize = 10, fontweight='bold')
#Plotting with markertype, color, and linewidth
ax.plot(X, Y, marker="o", markerfacecolor='blue', ms=5, color="blue", linewidth=1)
for i,label in enumerate(obj):
plt.annotate(label, # this is the text
(X[i],Y[i]), # these are the coordinates to position the label
textcoords="offset points", # how to position the text
xytext=(0,8), # distance from text to points (x,y)
ha='center', # horizontal alignment can be left, right or center
fontsize=10) #fontsize
fig.savefig('Figure7_HPCAPaper.png', bbox_inches = "tight", dpi=200)
fig.show()
| 1,638 | 33.145833 | 82 |
py
|
randCache
|
randCache-main/Variant B (SAE)/rand_num_gen.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 1 12:16:04 2022
@author: anirban
"""
import random
import os
import math
outfile = "outfile.txt"
eviction_file = "eviction_status.txt"
base = 16777216
if os.path.exists(outfile):
os.remove(outfile)
with open("config.ini", "r") as f:
lines = f.readlines()
# cache_size = [16, 32, 64, 128]
# throws_list = [100000, 200000, 800000, 1500000]
cache_size = [16]
throws_list = [100000]
my_dict = {}
for j in range(len(cache_size)):
if os.path.exists(eviction_file):
os.remove(eviction_file)
with open("config.ini", "w") as f:
for line in lines:
if line.strip("\n").startswith("cache-size"):
f.write("cache-size="+str(int(base * (int(math.pow(2,j)))))+"\n")
else:
f.write(line)
num = []
for i in range(throws_list[j]):
num.append(random.randint(0, 100000000))
with open('address_list.txt', 'w') as filehandle:
filehandle.writelines(str(num))
os.system("python3 main.py")
with open(eviction_file) as f:
first_line = f.readline().strip('\n')
if first_line.strip("\n").startswith("valid"):
my_dict[str(cache_size[j])] = throws_list[j]
else:
my_dict[str(cache_size[j])] = -1
with open(outfile, "a") as fout:
for key, value in my_dict.items():
fout.write('%s:%s\n' % (key, value))
| 1,426 | 23.186441 | 81 |
py
|
randCache
|
randCache-main/Variant B (SAE)/reference.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 21:12:52 2021
@author: anirban
"""
from collections import OrderedDict
from enum import Enum
from bin_addr import BinaryAddress
from word_addr import WordAddress
class Reference(object):
def __init__(self, word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
self.word_addr = WordAddress(word_addr)
self.bin_addr = BinaryAddress(word_addr = self.word_addr, num_addr_bits = num_addr_bits)
self.offset = self.bin_addr.get_offset(num_offset_bits)
self.skew = self.bin_addr.get_partition(num_partitions, ways_per_partition)
self.index = self.bin_addr.get_index(num_offset_bits, num_index_bits, num_partitions)
self.tag = self.bin_addr.get_tag(num_tag_bits)
self.valid = 0
self.cache_status = None
def __str__(self):
return str(OrderedDict(sorted(self.__dict__.items())))
__repr__ = __str__
def get_cache_entry(self, num_words_per_block):
return {
'valid': 1,
'tag': self.tag,
'data': self.word_addr.get_consecutive_words(num_words_per_block),
'fptr': None
}
class ReferenceCacheStatus(Enum):
miss = 0
hit = 1
def __str__(self):
if self.value == ReferenceCacheStatus.hit.value:
return 'HIT'
else:
return 'miss'
__repr__ = __str__
class ReferenceEvictionStatus(Enum):
valid = 1
invalid = 0
def __str__(self):
if self.value == ReferenceEvictionStatus.valid.value:
return 'SAE'
else:
return 'global'
__repr__ = __str__
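# --- illustrative sketch, not part of the original module ---
# A small demonstration of the string forms defined above; the SAE/global
# wording follows this variant's naming.
if __name__ == '__main__':
    assert str(ReferenceCacheStatus.hit) == 'HIT'
    assert str(ReferenceCacheStatus.miss) == 'miss'
    assert str(ReferenceEvictionStatus.valid) == 'SAE'
    assert str(ReferenceEvictionStatus.invalid) == 'global'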
| 1,829 | 25.911765 | 132 |
py
|
randCache
|
randCache-main/mirage_bucket_and_balls/plotter.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 30 21:19:51 2022
@author: anirban
"""
import matplotlib.pyplot as plt
import numpy as np
with open("mirage_bucket_ball_data.txt", 'r') as f1:
file = f1.readlines()
timing = list(map(int, list(map(lambda each:each.strip("\n"), file))))
index = [8, 9, 10, 11, 12, 13, 14]
fig=plt.figure(figsize=(8, 3))
ax = plt.gca()
plt.bar(index, timing, width=0.5)
plt.xlabel("Number of ways (associativity)", fontweight='bold', fontsize=12)
plt.ylabel("Number of trials \n per collision", fontweight='bold', fontsize=12)
ax.xaxis.set_tick_params(labelsize=11)
ax.yaxis.set_tick_params(labelsize=11)
plt.xticks(weight = 'bold')
plt.yticks(weight = 'bold')
plt.savefig("mirage_bucket_and_balls.pdf", dpi=1200, bbox_inches = 'tight')
plt.show()
| 815 | 23.727273 | 79 |
py
|
randCache
|
randCache-main/Variant A/main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 16:54:52 2021
@author: anirban
"""
import argparse
from simulator import Simulator
from collections import OrderedDict
import configparser
import random
def parse_cli_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--cache-size',
type=int,
required=True,
help='the size of the cache in words')
parser.add_argument(
'--num-blocks-per-set',
type=int,
default=1,
help='the number of blocks per set')
parser.add_argument(
'--num-words-per-block',
type=int,
default=2,
help='the number of words per block')
parser.add_argument(
'--num-partitions',
type=int,
default=1,
help='the number of partitions')
parser.add_argument(
'--word-addrs',
nargs='+',
type=int,
required=True,
help='one or more base-10 word addresses')
parser.add_argument(
'--num-addr-bits',
type=int,
default=32,
help='the number of bits in each given word address')
parser.add_argument(
'--replacement-policy',
choices=('lru', 'rand'),
default='rand',
# Ignore argument case (e.g. "lru" and "LRU" are equivalent)
type=str.lower,
help='the cache replacement policy (LRU or RAND)')
return parser.parse_args()
class Configs(dict):
def __init__(self, configs):
for params in configs:
if params == 'cache-size':
self.cache_size = int(configs[params])
if params == 'num-blocks-per-set':
self.num_blocks_per_set = int(configs[params])
if params == 'num-words-per-block':
self.num_words_per_block = int(configs[params])
if params == 'num-partitions':
self.num_partitions = int(configs[params])
if params == 'num-addr-bits':
self.num_addr_bits = int(configs[params])
if params == 'replacement-policy':
self.replacement_policy = configs[params]
def main(address):
# cli_args = parse_cli_args()
parser = configparser.ConfigParser()
parser.read('config.ini')
sections = parser.sections()
cli_args = Configs(parser[sections[0]])
# vars(cli_args)['replacement_policy'] = 'lru'
vars(cli_args)['word_addrs'] = address
sim = Simulator()
timing_vals = OrderedDict()
timing_vals = sim.run_simulation(**vars(cli_args))
timing_list = []
for word, timing in timing_vals.items():
timing_list.append(timing)
return timing_list
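# --- illustrative sketch, not part of the original file ---
# main() reads config.ini from the working directory; only the first section is
# used, whatever its name. A hypothetical minimal config could look like:
#
#   [cache]
#   cache-size=64
#   num-blocks-per-set=4
#   num-words-per-block=2
#   num-partitions=2
#   num-addr-bits=32
#   replacement-policy=rand
#
# With such a file in place, the simulator can be driven directly:
if __name__ == '__main__':
    timings = main([3, 80, 41, 786, 874, 875, 198, 456])
    print(timings)   # one value per address: 200 for a simulated hit, 600 for a miss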
| 2,737 | 24.830189 | 68 |
py
|
randCache
|
randCache-main/Variant A/word_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:57:06 2021
@author: anirban
"""
class WordAddress(int):
def get_consecutive_words(self, num_words_per_block):
offset = self % num_words_per_block
return [(self - offset + i) for i in range(num_words_per_block)]
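# --- illustrative sketch, not part of the original file ---
# get_consecutive_words() returns every word address in the block holding this
# word; e.g. with 4 words per block, word 13 lives in the block covering 12..15.
if __name__ == '__main__':
    assert WordAddress(13).get_consecutive_words(4) == [12, 13, 14, 15]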
| 309 | 22.846154 | 72 |
py
|
randCache
|
randCache-main/Variant A/table.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 15:26:46 2021
@author: anirban
"""
class Table(object):
alignment_symbols = {
'left': '<',
'center': '^',
'right': '>'
}
def __init__(self, num_cols, width, alignment='left', title=None):
self.title = title
self.width = width
self.num_cols = num_cols
self.alignment = alignment
self.header = []
self.rows = []
def get_separator(self):
return '-' * self.width
def __str__(self):
table_strs = []
if self.title:
table_strs.append(self.title.center(self.width))
table_strs.append(self.get_separator())
cell_format_str = ''.join('{{:{}{}}}'.format(Table.alignment_symbols[self.alignment], self.width // self.num_cols) for i in range(self.num_cols))
if self.header:
table_strs.append(cell_format_str.format(*self.header))
table_strs.append(self.get_separator())
for row in self.rows:
table_strs.append(cell_format_str.format(*map(str, row)))
return '\n'.join(table_strs)
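# --- illustrative sketch, not part of the original file ---
# Minimal usage: a centered three-column table, 36 characters wide.
if __name__ == '__main__':
    demo = Table(num_cols=3, width=36, alignment='center', title='Demo')
    demo.header[:] = ('A', 'B', 'C')
    demo.rows.append((1, 2, 3))
    demo.rows.append((4, 5, 6))
    print(demo)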
| 1,235 | 27.744186 | 153 |
py
|
randCache
|
randCache-main/Variant A/bin_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:44:28 2021
@author: anirban
"""
import random
from present import Present
class BinaryAddress(str):
partition = None
def __new__(cls, bin_addr=None, word_addr=None, num_addr_bits=0):
if word_addr is not None:
return super().__new__(cls, bin(word_addr)[2:].zfill(num_addr_bits))
else:
return super().__new__(cls, bin_addr)
@classmethod
def prettify(cls, bin_addr, min_bits_per_group):
mid = len(bin_addr) // 2
if mid < min_bits_per_group:
return bin_addr
else:
left = cls.prettify(bin_addr[:mid], min_bits_per_group)
right = cls.prettify(bin_addr[mid:], min_bits_per_group)
return ' '.join((left, right))
def get_tag(self, num_tag_bits):
end = num_tag_bits
tag = self[:end]
if (len(tag) != 0):
return tag
else:
return None
def get_partition(self, num_partitions, ways_per_partition):
global partition
total_ways = num_partitions * ways_per_partition
if (num_partitions > total_ways):
num_partitions = total_ways
partition = random.randint(0, num_partitions - 1)
return partition
def get_index(self, num_offset_bits, num_index_bits, num_partitions):
global partition
plaintext = bin(int(self[:-(num_offset_bits)], 2))[2:].zfill(64)
key = bin(int('00000000000000000000', 16))[2:].zfill(80)
cipher = Present(key)
ciphertext = cipher.encrypt(plaintext)
ciphertext = str(bin(int(ciphertext, 16))[2:].zfill(64))
start = len(ciphertext) - num_offset_bits - (num_partitions * num_index_bits)
end = len(ciphertext) - num_offset_bits
index = ciphertext[start:end]
if (len(index) != 0):
return index
else:
return None
def get_offset(self, num_offset_bits):
start = len(self) - num_offset_bits
offset = self[start:]
if (len(offset) != 0):
return offset
else:
return None
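# --- illustrative sketch, not part of the original file ---
# Address decomposition on a toy 8-bit address (values chosen for illustration).
if __name__ == '__main__':
    addr = BinaryAddress(word_addr=13, num_addr_bits=8)
    assert addr == '00001101'
    assert addr.get_tag(4) == '0000'
    assert addr.get_offset(1) == '1'
    assert BinaryAddress.prettify(addr, 4) == '0000 1101'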
| 2,227 | 31.289855 | 85 |
py
|
randCache
|
randCache-main/Variant A/simulator.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:00:37 2021
@author: anirban
"""
import math
import shutil
from collections import OrderedDict
from cache import Cache
from bin_addr import BinaryAddress
from reference import Reference
from table import Table
REF_COL_NAMES = ('WordAddr', 'BinAddr', 'Tag', 'Partition', 'Index', 'Offset', 'Hit/Miss')
MIN_BITS_PER_GROUP = 4
DEFAULT_TABLE_WIDTH = 200
class Simulator(object):
def get_addr_refs(self, word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
return [Reference(word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition) for word_addr in word_addrs]
def set_index(self, num_partitions, num_index_bits, refs):
for ref in refs:
if (len(ref.index) > num_index_bits):
start = len(ref.index) - ((ref.partition + 1) * num_index_bits)
end = len(ref.index) - (ref.partition * num_index_bits)
ref.index = ref.index[start:end]
return refs
def display_addr_refs(self, refs, table_width):
table = Table(num_cols=len(REF_COL_NAMES), width = table_width, alignment = 'center')
table.header[:] = REF_COL_NAMES
for ref in refs:
if ref.tag is not None:
ref_tag = ref.tag
else:
ref_tag = 'n/a'
if ref.index is not None:
ref_index = ref.index
else:
ref_index = 'n/a'
if ref.offset is not None:
ref_offset = ref.offset
else:
ref_offset = 'n/a'
table.rows.append((
ref.word_addr,
BinaryAddress.prettify(ref.bin_addr, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_tag, MIN_BITS_PER_GROUP),
ref.partition,
BinaryAddress.prettify(ref_index, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_offset, MIN_BITS_PER_GROUP),
ref.cache_status))
print(table)
def display_cache(self, cache, table_width, refs):
table = Table(num_cols=len(cache), width = table_width, alignment = 'center')
table.title = 'Cache'
cache_set_names = sorted(cache.keys())
if len(cache) != 1:
table.header[:] = cache_set_names
table.rows.append([])
for index in cache_set_names:
blocks = cache[index]
table.rows[0].append("("+str(' '.join(','.join(map(str, entry['data'])) for entry in blocks))+")")
print(table)
def emulate_timing(self, refs):
timing_vals = OrderedDict()
for ref in refs:
if (ref.cache_status.name == 'hit'):
timing_vals[str(ref.word_addr)] = 200
else:
timing_vals[str(ref.word_addr)] = 600
return timing_vals
def run_simulation(self, num_blocks_per_set, num_words_per_block, cache_size, num_partitions, replacement_policy, num_addr_bits, word_addrs):
num_blocks = cache_size // num_words_per_block
num_sets = num_blocks // num_blocks_per_set
ways_per_partition = num_blocks_per_set // num_partitions
num_addr_bits = max(num_addr_bits, int(math.log2(max(word_addrs))) + 1)
num_offset_bits = int(math.log2(num_words_per_block))
num_index_bits = int(math.log2(num_sets))
num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits
refs = self.get_addr_refs(word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition)
cache = Cache(num_sets = num_sets, num_index_bits = num_index_bits, num_partitions = num_partitions, ways_per_partition = ways_per_partition)
cache.read_refs(num_blocks_per_set, num_words_per_block, num_partitions, replacement_policy, refs)
timing_vals = self.emulate_timing(refs)
refs = self.set_index(num_partitions, num_index_bits, refs)
table_width = max((shutil.get_terminal_size((DEFAULT_TABLE_WIDTH, None)).columns, DEFAULT_TABLE_WIDTH))
# print()
# self.display_addr_refs(refs, table_width)
# print()
# self.display_cache(cache, table_width, refs)
# print()
return timing_vals
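# --- illustrative sketch, not part of the original file ---
# Parameter derivation inside run_simulation() for a hypothetical configuration
# (64-word cache, 2 words/block, 4 blocks/set, 2 partitions):
#   num_blocks          = 64 // 2    = 32
#   num_sets            = 32 // 4    = 8
#   ways_per_partition  = 4 // 2     = 2
#   num_offset_bits     = log2(2)    = 1
#   num_index_bits      = log2(8)    = 3
#   num_tag_bits        = 32 - 3 - 1 = 28   (with the default 32 address bits)
if __name__ == '__main__':
    sim = Simulator()
    timings = sim.run_simulation(num_blocks_per_set=4, num_words_per_block=2,
                                 cache_size=64, num_partitions=2,
                                 replacement_policy='rand', num_addr_bits=32,
                                 word_addrs=[3, 80, 41, 786, 874, 875])
    print(timings)   # OrderedDict: word address -> 200 (hit) or 600 (miss)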
| 4,679 | 35.850394 | 163 |
py
|
randCache
|
randCache-main/Variant A/getHM_ppp.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 28 14:29:38 2021
@author: anirban
"""
import matplotlib.pyplot as plt
import numpy as np
with open("timing_ppp_algorithm_run_4.txt", 'r') as f1:
file = f1.readlines()
timing = list(filter(None, list(map(lambda each:each.strip("\n"), file))))
count_hits = [0] * len(timing)
for i, rows in enumerate(timing):
rows = list(map(int, rows.split()))
count_hits[i] = 0
for item in rows:
count_hits[i] += 1
timing[i] = rows
count_hits = list(filter(lambda x: x > 0, count_hits))
index = []
for i in range(1, len(count_hits) + 1):
index.append(i)
fig=plt.figure(figsize=(8, 2))
ax = plt.gca()
plt.bar(index, count_hits, width=0.5)
plt.xlabel("Number of ways (associativity)", fontweight='bold', fontsize=12)
plt.ylabel("Number of trials \n per collision", fontweight='bold', fontsize=12)
ax.xaxis.set_tick_params(labelsize=11)
ax.yaxis.set_tick_params(labelsize=11)
plt.xticks(weight = 'bold')
plt.yticks(weight = 'bold')
plt.savefig("scattercache_ppp_algo.pdf", dpi=1200, bbox_inches = 'tight')
plt.show()
| 1,118 | 23.866667 | 79 |
py
|
randCache
|
randCache-main/Variant A/getHM_new.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 28 14:29:38 2021
@author: anirban
"""
import matplotlib.pyplot as plt
import numpy as np
with open("timing_new_algorithm_run_4.txt", 'r') as f1:
file = f1.readlines()
timing = list(filter(None, list(map(lambda each:each.strip("\n"), file))))
count_hits = [0] * len(timing)
for i, rows in enumerate(timing):
rows = list(map(int, rows.split()))
count_hits[i] = 0
for item in rows:
count_hits[i] += 1
timing[i] = rows
count_hits = list(filter(lambda x: x > 0, count_hits))
index = []
for i in range(1, len(count_hits) + 1):
index.append(i)
fig=plt.figure(figsize=(8, 2))
ax = plt.gca()
plt.bar(index, count_hits, width=0.5)
plt.xlabel("Number of ways (associativity)", fontweight='bold', fontsize=12)
plt.ylabel("Number of trials \n per collision", fontweight='bold', fontsize=12)
ax.xaxis.set_tick_params(labelsize=11)
ax.yaxis.set_tick_params(labelsize=11)
plt.xticks(weight = 'bold')
plt.yticks(weight = 'bold')
plt.savefig("scattercache_new_algo.pdf", dpi=1200, bbox_inches = 'tight')
plt.show()
| 1,118 | 23.866667 | 79 |
py
|
randCache
|
randCache-main/Variant A/cache.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:53:27 2021
@author: anirban
"""
from bin_addr import BinaryAddress
from word_addr import WordAddress
from reference import ReferenceCacheStatus
import random
class Cache(dict):
partition = None
cal_index = None
def __init__(self, cache=None, num_sets=None, num_index_bits=None, num_partitions = None, ways_per_partition = None):
self.recently_used_addrs = []
if cache is not None:
self.update(cache)
else:
for j in range(num_partitions):
for i in range(num_sets):
index = BinaryAddress(word_addr = WordAddress(i), num_addr_bits = num_index_bits)
self[str(j)+str(index)] = []
def mark_ref_as_last_seen(self, ref):
addr_id = (ref.index, ref.tag)
if addr_id in self.recently_used_addrs:
self.recently_used_addrs.remove(addr_id)
self.recently_used_addrs.append(addr_id)
def is_hit(self, addr_partition, addr_index, addr_tag, num_partitions):
global partition
global cal_index
num_index_bits = int(len(addr_index) / num_partitions)
blocks = []
if addr_index is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
num_index_bits = int(len(addr_index) / num_partitions)
for i in range(num_partitions):
start = len(addr_index) - ((i + 1) * num_index_bits)
end = len(addr_index) - (i * num_index_bits)
actual_index = addr_index[start:end]
if (str(i)+str(actual_index)) in self:
if self[(str(i)+str(actual_index))] == []:
continue
else:
blocks = self[str(i)+str(actual_index)]
for block in blocks:
if (block['tag'] == addr_tag):
partition = i
cal_index = actual_index
return True
else:
return False
return False
def replace_block(self, blocks, replacement_policy, num_blocks_per_set, addr_partition, num_partition, addr_index, new_entry):
if (replacement_policy == 'rand'):
repl_block_index = random.randint(0, (num_blocks_per_set // num_partition) - 1)
for (i, block) in enumerate(blocks):
if (i == repl_block_index):
blocks[i] = new_entry
return
if (replacement_policy == 'lru'):
recently_used_addrs = self.recently_used_addrs
for recent_index, recent_tag in recently_used_addrs:
for i, block in enumerate(blocks):
if (recent_index == addr_index and block['tag'] == recent_tag):
blocks[i] = new_entry
return
def set_block(self, replacement_policy, num_blocks_per_set, addr_partition, num_partition, addr_index, new_entry):
num_index_bits = int(len(addr_index) / num_partition)
if addr_index is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
start = len(addr_index) - ((addr_partition + 1) * num_index_bits)
end = len(addr_index) - (addr_partition * num_index_bits)
addr_index = addr_index[start:end]
blocks = self[str(addr_partition)+ (str(addr_index).zfill(num_index_bits))]
if (len(blocks) == (num_blocks_per_set // num_partition)):
self.replace_block(blocks, replacement_policy, num_blocks_per_set, addr_partition, num_partition, addr_index, new_entry)
else:
blocks.append(new_entry)
def read_refs(self, num_blocks_per_set, num_words_per_block, num_partitions, replacement_policy, refs):
for ref in refs:
self.mark_ref_as_last_seen(ref)
if self.is_hit(ref.partition, ref.index, ref.tag, num_partitions):
ref.cache_status = ReferenceCacheStatus.hit
ref.partition = partition
ref.index = cal_index
else:
ref.cache_status = ReferenceCacheStatus.miss
self.set_block(
replacement_policy = replacement_policy,
num_blocks_per_set = num_blocks_per_set,
addr_partition = ref.partition,
num_partition = num_partitions,
addr_index = ref.index,
new_entry = ref.get_cache_entry(num_words_per_block)
)
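# --- illustrative sketch, not part of the original file ---
# The tag store is keyed by "<partition><index bits>"; a toy instance with
# 2 partitions and 4 sets per partition therefore starts with 8 empty sets.
if __name__ == '__main__':
    toy = Cache(num_sets=4, num_index_bits=2, num_partitions=2, ways_per_partition=1)
    assert sorted(toy.keys()) == ['000', '001', '010', '011',
                                  '100', '101', '110', '111']
    assert all(blocks == [] for blocks in toy.values())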
| 5,073 | 36.865672 | 132 |
py
|
randCache
|
randCache-main/Variant A/present.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 23:59:35 2021
@author: anirban
"""
class Present:
def __init__(self,key,rounds=32):
"""Create a PRESENT cipher object
key: the key as a 128-bit or 80-bit rawstring
rounds: the number of rounds as an integer, 32 by default
"""
self.rounds = rounds
if (len(key) == 80):
self.roundkeys = generateRoundkeys80(string2number(key),self.rounds)
elif (len(key) == 128):
self.roundkeys = generateRoundkeys128(string2number(key),self.rounds)
else:
raise ValueError("Key must be a 128-bit or 80-bit rawstring")
def encrypt(self,block):
"""Encrypt 1 block (8 bytes)
Input: plaintext block as raw string
Output: ciphertext block as raw string
"""
state = string2number(block)
for i in range (self.rounds-1):
state = addRoundKey(state,self.roundkeys[i])
state = sBoxLayer(state)
state = pLayer(state)
cipher = addRoundKey(state,self.roundkeys[-1])
return number2string_N(cipher,8)
def decrypt(self,block):
"""Decrypt 1 block (8 bytes)
Input: ciphertext block as raw string
Output: plaintext block as raw string
"""
state = string2number(block)
for i in range (self.rounds-1):
state = addRoundKey(state,self.roundkeys[-i-1])
state = pLayer_dec(state)
state = sBoxLayer_dec(state)
decipher = addRoundKey(state,self.roundkeys[0])
return number2string_N(decipher,8)
def get_block_size(self):
return 8
# 0 1 2 3 4 5 6 7 8 9 a b c d e f
Sbox= [0xc,0x5,0x6,0xb,0x9,0x0,0xa,0xd,0x3,0xe,0xf,0x8,0x4,0x7,0x1,0x2]
Sbox_inv = [Sbox.index(x) for x in range(16)]
PBox = [0,16,32,48,1,17,33,49,2,18,34,50,3,19,35,51,
4,20,36,52,5,21,37,53,6,22,38,54,7,23,39,55,
8,24,40,56,9,25,41,57,10,26,42,58,11,27,43,59,
12,28,44,60,13,29,45,61,14,30,46,62,15,31,47,63]
PBox_inv = [PBox.index(x) for x in range(64)]
def generateRoundkeys80(key,rounds):
"""Generate the roundkeys for a 80-bit key
Input:
key: the key as a 80-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in range(1,rounds+1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
# rawKey[0:64]
roundkeys.append(key >>16)
#1. Shift
#rawKey[19:len(rawKey)]+rawKey[0:19]
key = ((key & (2**19-1)) << 61) + (key >> 19)
#2. SBox
#rawKey[76:80] = S(rawKey[76:80])
key = (Sbox[key >> 76] << 76)+(key & (2**76-1))
#3. Salt
#rawKey[15:20] ^ i
key ^= i << 15
return roundkeys
def generateRoundkeys128(key,rounds):
"""Generate the roundkeys for a 128-bit key
Input:
key: the key as a 128-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in range(1,rounds+1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
roundkeys.append(key >>64)
#1. Shift
key = ((key & (2**67-1)) << 61) + (key >> 67)
#2. SBox
key = (Sbox[key >> 124] << 124)+(Sbox[(key >> 120) & 0xF] << 120)+(key & (2**120-1))
#3. Salt
#rawKey[62:67] ^ i
key ^= i << 62
return roundkeys
def addRoundKey(state,roundkey):
return state ^ roundkey
def sBoxLayer(state):
"""SBox function for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(16):
output += Sbox[( state >> (i*4)) & 0xF] << (i*4)
return output
def sBoxLayer_dec(state):
"""Inverse SBox function for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(16):
output += Sbox_inv[( state >> (i*4)) & 0xF] << (i*4)
return output
def pLayer(state):
"""Permutation layer for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(64):
output += ((state >> i) & 0x01) << PBox[i]
return output
def pLayer_dec(state):
"""Permutation layer for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(64):
output += ((state >> i) & 0x01) << PBox_inv[i]
return output
def string2number(i):
""" Convert a string to a number
Input: string (big-endian)
Output: long or integer
"""
return int(i, 16)
def number2string_N(i, N):
"""Convert a number to a string of fixed size
i: long or integer
N: length of string
Output: string (big-endian)
"""
s = '%0*x' % (N*2, i)
return s
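# --- illustrative sketch, not part of the original file ---
# Round-trip check, building the key the same way bin_addr.py does: an
# 80-character bit string (all zeros here) and a 64-bit block as 16 hex digits.
if __name__ == '__main__':
    demo_key = bin(int('00000000000000000000', 16))[2:].zfill(80)
    cipher = Present(demo_key)
    plaintext = '0123456789abcdef'
    ciphertext = cipher.encrypt(plaintext)
    assert int(cipher.decrypt(ciphertext), 16) == int(plaintext, 16)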
| 5,630 | 34.415094 | 100 |
py
|
randCache
|
randCache-main/Variant A/reference.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 21:12:52 2021
@author: anirban
"""
from collections import OrderedDict
from enum import Enum
from bin_addr import BinaryAddress
from word_addr import WordAddress
class Reference(object):
def __init__(self, word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
self.word_addr = WordAddress(word_addr)
self.bin_addr = BinaryAddress(word_addr = self.word_addr, num_addr_bits = num_addr_bits)
self.offset = self.bin_addr.get_offset(num_offset_bits)
self.partition = self.bin_addr.get_partition(num_partitions, ways_per_partition)
self.index = self.bin_addr.get_index(num_offset_bits, num_index_bits, num_partitions)
self.tag = self.bin_addr.get_tag(num_tag_bits)
self.cache_status = None
def __str__(self):
return str(OrderedDict(sorted(self.__dict__.items())))
__repr__ = __str__
def get_cache_entry(self, num_words_per_block):
return {
'tag': self.tag,
'data': self.word_addr.get_consecutive_words(num_words_per_block)
}
class ReferenceCacheStatus(Enum):
miss = 0
hit = 1
def __str__(self):
if self.value == ReferenceCacheStatus.hit.value:
return 'HIT'
else:
return 'miss'
__repr__ = __str__
| 1,499 | 27.301887 | 132 |
py
|
randCache
|
randCache-main/Variant B/main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 16:54:52 2021
@author: anirban
"""
import argparse
from simulator import Simulator
from collections import OrderedDict
import configparser
import random
from ast import literal_eval
def parse_cli_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--cache-size',
type=int,
required=True,
help='the size of the cache in words')
parser.add_argument(
'--num-blocks-per-set',
type=int,
default=1,
help='the number of blocks per set')
parser.add_argument(
'--num-words-per-block',
type=int,
default=2,
help='the number of words per block')
parser.add_argument(
'--num-partitions',
type=int,
default=1,
help='the number of partitions')
parser.add_argument(
'--word-addrs',
nargs='+',
type=int,
required=True,
help='one or more base-10 word addresses')
parser.add_argument(
'--num-addr-bits',
type=int,
default=32,
help='the number of bits in each given word address')
parser.add_argument(
'--replacement-policy',
choices=('lru', 'rand'),
default='rand',
# Ignore argument case (e.g. "lru" and "LRU" are equivalent)
type=str.lower,
help='the cache replacement policy (LRU or RAND)')
return parser.parse_args()
class Configs(dict):
def __init__(self, configs):
for params in configs:
if params == 'cache-size':
self.cache_size = int(configs[params])
if params == 'num-blocks-per-set':
self.num_blocks_per_set = int(configs[params])
if params == 'num-words-per-block':
self.num_words_per_block = int(configs[params])
if params == 'num-partitions':
self.num_partitions = int(configs[params])
if params == 'num-addr-bits':
self.num_addr_bits = int(configs[params])
if params == 'num-additional-tags':
self.num_additional_tags = int(configs[params])
if params == 'replacement-policy':
self.replacement_policy = configs[params]
def main(address):
parser = configparser.ConfigParser()
parser.read('config.ini')
sections = parser.sections()
cli_args = Configs(parser[sections[0]])
vars(cli_args)['word_addrs'] = address
sim = Simulator()
timing_vals = OrderedDict()
timing_vals = sim.run_simulation(**vars(cli_args))
timing_list = []
for word, timing in timing_vals.items():
timing_list.append(timing)
return timing_list
#if __name__ == '__main__':
# main([3, 80, 41, 786, 874, 875, 198, 456, 675, 325, 81, 142, 712, 564, 560, 345])
| 2,931 | 25.414414 | 86 |
py
|
randCache
|
randCache-main/Variant B/word_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:57:06 2021
@author: anirban
"""
class WordAddress(int):
def get_consecutive_words(self, num_words_per_block):
offset = self % num_words_per_block
return [(self - offset + i) for i in range(num_words_per_block)]
| 309 | 22.846154 | 72 |
py
|
randCache
|
randCache-main/Variant B/getHM.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 28 14:29:38 2021
@author: anirban
"""
import matplotlib.pyplot as plt
with open("timing_new_algorithm_run_4.txt", 'r') as f1:
file = f1.readlines()
timing = list(filter(None, list(map(lambda each:each.strip("\n"), file))))
count_hits = [0] * len(timing)
for i, rows in enumerate(timing):
rows = list(map(int, rows.split()))
count_hits[i] = 0
for item in rows:
count_hits[i] += 1
timing[i] = rows
count_hits = list(filter(lambda x: x > 0, count_hits))
index = []
for i in range(1, len(count_hits) + 1):
index.append(i)
fig=plt.figure(figsize=(8, 2))
ax = plt.gca()
plt.bar(index, count_hits, width=0.5)
plt.xlabel("Number of ways (associativity)", fontweight='bold', fontsize=12)
plt.ylabel("Number of trials \n per collision", fontweight='bold', fontsize=12)
ax.xaxis.set_tick_params(labelsize=11)
ax.yaxis.set_tick_params(labelsize=11)
plt.xticks(weight = 'bold')
plt.yticks(weight = 'bold')
plt.savefig("mirage_eviction_set.pdf", dpi=1200, bbox_inches = 'tight')
plt.show()
| 1,096 | 24.511628 | 79 |
py
|
randCache
|
randCache-main/Variant B/table.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 15:26:46 2021
@author: anirban
"""
class Table(object):
alignment_symbols = {
'left': '<',
'center': '^',
'right': '>'
}
def __init__(self, num_cols, width, alignment='left', title=None):
self.title = title
self.width = width
self.num_cols = num_cols
self.alignment = alignment
self.header = []
self.rows = []
def get_separator(self):
return '-' * self.width
def __str__(self):
table_strs = []
if self.title:
table_strs.append(self.title.center(self.width))
table_strs.append(self.get_separator())
cell_format_str = ''.join('{{:{}{}}}'.format(Table.alignment_symbols[self.alignment], self.width // self.num_cols) for i in range(self.num_cols))
if self.header:
table_strs.append(cell_format_str.format(*self.header))
table_strs.append(self.get_separator())
for row in self.rows:
table_strs.append(cell_format_str.format(*map(str, row)))
return '\n'.join(table_strs)
| 1,235 | 27.744186 | 153 |
py
|
randCache
|
randCache-main/Variant B/bin_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:44:28 2021
@author: anirban
"""
import random
from present import Present
class BinaryAddress(str):
def __new__(cls, bin_addr=None, word_addr=None, num_addr_bits=0):
if word_addr is not None:
return super().__new__(cls, bin(word_addr)[2:].zfill(num_addr_bits))
else:
return super().__new__(cls, bin_addr)
@classmethod
def prettify(cls, bin_addr, min_bits_per_group):
mid = len(bin_addr) // 2
if mid < min_bits_per_group:
return bin_addr
else:
left = cls.prettify(bin_addr[:mid], min_bits_per_group)
right = cls.prettify(bin_addr[mid:], min_bits_per_group)
return ' '.join((left, right))
def get_tag(self, num_tag_bits):
end = num_tag_bits
tag = self[:end]
if (len(tag) != 0):
return tag
else:
return None
def get_partition(self, num_partitions, ways_per_partition):
partition = (0, 1)
return partition
def get_index(self, num_offset_bits, num_index_bits, num_partitions):
index1 = None; index2 = None;
plaintext = bin(int(self[:-(num_offset_bits)], 2))[2:].zfill(64)
key = bin(int('00000000000000002222', 16))[2:].zfill(80)
cipher = Present(key)
ciphertext = cipher.encrypt(plaintext)
ciphertext = str(bin(int(ciphertext, 16))[2:].zfill(64))
start = len(ciphertext) - num_offset_bits - num_index_bits
end = len(ciphertext) - num_offset_bits
index1 = ciphertext[start:end]
key = bin(int('00000000000000001111', 16))[2:].zfill(80)
cipher = Present(key)
ciphertext = cipher.encrypt(plaintext)
ciphertext = str(bin(int(ciphertext, 16))[2:].zfill(64))
start = len(ciphertext) - num_offset_bits - num_index_bits
end = len(ciphertext) - num_offset_bits
index2 = ciphertext[start:end]
return (index1, index2)
def get_offset(self, num_offset_bits):
start = len(self) - num_offset_bits
offset = self[start:]
if (len(offset) != 0):
return offset
else:
return None
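# --- illustrative sketch, not part of the original file ---
# get_index() derives one index per skew by encrypting the block address under
# two different (hard-coded) PRESENT keys; the pair is deterministic per address.
if __name__ == '__main__':
    addr = BinaryAddress(word_addr=200, num_addr_bits=32)
    pair = addr.get_index(num_offset_bits=1, num_index_bits=3, num_partitions=2)
    assert len(pair) == 2 and all(len(ix) == 3 for ix in pair)
    # same address, same parameters -> same per-skew indices
    assert pair == addr.get_index(num_offset_bits=1, num_index_bits=3, num_partitions=2)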
| 2,353 | 30.810811 | 80 |
py
|
randCache
|
randCache-main/Variant B/simulator.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:00:37 2021
@author: anirban
"""
import math
import shutil
from collections import OrderedDict
from cache import Cache
from bin_addr import BinaryAddress
from reference import Reference
from table import Table
REF_COL_NAMES = ('WordAddr', 'BinAddr', 'Tag', 'Partition', 'Index', 'Offset', 'Hit/Miss', 'SAE/GL')
MIN_BITS_PER_GROUP = 4
DEFAULT_TABLE_WIDTH = 180
class Simulator(object):
def get_addr_refs(self, word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
return [Reference(word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition) for word_addr in word_addrs]
def set_index(self, num_partitions, num_index_bits, refs):
for ref in refs:
if (len(ref.index) > num_index_bits):
start = len(ref.index) - num_index_bits
end = len(ref.index)
ref.index = ref.index[start:end]
return refs
def display_addr_refs(self, refs, table_width):
table = Table(num_cols=len(REF_COL_NAMES), width = table_width, alignment = 'center')
table.header[:] = REF_COL_NAMES
for ref in refs:
if ref.tag is not None:
ref_tag = ref.tag
else:
ref_tag = 'n/a'
if ref.index is not None:
ref_index = ref.index
else:
ref_index = 'n/a'
if ref.offset is not None:
ref_offset = ref.offset
else:
ref_offset = 'n/a'
table.rows.append((
ref.word_addr,
BinaryAddress.prettify(ref.bin_addr, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_tag, MIN_BITS_PER_GROUP),
ref.partition,
BinaryAddress.prettify(ref_index, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_offset, MIN_BITS_PER_GROUP),
ref.cache_status,
ref.eviction_type))
print(table)
def display_cache(self, cache, table_width, refs):
table = Table(num_cols=len(cache), width = table_width, alignment = 'center')
table.title = 'Cache'
cache_set_names = sorted(cache.keys())
if len(cache) != 1:
table.header[:] = cache_set_names
table.rows.append([])
for index in cache_set_names:
blocks = cache[index]
table.rows[0].append("("+str(' '.join(','.join(map(str, entry['data'])) for entry in blocks if 'data' in entry.keys()))+")")
print(table)
def emulate_timing(self, refs):
timing_vals = OrderedDict()
for ref in refs:
if (ref.cache_status.name == 'hit'):
timing_vals[str(ref.word_addr)] = 200
else:
timing_vals[str(ref.word_addr)] = 600
return timing_vals
def run_simulation(self, num_blocks_per_set, num_words_per_block, cache_size, num_partitions, replacement_policy, num_addr_bits, num_additional_tags, word_addrs):
num_data_blocks = (cache_size//32) // num_words_per_block
num_sets_per_skew = (num_data_blocks // num_partitions) // num_blocks_per_set
num_tag_blocks_per_skew = num_sets_per_skew * (num_blocks_per_set + num_additional_tags)
num_total_ways = num_blocks_per_set + num_additional_tags
# print(num_data_blocks, num_sets_per_skew, num_tag_blocks_per_skew)
num_addr_bits = max(num_addr_bits, int(math.log2(max(word_addrs))) + 1)
num_offset_bits = int(math.log2(num_words_per_block))
num_index_bits = int(math.log2(num_sets_per_skew))
num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits
cache = Cache(num_data_blocks = num_data_blocks, num_sets_per_skew = num_sets_per_skew, num_index_bits = num_index_bits, num_partitions = num_partitions, num_tag_blocks_per_skew = num_tag_blocks_per_skew, num_addr_bits = num_addr_bits, num_offset_bits = num_offset_bits, num_total_ways = num_total_ways)
refs = self.get_addr_refs(word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, num_tag_blocks_per_skew)
cache.read_refs(num_total_ways, num_partitions, replacement_policy, num_words_per_block, refs)
timing_vals = self.emulate_timing(refs)
table_width = max((shutil.get_terminal_size((DEFAULT_TABLE_WIDTH, None)).columns, DEFAULT_TABLE_WIDTH))
#
#
# print()
# self.display_addr_refs(refs, table_width)
# print()
# self.display_cache(cache, table_width, refs)
# print()
return timing_vals
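# --- illustrative sketch, not part of the original file ---
# Parameter derivation inside run_simulation() for a hypothetical configuration
# (cache_size=2048, 2 words/block, 4 base ways/set, 2 skews, 2 extra tags/set):
#   num_data_blocks          = (2048 // 32) // 2 = 32
#   num_sets_per_skew        = (32 // 2) // 4    = 4
#   num_tag_blocks_per_skew  = 4 * (4 + 2)       = 24
#   num_total_ways           = 4 + 2             = 6
#   num_offset_bits = 1, num_index_bits = 2, num_tag_bits = 29
if __name__ == '__main__':
    sim = Simulator()
    timings = sim.run_simulation(num_blocks_per_set=4, num_words_per_block=2,
                                 cache_size=2048, num_partitions=2,
                                 replacement_policy='rand', num_addr_bits=32,
                                 num_additional_tags=2,
                                 word_addrs=[3, 80, 41, 786, 874, 875])
    print(timings)   # word address -> 200 (hit) or 600 (miss)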
| 5,014 | 36.706767 | 311 |
py
|
randCache
|
randCache-main/Variant B/cache.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:53:27 2021
@author: anirban
"""
from bin_addr import BinaryAddress
from word_addr import WordAddress
from reference import ReferenceCacheStatus
from reference import ReferenceEvictionStatus
import random
class Cache(dict):
partition = None
cal_index = None
count_ref_index = 0
eviction_status = 0
def __init__(self, data_store = None, tag_store = None, num_data_blocks = None, num_sets_per_skew = None, num_index_bits = None, num_partitions = None, num_tag_blocks_per_skew = None, num_addr_bits = None, num_offset_bits = None, num_total_ways = None):
self.recently_used_addrs = []
if tag_store is not None:
self.update(tag_store)
else:
for j in range(num_partitions):
for i in range(num_tag_blocks_per_skew):
index = BinaryAddress(word_addr = WordAddress(i), num_addr_bits = num_addr_bits)[(num_addr_bits - num_offset_bits - num_index_bits):(num_addr_bits - num_offset_bits)]
self[str(j)+str(index)] = []
for k in range(num_total_ways):
if (random.randint(0, 1) == 0) :
self[str(j)+str(index)].append({'valid': 0})
else:
self[str(j)+str(index)].append({'valid': 1})
def mark_ref_as_last_seen(self, ref):
addr_id = (ref.index, ref.tag)
if addr_id in self.recently_used_addrs:
self.recently_used_addrs.remove(addr_id)
self.recently_used_addrs.append(addr_id)
def load_balancing(self, index_tuple, num_partitions):
block1 = self[str(0)+str(index_tuple[0])]
block2 = self[str(1)+str(index_tuple[1])]
block1_valid_count = 0
block2_valid_count = 0
for block in block1:
if block['valid']:
block1_valid_count += 1
for block in block2:
if block['valid']:
block2_valid_count += 1
if block1_valid_count < block2_valid_count:
return (0, block1_valid_count)
elif block1_valid_count > block2_valid_count:
return (1, block2_valid_count)
else:
randint = random.randint(0,1)
if (randint == 0):
return (0, block1_valid_count)
else:
return (1, block2_valid_count)
def is_hit(self, addr_index, addr_tag, num_partitions):
global partition
global cal_index
num_index_bits = int(len(addr_index[0]))
blocks = []
if addr_index[0] is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
for i in range(num_partitions):
actual_index = str(i)+str(addr_index[i])
empty_set = True
if (actual_index) in self:
blocks = self[actual_index]
for block in blocks:
if 'tag' in block.keys():
empty_set = False
break
if empty_set == True:
continue
else:
for block in blocks:
if ('tag' in block.keys() and block['tag'] == addr_tag):
partition = i
cal_index = addr_index[i]
return True
else:
return False
return False
def replace_block(self, blocks, replacement_policy, num_tags_per_set, skew, valid_count, num_partition, addr_index, new_entry, count_ref_index):
global eviction_status
if (replacement_policy == 'rand'):
if (valid_count < num_tags_per_set):
repl_block_index = -1
for (index, block) in enumerate(blocks):
if block['valid'] == 0:
repl_block_index = index
break
blocks[repl_block_index] = new_entry
return
else:
# print("valid eviction")
eviction_status = 1
repl_block_index = random.randint(0, num_tags_per_set - 1)
for (index, block) in enumerate(blocks):
if (index == repl_block_index):
blocks[index] = new_entry
return
def set_block(self, replacement_policy, num_tags_per_set, num_partition, addr_index, new_entry, count_ref_index):
global partition
global cal_index
num_index_bits = int(len(addr_index[0]))
if addr_index[0] is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
skew, count_valid = self.load_balancing(index_tuple = addr_index, num_partitions = num_partition)
blocks = self[str(skew)+str(addr_index[skew])]
self.replace_block(blocks, replacement_policy, num_tags_per_set, skew, count_valid, num_partition, addr_index[skew], new_entry, count_ref_index)
partition = skew
cal_index = addr_index[skew]
def read_refs(self, num_total_tags_per_set, num_partitions, replacement_policy, num_words_per_block, refs):
count_ref_index = 0
eviction_status = 0
for ref in refs:
if self.is_hit(ref.index, ref.tag, num_partitions):
ref.cache_status = ReferenceCacheStatus.hit
ref.partition = partition
ref.index = cal_index
ref.valid = 1
ref.eviction_type = ReferenceEvictionStatus.invalid
else:
ref.cache_status = ReferenceCacheStatus.miss
if eviction_status:
ref.eviction_type = ReferenceEvictionStatus.valid
self.set_block(
replacement_policy = replacement_policy,
num_tags_per_set = num_total_tags_per_set,
num_partition = num_partitions,
addr_index = ref.index,
new_entry = ref.get_cache_entry(num_words_per_block),
count_ref_index = count_ref_index
)
ref.partition = partition
ref.index = cal_index
ref.valid = 1
count_ref_index += 1
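# --- illustrative sketch, not part of the original file ---
# load_balancing() picks the less-occupied of the two candidate sets (one per
# skew) and breaks ties at random. With 1 valid tag in skew 0's set and 2 in
# skew 1's, it always chooses skew 0:
if __name__ == '__main__':
    toy = Cache(tag_store={'000': [{'valid': 1}, {'valid': 0}],
                           '100': [{'valid': 1}, {'valid': 1}]})
    assert toy.load_balancing(('00', '00'), num_partitions=2) == (0, 1)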
| 6,699 | 40.875 | 257 |
py
|
randCache
|
randCache-main/Variant B/present.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 23:59:35 2021
@author: anirban
"""
class Present:
def __init__(self,key,rounds=32):
"""Create a PRESENT cipher object
key: the key as a 128-bit or 80-bit rawstring
rounds: the number of rounds as an integer, 32 by default
"""
self.rounds = rounds
if (len(key) == 80):
self.roundkeys = generateRoundkeys80(string2number(key),self.rounds)
elif (len(key) == 128):
self.roundkeys = generateRoundkeys128(string2number(key),self.rounds)
else:
raise ValueError("Key must be a 128-bit or 80-bit rawstring")
def encrypt(self,block):
"""Encrypt 1 block (8 bytes)
Input: plaintext block as raw string
Output: ciphertext block as raw string
"""
state = string2number(block)
for i in range (self.rounds-1):
state = addRoundKey(state,self.roundkeys[i])
state = sBoxLayer(state)
state = pLayer(state)
cipher = addRoundKey(state,self.roundkeys[-1])
return number2string_N(cipher,8)
def decrypt(self,block):
"""Decrypt 1 block (8 bytes)
Input: ciphertext block as raw string
Output: plaintext block as raw string
"""
state = string2number(block)
for i in range (self.rounds-1):
state = addRoundKey(state,self.roundkeys[-i-1])
state = pLayer_dec(state)
state = sBoxLayer_dec(state)
decipher = addRoundKey(state,self.roundkeys[0])
return number2string_N(decipher,8)
def get_block_size(self):
return 8
# 0 1 2 3 4 5 6 7 8 9 a b c d e f
Sbox= [0xc,0x5,0x6,0xb,0x9,0x0,0xa,0xd,0x3,0xe,0xf,0x8,0x4,0x7,0x1,0x2]
Sbox_inv = [Sbox.index(x) for x in range(16)]
PBox = [0,16,32,48,1,17,33,49,2,18,34,50,3,19,35,51,
4,20,36,52,5,21,37,53,6,22,38,54,7,23,39,55,
8,24,40,56,9,25,41,57,10,26,42,58,11,27,43,59,
12,28,44,60,13,29,45,61,14,30,46,62,15,31,47,63]
PBox_inv = [PBox.index(x) for x in range(64)]
def generateRoundkeys80(key,rounds):
"""Generate the roundkeys for a 80-bit key
Input:
key: the key as a 80-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in range(1,rounds+1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
# rawKey[0:64]
roundkeys.append(key >>16)
#1. Shift
#rawKey[19:len(rawKey)]+rawKey[0:19]
key = ((key & (2**19-1)) << 61) + (key >> 19)
#2. SBox
#rawKey[76:80] = S(rawKey[76:80])
key = (Sbox[key >> 76] << 76)+(key & (2**76-1))
#3. Salt
#rawKey[15:20] ^ i
key ^= i << 15
return roundkeys
def generateRoundkeys128(key,rounds):
"""Generate the roundkeys for a 128-bit key
Input:
key: the key as a 128-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in range(1,rounds+1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
roundkeys.append(key >>64)
#1. Shift
key = ((key & (2**67-1)) << 61) + (key >> 67)
#2. SBox
key = (Sbox[key >> 124] << 124)+(Sbox[(key >> 120) & 0xF] << 120)+(key & (2**120-1))
#3. Salt
#rawKey[62:67] ^ i
key ^= i << 62
return roundkeys
def addRoundKey(state,roundkey):
return state ^ roundkey
def sBoxLayer(state):
"""SBox function for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(16):
output += Sbox[( state >> (i*4)) & 0xF] << (i*4)
return output
def sBoxLayer_dec(state):
"""Inverse SBox function for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(16):
output += Sbox_inv[( state >> (i*4)) & 0xF] << (i*4)
return output
def pLayer(state):
"""Permutation layer for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(64):
output += ((state >> i) & 0x01) << PBox[i]
return output
def pLayer_dec(state):
"""Permutation layer for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(64):
output += ((state >> i) & 0x01) << PBox_inv[i]
return output
def string2number(i):
""" Convert a string to a number
Input: string (big-endian)
Output: long or integer
"""
return int(i, 16)
def number2string_N(i, N):
"""Convert a number to a string of fixed size
i: long or integer
N: length of string
Output: string (big-endian)
"""
s = '%0*x' % (N*2, i)
return s
| 5,630 | 34.415094 | 100 |
py
|
randCache
|
randCache-main/Variant B/reference.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 21:12:52 2021
@author: anirban
"""
from collections import OrderedDict
from enum import Enum
from bin_addr import BinaryAddress
from word_addr import WordAddress
class Reference(object):
def __init__(self, word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
self.word_addr = WordAddress(word_addr)
self.bin_addr = BinaryAddress(word_addr = self.word_addr, num_addr_bits = num_addr_bits)
self.offset = self.bin_addr.get_offset(num_offset_bits)
self.skew = self.bin_addr.get_partition(num_partitions, ways_per_partition)
self.index = self.bin_addr.get_index(num_offset_bits, num_index_bits, num_partitions)
self.tag = self.bin_addr.get_tag(num_tag_bits)
self.valid = 0
self.cache_status = None
self.eviction_type = None
def __str__(self):
return str(OrderedDict(sorted(self.__dict__.items())))
__repr__ = __str__
def get_cache_entry(self, num_words_per_block):
return {
'valid': 1,
'tag': self.tag,
'data': self.word_addr.get_consecutive_words(num_words_per_block)
}
class ReferenceCacheStatus(Enum):
miss = 0
hit = 1
def __str__(self):
if self.value == ReferenceCacheStatus.hit.value:
return 'HIT'
else:
return 'miss'
__repr__ = __str__
class ReferenceEvictionStatus(Enum):
valid = 1
invalid = 0
def __str__(self):
if self.value == ReferenceEvictionStatus.valid.value:
return 'SAE'
else:
return 'global'
__repr__ = __str__
| 1,851 | 25.084507 | 132 |
py
|
randCache
|
randCache-main/Variant_B_SAE_rptr_invalidation/main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 16:54:52 2021
@author: anirban
"""
import argparse
from simulator import Simulator
from collections import OrderedDict
import configparser
from ast import literal_eval
import random
import os
def parse_cli_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--cache-size',
type=int,
required=True,
help='the size of the cache in words')
parser.add_argument(
'--num-blocks-per-set',
type=int,
default=1,
help='the number of blocks per set')
parser.add_argument(
'--num-words-per-block',
type=int,
default=2,
help='the number of words per block')
parser.add_argument(
'--num-partitions',
type=int,
default=1,
help='the number of partitions')
parser.add_argument(
'--word-addrs',
nargs='+',
type=int,
required=True,
help='one or more base-10 word addresses')
parser.add_argument(
'--num-addr-bits',
type=int,
default=32,
help='the number of bits in each given word address')
parser.add_argument(
'--replacement-policy',
choices=('lru', 'rand'),
default='rand',
# Ignore argument case (e.g. "lru" and "LRU" are equivalent)
type=str.lower,
help='the cache replacement policy (LRU or RAND)')
return parser.parse_args()
class Configs(dict):
def __init__(self, configs):
for params in configs:
if params == 'cache-size':
self.cache_size = int(configs[params])
if params == 'num-blocks-per-set':
self.num_blocks_per_set = int(configs[params])
if params == 'num-words-per-block':
self.num_words_per_block = int(configs[params])
if params == 'num-partitions':
self.num_partitions = int(configs[params])
if params == 'num-addr-bits':
self.num_addr_bits = int(configs[params])
if params == 'num-additional-tags':
self.num_additional_tags = int(configs[params])
if params == 'replacement-policy':
self.replacement_policy = configs[params]
def main(address):
parser = configparser.ConfigParser()
parser.read('config.ini')
sections = parser.sections()
cli_args = Configs(parser[sections[0]])
vars(cli_args)['word_addrs'] = address
sim = Simulator()
timing_vals = OrderedDict()
timing_vals = sim.run_simulation(**vars(cli_args))
if __name__ == '__main__':
random.seed(7)
# num = []
# for i in range(1000000):
# num.append(random.randint(0, 100000000))
# with open('address_list.txt', 'w') as filehandle:
# filehandle.writelines(str(num))
outfile = 'eviction_status.txt'
if os.path.exists(outfile):
os.remove(outfile)
with open('address_list.txt', 'r') as f:
file = f.read()
tup = literal_eval(file)
main(tup)
print("")
| 3,156 | 25.090909 | 73 |
py
|
randCache
|
randCache-main/Variant_B_SAE_rptr_invalidation/word_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:57:06 2021
@author: anirban
"""
class WordAddress(int):
def get_consecutive_words(self, num_words_per_block):
offset = self % num_words_per_block
return [(self - offset + i) for i in range(num_words_per_block)]
| 309 | 22.846154 | 72 |
py
|
randCache
|
randCache-main/Variant_B_SAE_rptr_invalidation/table.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 15:26:46 2021
@author: anirban
"""
class Table(object):
alignment_symbols = {
'left': '<',
'center': '^',
'right': '>'
}
def __init__(self, num_cols, width, alignment='left', title=None):
self.title = title
self.width = width
self.num_cols = num_cols
self.alignment = alignment
self.header = []
self.rows = []
def get_separator(self):
return '-' * self.width
def __str__(self):
table_strs = []
if self.title:
table_strs.append(self.title.center(self.width))
table_strs.append(self.get_separator())
cell_format_str = ''.join('{{:{}{}}}'.format(Table.alignment_symbols[self.alignment], self.width // self.num_cols) for i in range(self.num_cols))
if self.header:
table_strs.append(cell_format_str.format(*self.header))
table_strs.append(self.get_separator())
for row in self.rows:
table_strs.append(cell_format_str.format(*map(str, row)))
return '\n'.join(table_strs)
| 1,235 | 27.744186 | 153 |
py
|
randCache
|
randCache-main/Variant_B_SAE_rptr_invalidation/bin_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:44:28 2021
@author: anirban
"""
import random
from present import Present
class BinaryAddress(str):
def __new__(cls, bin_addr=None, word_addr=None, num_addr_bits=0):
if word_addr is not None:
return super().__new__(cls, bin(word_addr)[2:].zfill(num_addr_bits))
else:
return super().__new__(cls, bin_addr)
@classmethod
def prettify(cls, bin_addr, min_bits_per_group):
mid = len(bin_addr) // 2
if mid < min_bits_per_group:
return bin_addr
else:
left = cls.prettify(bin_addr[:mid], min_bits_per_group)
right = cls.prettify(bin_addr[mid:], min_bits_per_group)
return ' '.join((left, right))
def get_tag(self, num_tag_bits):
end = num_tag_bits
tag = self[:end]
if (len(tag) != 0):
return tag
else:
return None
def get_partition(self, num_partitions, ways_per_partition):
partition = (0, 1)
return partition
def get_index(self, num_offset_bits, num_index_bits, num_partitions):
index1 = None; index2 = None;
plaintext = bin(int(self[:-(num_offset_bits)], 2))[2:].zfill(64)
key = bin(int('00000000000000000000', 16))[2:].zfill(80)
cipher = Present(key)
ciphertext = cipher.encrypt(plaintext)
ciphertext = str(bin(int(ciphertext, 16))[2:].zfill(64))
start = len(ciphertext) - num_offset_bits - num_index_bits
end = len(ciphertext) - num_offset_bits
index1 = ciphertext[start:end]
key = bin(int('00000000000000001111', 16))[2:].zfill(80)
cipher = Present(key)
ciphertext = cipher.encrypt(plaintext)
ciphertext = str(bin(int(ciphertext, 16))[2:].zfill(64))
start = len(ciphertext) - num_offset_bits - num_index_bits
end = len(ciphertext) - num_offset_bits
index2 = ciphertext[start:end]
return (index1, index2)
def get_offset(self, num_offset_bits):
start = len(self) - num_offset_bits
offset = self[start:]
if (len(offset) != 0):
return offset
else:
return None
| 2,351 | 31.666667 | 80 |
py
|
randCache
|
randCache-main/Variant_B_SAE_rptr_invalidation/simulator.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:00:37 2021
@author: anirban
"""
import math
import shutil
from collections import OrderedDict
from cache import Cache
from bin_addr import BinaryAddress
from reference import Reference
from table import Table
REF_COL_NAMES = ('WordAddr', 'BinAddr', 'Tag', 'Partition', 'Index', 'Offset', 'Hit/Miss', 'SAE/GL')
MIN_BITS_PER_GROUP = 4
DEFAULT_TABLE_WIDTH = 180
class Simulator(object):
def get_addr_refs(self, word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
return [Reference(word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition) for word_addr in word_addrs]
def set_index(self, num_partitions, num_index_bits, refs):
for ref in refs:
if (len(ref.index) > num_index_bits):
start = len(ref.index) - num_index_bits
end = len(ref.index)
ref.index = ref.index[start:end]
return refs
def display_addr_refs(self, refs, table_width):
table = Table(num_cols=len(REF_COL_NAMES), width = table_width, alignment = 'center')
table.header[:] = REF_COL_NAMES
for ref in refs:
if ref.tag is not None:
ref_tag = ref.tag
else:
ref_tag = 'n/a'
if ref.index is not None:
ref_index = ref.index
else:
ref_index = 'n/a'
if ref.offset is not None:
ref_offset = ref.offset
else:
ref_offset = 'n/a'
table.rows.append((
ref.word_addr,
BinaryAddress.prettify(ref.bin_addr, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_tag, MIN_BITS_PER_GROUP),
ref.partition,
BinaryAddress.prettify(ref_index, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_offset, MIN_BITS_PER_GROUP),
ref.cache_status,
ref.valid))
print(table)
def display_cache(self, cache, table_width, refs):
table = Table(num_cols=len(cache), width = table_width, alignment = 'center')
table.title = 'Cache'
cache_set_names = sorted(cache.keys())
if len(cache) != 1:
table.header[:] = cache_set_names
table.rows.append([])
for index in cache_set_names:
blocks = cache[index]
print (blocks)
table.rows[0].append("("+str(' '.join(','.join(map(str, entry['data'])) for entry in blocks if 'data' in entry.keys()))+")")
print(table)
def emulate_timing(self, refs):
timing_vals = OrderedDict()
for ref in refs:
if (ref.cache_status.name == 'hit'):
timing_vals[str(ref.word_addr)] = 200
else:
timing_vals[str(ref.word_addr)] = 600
return timing_vals
def run_simulation(self, num_blocks_per_set, num_words_per_block, cache_size, num_partitions, replacement_policy, num_addr_bits, num_additional_tags, word_addrs):
num_data_blocks = (cache_size//32) // num_words_per_block
num_sets_per_skew = (num_data_blocks // num_partitions) // num_blocks_per_set
num_tag_blocks_per_skew = num_sets_per_skew * (num_blocks_per_set + num_additional_tags)
num_data_blocks_per_skew = num_sets_per_skew * num_blocks_per_set
num_total_ways = num_blocks_per_set + num_additional_tags
print(num_data_blocks, num_sets_per_skew, num_tag_blocks_per_skew, num_data_blocks_per_skew)
num_addr_bits = max(num_addr_bits, int(math.log2(max(word_addrs))) + 1)
num_offset_bits = int(math.log2(num_words_per_block))
num_index_bits = int(math.log2(num_sets_per_skew))
num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits
# create metadata for each address - tag, index (after encryption), partition/skew, offset
refs = self.get_addr_refs(word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, num_tag_blocks_per_skew)
# initialize the cache - tag store and data store
# in tag store, each set is filled with some valid and some invalid entries
# in data store, all the valid entries are present. remaining slots are filled with either invalid entries or null
cache = Cache(num_data_blocks = num_data_blocks, num_sets_per_skew = num_sets_per_skew, num_index_bits = num_index_bits, num_partitions = num_partitions, num_tag_blocks_per_skew = num_tag_blocks_per_skew, num_addr_bits = num_addr_bits, num_offset_bits = num_offset_bits, num_total_ways = num_total_ways)
# print(cache)
# print("")
# print(cache.data_store)
print("")
print("... cache is initialized - tag store holds some valid and some invalid tags; data store filled with all valid and remaining invalid")
print("")
# allocate each address into its corresponding set in the tag store and a random place in the data store
cache.read_refs(num_total_ways, num_partitions, replacement_policy, num_words_per_block, num_index_bits, refs)
print ("Valid Tags : " + str(cache.count_valid_tags()) + ", Cache Capacity : " + str(num_data_blocks))
| 5,607 | 39.345324 | 311 |
py
|
randCache
|
randCache-main/Variant_B_SAE_rptr_invalidation/filehandler.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 12 23:58:45 2021
@author: anirban
"""
class writeFile(object):
def write_address(plaintext, ciphertext):
with open('address_list.txt', 'a') as filehandle:
filehandle.writelines(str(plaintext))
filehandle.writelines("\t")
filehandle.writelines(str(ciphertext))
filehandle.writelines("\n")
def write_cache_details(self, word_addr, partition, index, status):
with open('cache_details.txt','a') as filehandle:
filehandle.writelines(str(word_addr))
filehandle.writelines("\t")
filehandle.writelines(str(partition))
filehandle.writelines("\t")
filehandle.writelines(str(index))
filehandle.writelines("\t")
filehandle.writelines(str(status))
filehandle.writelines("\n")
def write_eviction_status():
with open('eviction_status.txt','a') as filehandle:
filehandle.writelines("valid eviction")
filehandle.writelines("\n")
| 1,112 | 31.735294 | 71 |
py
|
randCache
|
randCache-main/Variant_B_SAE_rptr_invalidation/cache.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:53:27 2021
@author: anirban
"""
from bin_addr import BinaryAddress
from word_addr import WordAddress
from reference import ReferenceCacheStatus
from reference import ReferenceEvictionStatus
from filehandler import writeFile
import random
import math
class Cache(dict):
partition = None
cal_index = None
count_ref_index = 0
# constructor: initialize the cache
def __init__(self, tag_store = None, num_data_blocks = None, num_sets_per_skew = None, num_index_bits = None, num_partitions = None, num_tag_blocks_per_skew = None, num_addr_bits = None, num_offset_bits = None, num_total_ways = None):
self.recently_used_addrs = []
self.data_store = [-1 for i in range(num_data_blocks)] # initialize data store with -1
if tag_store is not None:
self.update(tag_store)
else:
for j in range(num_partitions):
for i in range(num_sets_per_skew):
index = BinaryAddress(word_addr = WordAddress(i), num_addr_bits = num_addr_bits)[-int(math.log2(num_sets_per_skew)):]
self[str(j)+str(index)] = []
for k in range(num_total_ways):
                        if (random.randint(0, 1) == 0):  # randomly fill with valid or invalid entry in tag store and simultaneously add an entry in datastore
self[str(j)+str(index)].append({'valid': 0, 'fptr': self.getDataStoreEntry(num_data_blocks = num_data_blocks, valid_flag = 0, encoded_position = str(j)+str(index)+str(k), data_status = 'invalid')})
else:
self[str(j)+str(index)].append({'valid': 1, 'fptr': self.getDataStoreEntry(num_data_blocks = num_data_blocks, valid_flag = 1, encoded_position = str(j)+str(index)+str(k), data_status = 'valid')})
# count the number of entries in data store filled with valid addresses
data_store_filled_with_valid_entries = 0
for j in range(num_partitions):
for i in range(num_sets_per_skew):
index = BinaryAddress(word_addr = WordAddress(i), num_addr_bits = num_addr_bits)[-int(math.log2(num_sets_per_skew)):]
for k in range(num_total_ways):
if(self[str(j)+str(index)][k]['valid'] == 1):
data_store_filled_with_valid_entries += 1
# fill the remaining places in data store with invalid addresses
empty_places = len(self.data_store) - data_store_filled_with_valid_entries
count_new_entries = 0
for j in range(num_partitions):
for i in range(num_sets_per_skew):
index = BinaryAddress(word_addr = WordAddress(i), num_addr_bits = num_addr_bits)[-int(math.log2(num_sets_per_skew)):]
for k in range(num_total_ways):
if (random.randint(0, 1) == 1 and self[str(j)+str(index)][k]['valid'] == 0 and count_new_entries <= empty_places):
self[str(j)+str(index)][k] = {'valid': 0, 'fptr': self.getDataStoreEntry(num_data_blocks = num_data_blocks, valid_flag = 1, encoded_position = str(j)+str(index)+str(k), data_status = 'invalid')}
count_new_entries += 1
def count_valid_tags(self):
num_valid_tags = 0
for key,blocks in self.items():
for block in blocks:
if block['valid']:
num_valid_tags += 1
return (num_valid_tags)
def getDataStoreEntry(self, num_data_blocks, valid_flag, encoded_position, data_status):
if (valid_flag == 1):
for i in range(num_data_blocks):
dsindex = random.randint(0, num_data_blocks - 1) # randomly select an index in data store
if (self.data_store[dsindex] == -1): # check if it contains any entry
self.data_store[dsindex] = [encoded_position, data_status] # if not, then assign it to the address
return dsindex # return the data store index for FPTR to tag store
else:
continue
if (valid_flag == 0):
return None
def invalidate_using_rptr(self, eviction_index, num_index_bits):
if(self.data_store[eviction_index] == -1): # boundary case when the data store entry to be removed does not have any RPTR
return
rptr_entry = self.data_store[eviction_index][0][0:(num_index_bits+1)] # RPTR = cache set where the invalidated tag resides in tag store
rptr_status = self.data_store[eviction_index][1] # status of the removed entry. whether a valid or invalid entry has been removed
rptr_way = int(self.data_store[eviction_index][0][(num_index_bits+1):]) # way number inside the selected cache set of the invalidated tag
self[rptr_entry][rptr_way] = {'valid': 0, 'fptr': None} # invalidate the tag by making it invalid and FPTR = null
def do_random_GLE(self, new_tag_index, new_way_index, num_index_bits):
eviction_index = random.randint(0, len(self.data_store) - 1) # select random index for removal from data store
self.invalidate_using_rptr(eviction_index, num_index_bits) # before removal, invalidate the entry in tag store
if (self.data_store[eviction_index] == -1): # boundary case when the data store entry to be removed does not have any RPTR
self.data_store[eviction_index] = [str(new_tag_index) + str(new_way_index), 'valid'] # RPTR = new tag entry
else:
self.data_store[eviction_index][0] = str(new_tag_index) + str(new_way_index) # RPTR = new tag entry
self.data_store[eviction_index][1] = 'valid'
return eviction_index # return the data store index for FPTR entry in tag store
def mark_ref_as_last_seen(self, ref):
addr_id = (ref.index, ref.tag)
if addr_id in self.recently_used_addrs:
self.recently_used_addrs.remove(addr_id)
self.recently_used_addrs.append(addr_id)
def load_balancing(self, index_tuple, num_partitions):
block1 = self[str(0)+str(index_tuple[0])]
block2 = self[str(1)+str(index_tuple[1])]
block1_valid_count = 0
block2_valid_count = 0
# count number of valid tags in skew 0
for block in block1:
if block['valid']:
block1_valid_count += 1
# count number of valid tags in skew 1
for block in block2:
if block['valid']:
block2_valid_count += 1
# if valid entries in the skew is not balanced, choose the smaller one
if block1_valid_count < block2_valid_count:
return (0, block1_valid_count)
elif block1_valid_count > block2_valid_count:
return (1, block2_valid_count)
else: # randomly choose any skew
randint = random.randint(0,1)
if (randint == 0):
return (0, block1_valid_count)
else:
return (1, block2_valid_count)
def is_hit(self, addr_index, addr_tag, num_partitions):
global partition
global cal_index
        num_index_bits = int(len(addr_index[0])) if addr_index[0] is not None else 0  # guard: index can be None when there are no index bits
blocks = []
if addr_index[0] is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
for i in range(num_partitions):
actual_index = str(i)+str(addr_index[i])
empty_set = True
if (actual_index) in self:
blocks = self[actual_index]
for block in blocks: # enumerate through all the ways to find if any tag is present
if 'tag' in block.keys():
empty_set = False
break
if empty_set == True:
continue
else:
for block in blocks:
if ('tag' in block.keys() and block['tag'] == addr_tag): # if the tag matches, then return true; else false in all cases
partition = i
cal_index = addr_index[i]
return True
else:
return False
return False
def replace_block(self, blocks, replacement_policy, num_tags_per_set, skew, valid_count, num_partition, addr_index, new_entry, count_ref_index, num_index_bits):
if (replacement_policy == 'rand'):
if (valid_count < num_tags_per_set): # check if invalid blocks are present in the selected cache set of the skew
repl_block_index = -1 # initialize the variable
for (index, block) in enumerate(blocks): # enumerate through all ways in the cache set
if block['valid'] == 0: # if an invalid block is found
repl_block_index = index
tag_index = str(skew) + str(addr_index)
new_entry['fptr'] = self.do_random_GLE(tag_index, index, num_index_bits) #FPTR = index of entry of data store removed via GLE
break
blocks[repl_block_index] = new_entry # replace the block. This is a GLE
return
else:
print("valid eviction")
writeFile.write_eviction_status()
repl_block_index = random.randint(0, num_tags_per_set - 1) # since no invalid tags are left, select a way randomly
for (index, block) in enumerate(blocks): # find the selected way
if (index == repl_block_index):
new_entry['fptr'] = block['fptr'] # keep FPTR = previous FPTR
blocks[index] = new_entry # replace the block. This is an SAE
return
def set_block(self, replacement_policy, num_tags_per_set, num_partition, addr_index, new_entry, count_ref_index, num_index_bits):
global partition
global cal_index
        num_index_bits = int(len(addr_index[0])) if addr_index[0] is not None else 0  # guard: index can be None when there are no index bits
if addr_index[0] is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
skew, count_valid = self.load_balancing(index_tuple = addr_index, num_partitions = num_partition) # perform load balancing between the two skews
blocks = self[str(skew)+str(addr_index[skew])]
self.replace_block(blocks, replacement_policy, num_tags_per_set, skew, count_valid, num_partition, addr_index[skew], new_entry, count_ref_index, num_index_bits)
partition = skew # assign the selected skew
cal_index = addr_index[skew] # assign the cache index in the selected skew
def read_refs(self, num_total_tags_per_set, num_partitions, replacement_policy, num_words_per_block, num_index_bits, refs):
count_ref_index = 0
for ref in refs: # for every address, check whether it's a cache hit or miss
if self.is_hit(ref.index, ref.tag, num_partitions): # check if it's a hit
ref.cache_status = ReferenceCacheStatus.hit
ref.partition = partition
ref.index = cal_index
ref.valid = 1
else: # on cache miss, set up the block and replace an existing one
ref.cache_status = ReferenceCacheStatus.miss
self.set_block(
replacement_policy = replacement_policy,
num_tags_per_set = num_total_tags_per_set,
num_partition = num_partitions,
addr_index = ref.index,
new_entry = ref.get_cache_entry(num_words_per_block),
count_ref_index = count_ref_index,
num_index_bits = num_index_bits
)
ref.partition = partition
ref.index = cal_index
ref.valid = 1
count_ref_index += 1
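# Illustration of the tag-store/data-store linkage used above (hedged, hypothetical
# values): a tag-store entry such as
#   self['0' + '0101'][2] = {'valid': 1, 'tag': ..., 'fptr': 37}
# says that way 2 of set '0101' in skew 0 holds data at data-store slot 37, while the
# matching reverse pointer
#   self.data_store[37] = ['0' + '0101' + '2', 'valid']
# is what invalidate_using_rptr() follows when do_random_GLE() evicts slot 37.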
| 12,603 | 55.267857 | 238 |
py
|
randCache
|
randCache-main/Variant_B_SAE_rptr_invalidation/present.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 23:59:35 2021
@author: anirban
"""
class Present:
def __init__(self,key,rounds=32):
"""Create a PRESENT cipher object
key: the key as a 128-bit or 80-bit rawstring
rounds: the number of rounds as an integer, 32 by default
"""
self.rounds = rounds
if (len(key) == 80):
self.roundkeys = generateRoundkeys80(string2number(key),self.rounds)
elif (len(key) == 128):
self.roundkeys = generateRoundkeys128(string2number(key),self.rounds)
else:
raise ValueError("Key must be a 128-bit or 80-bit rawstring")
def encrypt(self,block):
"""Encrypt 1 block (8 bytes)
Input: plaintext block as raw string
Output: ciphertext block as raw string
"""
state = string2number(block)
for i in range (self.rounds-1):
state = addRoundKey(state,self.roundkeys[i])
state = sBoxLayer(state)
state = pLayer(state)
cipher = addRoundKey(state,self.roundkeys[-1])
return number2string_N(cipher,8)
def decrypt(self,block):
"""Decrypt 1 block (8 bytes)
Input: ciphertext block as raw string
Output: plaintext block as raw string
"""
state = string2number(block)
for i in range (self.rounds-1):
state = addRoundKey(state,self.roundkeys[-i-1])
state = pLayer_dec(state)
state = sBoxLayer_dec(state)
decipher = addRoundKey(state,self.roundkeys[0])
return number2string_N(decipher,8)
def get_block_size(self):
return 8
# 0 1 2 3 4 5 6 7 8 9 a b c d e f
Sbox= [0xc,0x5,0x6,0xb,0x9,0x0,0xa,0xd,0x3,0xe,0xf,0x8,0x4,0x7,0x1,0x2]
Sbox_inv = [Sbox.index(x) for x in range(16)]
PBox = [0,16,32,48,1,17,33,49,2,18,34,50,3,19,35,51,
4,20,36,52,5,21,37,53,6,22,38,54,7,23,39,55,
8,24,40,56,9,25,41,57,10,26,42,58,11,27,43,59,
12,28,44,60,13,29,45,61,14,30,46,62,15,31,47,63]
PBox_inv = [PBox.index(x) for x in range(64)]
def generateRoundkeys80(key,rounds):
"""Generate the roundkeys for a 80-bit key
Input:
key: the key as a 80-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in range(1,rounds+1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
# rawKey[0:64]
roundkeys.append(key >>16)
#1. Shift
#rawKey[19:len(rawKey)]+rawKey[0:19]
key = ((key & (2**19-1)) << 61) + (key >> 19)
#2. SBox
#rawKey[76:80] = S(rawKey[76:80])
key = (Sbox[key >> 76] << 76)+(key & (2**76-1))
#3. Salt
#rawKey[15:20] ^ i
key ^= i << 15
return roundkeys
def generateRoundkeys128(key,rounds):
"""Generate the roundkeys for a 128-bit key
Input:
key: the key as a 128-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in range(1,rounds+1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
roundkeys.append(key >>64)
#1. Shift
key = ((key & (2**67-1)) << 61) + (key >> 67)
#2. SBox
key = (Sbox[key >> 124] << 124)+(Sbox[(key >> 120) & 0xF] << 120)+(key & (2**120-1))
#3. Salt
#rawKey[62:67] ^ i
key ^= i << 62
return roundkeys
def addRoundKey(state,roundkey):
return state ^ roundkey
def sBoxLayer(state):
"""SBox function for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(16):
output += Sbox[( state >> (i*4)) & 0xF] << (i*4)
return output
def sBoxLayer_dec(state):
"""Inverse SBox function for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(16):
output += Sbox_inv[( state >> (i*4)) & 0xF] << (i*4)
return output
def pLayer(state):
"""Permutation layer for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(64):
output += ((state >> i) & 0x01) << PBox[i]
return output
def pLayer_dec(state):
"""Permutation layer for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(64):
output += ((state >> i) & 0x01) << PBox_inv[i]
return output
def string2number(i):
""" Convert a string to a number
Input: string (big-endian)
Output: long or integer
"""
return int(i, 16)
def number2string_N(i, N):
"""Convert a number to a string of fixed size
i: long or integer
N: length of string
Output: string (big-endian)
"""
s = '%0*x' % (N*2, i)
return s
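if __name__ == "__main__":
    # Hedged sanity sketch (illustrative only): the inverse tables really invert the
    # forward tables, and the permutation layers are mutual inverses.
    assert all(Sbox_inv[Sbox[x]] == x for x in range(16))
    assert all(PBox_inv[PBox[x]] == x for x in range(64))
    assert pLayer_dec(pLayer(0x0123456789abcdef)) == 0x0123456789abcdef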
| 5,630 | 34.415094 | 100 |
py
|
randCache
|
randCache-main/Variant_B_SAE_rptr_invalidation/reference.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 21:12:52 2021
@author: anirban
"""
from collections import OrderedDict
from enum import Enum
from bin_addr import BinaryAddress
from word_addr import WordAddress
class Reference(object):
def __init__(self, word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
self.word_addr = WordAddress(word_addr)
self.bin_addr = BinaryAddress(word_addr = self.word_addr, num_addr_bits = num_addr_bits)
self.offset = self.bin_addr.get_offset(num_offset_bits)
self.skew = self.bin_addr.get_partition(num_partitions, ways_per_partition)
self.index = self.bin_addr.get_index(num_offset_bits, num_index_bits, num_partitions)
self.tag = self.bin_addr.get_tag(num_tag_bits)
self.valid = 0
self.cache_status = None
def __str__(self):
return str(OrderedDict(sorted(self.__dict__.items())))
__repr__ = __str__
def get_cache_entry(self, num_words_per_block):
return {
'valid': 1,
'tag': self.tag,
'data': self.word_addr.get_consecutive_words(num_words_per_block),
'fptr': None
}
class ReferenceCacheStatus(Enum):
miss = 0
hit = 1
def __str__(self):
if self.value == ReferenceCacheStatus.hit.value:
return 'HIT'
else:
return 'miss'
__repr__ = __str__
class ReferenceEvictionStatus(Enum):
valid = 1
invalid = 0
def __str__(self):
if self.value == ReferenceEvictionStatus.valid.value:
return 'SAE'
else:
return 'global'
__repr__ = __str__
| 1,829 | 25.911765 | 132 |
py
|
randCache
|
randCache-main/Type-II/main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 16:54:52 2021
@author: anirban
"""
import argparse
from simulator import Simulator
from collections import OrderedDict
import configparser
import random
def parse_cli_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--cache-size',
type=int,
required=True,
help='the size of the cache in words')
parser.add_argument(
'--num-blocks-per-set',
type=int,
default=1,
help='the number of blocks per set')
parser.add_argument(
'--num-words-per-block',
type=int,
default=2,
help='the number of words per block')
parser.add_argument(
'--num-partitions',
type=int,
default=1,
help='the number of partitions')
parser.add_argument(
'--word-addrs',
nargs='+',
type=int,
required=True,
help='one or more base-10 word addresses')
parser.add_argument(
'--num-addr-bits',
type=int,
default=32,
help='the number of bits in each given word address')
parser.add_argument(
'--replacement-policy',
choices=('lru', 'rand'),
default='rand',
# Ignore argument case (e.g. "lru" and "LRU" are equivalent)
type=str.lower,
help='the cache replacement policy (LRU or RAND)')
return parser.parse_args()
class Configs(dict):
def __init__(self, configs):
for params in configs:
if params == 'cache-size':
self.cache_size = int(configs[params])
if params == 'num-blocks-per-set':
self.num_blocks_per_set = int(configs[params])
if params == 'num-words-per-block':
self.num_words_per_block = int(configs[params])
if params == 'num-partitions':
self.num_partitions = int(configs[params])
if params == 'num-addr-bits':
self.num_addr_bits = int(configs[params])
if params == 'replacement-policy':
self.replacement_policy = configs[params]
def main(address):
parser = configparser.ConfigParser()
parser.read('config.ini')
sections = parser.sections()
cli_args = Configs(parser[sections[0]])
vars(cli_args)['word_addrs'] = address
sim = Simulator()
timing_vals = OrderedDict()
timing_vals = sim.run_simulation(**vars(cli_args))
timing_list = []
for word, timing in timing_vals.items():
timing_list.append(timing)
return timing_list
# if __name__ == '__main__':
# main([3, 80, 41, 786, 874, 875, 198, 456, 675, 325, 81, 142, 712, 564, 560, 345])
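# Hedged sketch of a config.ini this script could read (the section name and values
# below are illustrative; they just mirror the keys handled by Configs above):
#   [cache]
#   cache-size = 256
#   num-blocks-per-set = 4
#   num-words-per-block = 2
#   num-partitions = 2
#   num-addr-bits = 32
#   replacement-policy = rand
# main() then pairs these settings with the word-address list passed in as `address`.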
| 2,780 | 24.990654 | 88 |
py
|
randCache
|
randCache-main/Type-II/word_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:57:06 2021
@author: anirban
"""
class WordAddress(int):
def get_consecutive_words(self, num_words_per_block):
offset = self % num_words_per_block
return [(self - offset + i) for i in range(num_words_per_block)]
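# Worked example (hedged): WordAddress(13).get_consecutive_words(4) returns
# [12, 13, 14, 15], i.e. every word address in the 4-word block containing 13.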
| 309 | 22.846154 | 72 |
py
|
randCache
|
randCache-main/Type-II/getHM.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 28 14:29:38 2021
@author: anirban
"""
import matplotlib.pyplot as plt
import numpy as np
with open("timing_new_part_no_rand.txt", 'r') as f1:
file = f1.readlines()
timing = list(filter(None, list(map(lambda each:each.strip("\n"), file))))
count_hits = [0] * len(timing)
for i, rows in enumerate(timing):
rows = list(map(int, rows.split()))
count_hits[i] = 0
for item in rows:
count_hits[i] += 1
timing[i] = rows
count_hits = list(filter(lambda x: x > 0, count_hits))
index = []
for i in range(1, len(count_hits) + 1):
index.append(i)
fig=plt.figure(figsize=(8, 2))
ax = plt.gca()
plt.bar(index, count_hits, width=0.5)
plt.xlabel("probe locations", fontweight='bold', fontsize=12)
plt.ylabel("Number of misses", fontweight='bold', fontsize=12)
ax.xaxis.set_tick_params(labelsize=11)
ax.yaxis.set_tick_params(labelsize=11)
plt.xticks(weight = 'bold')
plt.yticks(weight = 'bold')
plt.savefig("Type-II Skewed Cache.pdf", dpi=1200, bbox_inches = 'tight')
plt.show()
| 1,082 | 23.066667 | 74 |
py
|
randCache
|
randCache-main/Type-II/table.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 15:26:46 2021
@author: anirban
"""
class Table(object):
alignment_symbols = {
'left': '<',
'center': '^',
'right': '>'
}
def __init__(self, num_cols, width, alignment='left', title=None):
self.title = title
self.width = width
self.num_cols = num_cols
self.alignment = alignment
self.header = []
self.rows = []
def get_separator(self):
return '-' * self.width
def __str__(self):
table_strs = []
if self.title:
table_strs.append(self.title.center(self.width))
table_strs.append(self.get_separator())
cell_format_str = ''.join('{{:{}{}}}'.format(Table.alignment_symbols[self.alignment], self.width // self.num_cols) for i in range(self.num_cols))
if self.header:
table_strs.append(cell_format_str.format(*self.header))
table_strs.append(self.get_separator())
for row in self.rows:
table_strs.append(cell_format_str.format(*map(str, row)))
return '\n'.join(table_strs)
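if __name__ == "__main__":
    # Minimal demo with made-up values: three centered columns, each width // num_cols
    # characters wide, printed under a title and separator rules.
    demo = Table(num_cols=3, width=30, alignment='center', title='Demo')
    demo.header[:] = ['Tag', 'Index', 'Hit/Miss']
    demo.rows.append(['0xa1', '0101', 'HIT'])
    print(demo)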
| 1,235 | 27.744186 | 153 |
py
|
randCache
|
randCache-main/Type-II/bin_addr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 19:44:28 2021
@author: anirban
"""
import random
from present import Present
class BinaryAddress(str):
partition = None
def __new__(cls, bin_addr=None, word_addr=None, num_addr_bits=0):
if word_addr is not None:
return super().__new__(cls, bin(word_addr)[2:].zfill(num_addr_bits))
else:
return super().__new__(cls, bin_addr)
@classmethod
def prettify(cls, bin_addr, min_bits_per_group):
mid = len(bin_addr) // 2
if mid < min_bits_per_group:
return bin_addr
else:
left = cls.prettify(bin_addr[:mid], min_bits_per_group)
right = cls.prettify(bin_addr[mid:], min_bits_per_group)
return ' '.join((left, right))
def get_tag(self, num_tag_bits):
end = num_tag_bits
tag = self[:end]
if (len(tag) != 0):
return tag
else:
return None
def get_partition(self, num_partitions, ways_per_partition):
global partition
total_ways = num_partitions * ways_per_partition
if (num_partitions > total_ways):
num_partitions = total_ways
partition = random.randint(0, num_partitions - 1)
return partition
def get_index(self, num_offset_bits, num_index_bits, num_partitions):
global partition
ciphertext = self
start = len(ciphertext) - num_offset_bits - (num_partitions * num_index_bits)
end = len(ciphertext) - num_offset_bits
index = ciphertext[start:end]
if (len(index) != 0):
return index
else:
return None
def get_offset(self, num_offset_bits):
start = len(self) - num_offset_bits
offset = self[start:]
if (len(offset) != 0):
return offset
else:
return None
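# Worked example (hedged): with word_addr=13 and num_addr_bits=8,
#   addr = BinaryAddress(word_addr=13, num_addr_bits=8)   # '00001101'
#   addr.get_offset(1) -> '1'     (lowest bit)
#   addr.get_tag(3)    -> '000'   (highest three bits)
# get_index() slices the num_partitions * num_index_bits bits just above the offset,
# and get_partition() simply draws a random skew in [0, num_partitions).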
| 1,973 | 29.369231 | 85 |
py
|
randCache
|
randCache-main/Type-II/simulator.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:00:37 2021
@author: anirban
"""
import math
import shutil
from collections import OrderedDict
from cache import Cache
from bin_addr import BinaryAddress
from reference import Reference
from table import Table
REF_COL_NAMES = ('WordAddr', 'BinAddr', 'Tag', 'Partition', 'Index', 'Offset', 'Hit/Miss')
MIN_BITS_PER_GROUP = 4
DEFAULT_TABLE_WIDTH = 200
class Simulator(object):
def get_addr_refs(self, word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
return [Reference(word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition) for word_addr in word_addrs]
def set_index(self, num_partitions, num_index_bits, refs):
for ref in refs:
if (len(ref.index) > num_index_bits):
start = len(ref.index) - ((ref.partition + 1) * num_index_bits)
end = len(ref.index) - (ref.partition * num_index_bits)
ref.index = ref.index[start:end]
return refs
def display_addr_refs(self, refs, table_width):
table = Table(num_cols=len(REF_COL_NAMES), width = table_width, alignment = 'center')
table.header[:] = REF_COL_NAMES
for ref in refs:
if ref.tag is not None:
ref_tag = ref.tag
else:
ref_tag = 'n/a'
if ref.index is not None:
ref_index = ref.index
else:
ref_index = 'n/a'
if ref.offset is not None:
ref_offset = ref.offset
else:
ref_offset = 'n/a'
table.rows.append((
ref.word_addr,
BinaryAddress.prettify(ref.bin_addr, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_tag, MIN_BITS_PER_GROUP),
ref.partition,
BinaryAddress.prettify(ref_index, MIN_BITS_PER_GROUP),
BinaryAddress.prettify(ref_offset, MIN_BITS_PER_GROUP),
ref.cache_status))
print(table)
def display_cache(self, cache, table_width, refs):
table = Table(num_cols=len(cache), width = table_width, alignment = 'center')
table.title = 'Cache'
cache_set_names = sorted(cache.keys())
if len(cache) != 1:
table.header[:] = cache_set_names
table.rows.append([])
for index in cache_set_names:
blocks = cache[index]
table.rows[0].append("("+str(' '.join(','.join(map(str, entry['data'])) for entry in blocks))+")")
print(table)
def emulate_timing(self, refs):
timing_vals = OrderedDict()
for ref in refs:
if (ref.cache_status.name == 'hit'):
timing_vals[str(ref.word_addr)] = 200
else:
timing_vals[str(ref.word_addr)] = 600
return timing_vals
def run_simulation(self, num_blocks_per_set, num_words_per_block, cache_size, num_partitions, replacement_policy, num_addr_bits, word_addrs):
num_blocks = cache_size // num_words_per_block
num_sets = num_blocks // num_blocks_per_set
ways_per_partition = num_blocks_per_set // num_partitions
num_addr_bits = max(num_addr_bits, int(math.log2(max(word_addrs))) + 1)
num_offset_bits = int(math.log2(num_words_per_block))
num_index_bits = int(math.log2(num_sets))
num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits
refs = self.get_addr_refs(word_addrs, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition)
# print(refs)
cache = Cache(num_sets = num_sets, num_index_bits = num_index_bits, num_partitions = num_partitions, ways_per_partition = ways_per_partition)
cache.read_refs(num_blocks_per_set, num_words_per_block, num_partitions, replacement_policy, refs)
timing_vals = self.emulate_timing(refs)
refs = self.set_index(num_partitions, num_index_bits, refs)
table_width = max((shutil.get_terminal_size((DEFAULT_TABLE_WIDTH, None)).columns, DEFAULT_TABLE_WIDTH))
# print()
# self.display_addr_refs(refs, table_width)
# print()
# self.display_cache(cache, table_width, refs)
# print()
return timing_vals
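# Worked example of the derived geometry (hedged, hypothetical parameters:
# cache_size=256, num_words_per_block=2, num_blocks_per_set=4, num_partitions=2):
#   num_blocks = 256 // 2 = 128, num_sets = 128 // 4 = 32, ways_per_partition = 2,
#   num_offset_bits = log2(2) = 1, num_index_bits = log2(32) = 5,
#   num_tag_bits = num_addr_bits - 5 - 1.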
| 4,696 | 35.695313 | 163 |
py
|
randCache
|
randCache-main/Type-II/cache.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:53:27 2021
@author: anirban
"""
from bin_addr import BinaryAddress
from word_addr import WordAddress
from reference import ReferenceCacheStatus
import random
class Cache(dict):
partition = None
cal_index = None
def __init__(self, cache=None, num_sets=None, num_index_bits=None, num_partitions = None, ways_per_partition = None):
self.recently_used_addrs = []
if cache is not None:
self.update(cache)
else:
for j in range(num_partitions):
for i in range(num_sets):
index = BinaryAddress(word_addr = WordAddress(i), num_addr_bits = num_index_bits)
self[str(j)+str(index)] = []
def mark_ref_as_last_seen(self, ref):
addr_id = (ref.index, ref.tag)
if addr_id in self.recently_used_addrs:
self.recently_used_addrs.remove(addr_id)
self.recently_used_addrs.append(addr_id)
def is_hit(self, addr_partition, addr_index, addr_tag, num_partitions):
global partition
global cal_index
        num_index_bits = int(len(addr_index) / num_partitions) if addr_index is not None else 0  # guard: index can be None when there are no index bits
blocks = []
if addr_index is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
num_index_bits = int(len(addr_index) / num_partitions)
for i in range(num_partitions):
start = len(addr_index) - ((i + 1) * num_index_bits)
end = len(addr_index) - (i * num_index_bits)
actual_index = addr_index[start:end]
if (str(i)+str(actual_index)) in self:
if self[(str(i)+str(actual_index))] == []:
continue
else:
blocks = self[str(i)+str(actual_index)]
for block in blocks:
if (block['tag'] == addr_tag):
partition = i
cal_index = actual_index
return True
else:
return False
return False
def replace_block(self, blocks, replacement_policy, num_blocks_per_set, addr_partition, num_partition, addr_index, new_entry):
if (replacement_policy == 'rand'):
repl_block_index = random.randint(0, (num_blocks_per_set // num_partition) - 1)
for (i, block) in enumerate(blocks):
if (i == repl_block_index):
blocks[i] = new_entry
return
if (replacement_policy == 'lru'):
recently_used_addrs = self.recently_used_addrs
for recent_index, recent_tag in recently_used_addrs:
for i, block in enumerate(blocks):
if (recent_index == addr_index and block['tag'] == recent_tag):
blocks[i] = new_entry
return
def set_block(self, replacement_policy, num_blocks_per_set, addr_partition, num_partition, addr_index, new_entry):
        num_index_bits = int(len(addr_index) / num_partition) if addr_index is not None else 0  # guard: index can be None when there are no index bits
if addr_index is None:
blocks = self[str(0).zfill(num_index_bits)]
else:
start = len(addr_index) - ((addr_partition + 1) * num_index_bits)
end = len(addr_index) - (addr_partition * num_index_bits)
addr_index = addr_index[start:end]
blocks = self[str(addr_partition)+ (str(addr_index).zfill(num_index_bits))]
if (len(blocks) == (num_blocks_per_set // num_partition)):
self.replace_block(blocks, replacement_policy, num_blocks_per_set, addr_partition, num_partition, addr_index, new_entry)
else:
blocks.append(new_entry)
def read_refs(self, num_blocks_per_set, num_words_per_block, num_partitions, replacement_policy, refs):
for ref in refs:
self.mark_ref_as_last_seen(ref)
if self.is_hit(ref.partition, ref.index, ref.tag, num_partitions):
ref.cache_status = ReferenceCacheStatus.hit
ref.partition = partition
ref.index = cal_index
else:
ref.cache_status = ReferenceCacheStatus.miss
self.set_block(
replacement_policy = replacement_policy,
num_blocks_per_set = num_blocks_per_set,
addr_partition = ref.partition,
num_partition = num_partitions,
addr_index = ref.index,
new_entry = ref.get_cache_entry(num_words_per_block)
)
| 5,073 | 36.865672 | 132 |
py
|
randCache
|
randCache-main/Type-II/present.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 23:59:35 2021
@author: anirban
"""
class Present:
def __init__(self,key,rounds=32):
"""Create a PRESENT cipher object
key: the key as a 128-bit or 80-bit rawstring
rounds: the number of rounds as an integer, 32 by default
"""
self.rounds = rounds
if (len(key) == 80):
self.roundkeys = generateRoundkeys80(string2number(key),self.rounds)
elif (len(key) == 128):
self.roundkeys = generateRoundkeys128(string2number(key),self.rounds)
else:
raise ValueError("Key must be a 128-bit or 80-bit rawstring")
def encrypt(self,block):
"""Encrypt 1 block (8 bytes)
Input: plaintext block as raw string
Output: ciphertext block as raw string
"""
state = string2number(block)
for i in range (self.rounds-1):
state = addRoundKey(state,self.roundkeys[i])
state = sBoxLayer(state)
state = pLayer(state)
cipher = addRoundKey(state,self.roundkeys[-1])
return number2string_N(cipher,8)
def decrypt(self,block):
"""Decrypt 1 block (8 bytes)
Input: ciphertext block as raw string
Output: plaintext block as raw string
"""
state = string2number(block)
for i in range (self.rounds-1):
state = addRoundKey(state,self.roundkeys[-i-1])
state = pLayer_dec(state)
state = sBoxLayer_dec(state)
decipher = addRoundKey(state,self.roundkeys[0])
return number2string_N(decipher,8)
def get_block_size(self):
return 8
# 0 1 2 3 4 5 6 7 8 9 a b c d e f
Sbox= [0xc,0x5,0x6,0xb,0x9,0x0,0xa,0xd,0x3,0xe,0xf,0x8,0x4,0x7,0x1,0x2]
Sbox_inv = [Sbox.index(x) for x in range(16)]
PBox = [0,16,32,48,1,17,33,49,2,18,34,50,3,19,35,51,
4,20,36,52,5,21,37,53,6,22,38,54,7,23,39,55,
8,24,40,56,9,25,41,57,10,26,42,58,11,27,43,59,
12,28,44,60,13,29,45,61,14,30,46,62,15,31,47,63]
PBox_inv = [PBox.index(x) for x in range(64)]
def generateRoundkeys80(key,rounds):
"""Generate the roundkeys for a 80-bit key
Input:
key: the key as a 80-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in range(1,rounds+1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
# rawKey[0:64]
roundkeys.append(key >>16)
#1. Shift
#rawKey[19:len(rawKey)]+rawKey[0:19]
key = ((key & (2**19-1)) << 61) + (key >> 19)
#2. SBox
#rawKey[76:80] = S(rawKey[76:80])
key = (Sbox[key >> 76] << 76)+(key & (2**76-1))
#3. Salt
#rawKey[15:20] ^ i
key ^= i << 15
return roundkeys
def generateRoundkeys128(key,rounds):
"""Generate the roundkeys for a 128-bit key
Input:
key: the key as a 128-bit integer
rounds: the number of rounds as an integer
Output: list of 64-bit roundkeys as integers"""
roundkeys = []
for i in range(1,rounds+1): # (K1 ... K32)
# rawkey: used in comments to show what happens at bitlevel
roundkeys.append(key >>64)
#1. Shift
key = ((key & (2**67-1)) << 61) + (key >> 67)
#2. SBox
key = (Sbox[key >> 124] << 124)+(Sbox[(key >> 120) & 0xF] << 120)+(key & (2**120-1))
#3. Salt
#rawKey[62:67] ^ i
key ^= i << 62
return roundkeys
def addRoundKey(state,roundkey):
return state ^ roundkey
def sBoxLayer(state):
"""SBox function for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(16):
output += Sbox[( state >> (i*4)) & 0xF] << (i*4)
return output
def sBoxLayer_dec(state):
"""Inverse SBox function for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(16):
output += Sbox_inv[( state >> (i*4)) & 0xF] << (i*4)
return output
def pLayer(state):
"""Permutation layer for encryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(64):
output += ((state >> i) & 0x01) << PBox[i]
return output
def pLayer_dec(state):
"""Permutation layer for decryption
Input: 64-bit integer
Output: 64-bit integer"""
output = 0
for i in range(64):
output += ((state >> i) & 0x01) << PBox_inv[i]
return output
def string2number(i):
""" Convert a string to a number
Input: string (big-endian)
Output: long or integer
"""
return int(i, 16)
def number2string_N(i, N):
"""Convert a number to a string of fixed size
i: long or integer
N: length of string
Output: string (big-endian)
"""
s = '%0*x' % (N*2, i)
return s
| 5,630 | 34.415094 | 100 |
py
|
randCache
|
randCache-main/Type-II/reference.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 21:12:52 2021
@author: anirban
"""
from collections import OrderedDict
from enum import Enum
from bin_addr import BinaryAddress
from word_addr import WordAddress
class Reference(object):
def __init__(self, word_addr, num_addr_bits, num_offset_bits, num_index_bits, num_tag_bits, num_partitions, ways_per_partition):
self.word_addr = WordAddress(word_addr)
self.bin_addr = BinaryAddress(word_addr = self.word_addr, num_addr_bits = num_addr_bits)
self.offset = self.bin_addr.get_offset(num_offset_bits)
self.partition = self.bin_addr.get_partition(num_partitions, ways_per_partition)
self.index = self.bin_addr.get_index(num_offset_bits, num_index_bits, num_partitions)
self.tag = self.bin_addr.get_tag(num_tag_bits)
self.cache_status = None
def __str__(self):
return str(OrderedDict(sorted(self.__dict__.items())))
__repr__ = __str__
def get_cache_entry(self, num_words_per_block):
return {
'tag': self.tag,
'data': self.word_addr.get_consecutive_words(num_words_per_block)
}
class ReferenceCacheStatus(Enum):
miss = 0
hit = 1
def __str__(self):
if self.value == ReferenceCacheStatus.hit.value:
return 'HIT'
else:
return 'miss'
__repr__ = __str__
| 1,499 | 27.301887 | 132 |
py
|
FPI
|
FPI-master/readData.py
|
import tensorflow as tf
import os
import numpy as np
import nibabel as nib
class data_frame:
"""
    Container that standardises how NIfTI volumes are loaded into a tf.data
    pipeline of 2D slices (batch_dim gives the slice geometry, primary_axis the
    slicing axis) and how predictions are written back out as NIfTI files.
"""
def __init__(self, batch_dim, primary_axis):
#this helps keep data handling consistent
self.batch_dim = batch_dim
self.primary_axis = primary_axis
self.tf_dataset = None
self.file_list = None
def load_data(self,train_list,data_dir,load_labels=False,label_dir=None,**kwargs):
if load_labels:
print('loading labels, forcing shuffle OFF')
kwargs['shuffle_order']=False
file_list = get_file_list(train_list,data_dir)
tf_dataset = create_dataset(file_list,self.batch_dim,self.primary_axis,**kwargs)
if load_labels:
#load data labels as well
old_filenames = [fl_i.split('.')[0] for fl_i in train_list]
new_filenames = [fl_i+'_label' for fl_i in old_filenames]
file_list_l = [fl_i.replace(old_f,new_f) for fl_i,old_f,new_f in zip(train_list,old_filenames,new_filenames)]
if label_dir == None:
#assume labels are in the same directory
new_data_dir = data_dir
else:
new_data_dir = label_dir
#optionally add label to dir name
#new_data_dir = data_dir[:-1] if data_dir[-1] == '/' else data_dir +'_label'
file_list_l = get_file_list(file_list_l,new_data_dir)
#load labels
tf_dataset_l = create_dataset(file_list_l,self.batch_dim,self.primary_axis,loading_label=True,**kwargs)
#pack data and labels together
self.tf_dataset = tf.data.Dataset.zip((tf_dataset,tf_dataset_l))
self.file_list = (file_list, file_list_l)
return
else:
#just data
self.tf_dataset = tf_dataset
self.file_list = file_list
return
def save_nii(self,out_array,results_dir,out_fname):
#rearrange array to default orientation
out_array = np.rollaxis(out_array,0,self.primary_axis+1)#REVERSE roll
out_array = out_array[...,0]#REMOVE 'channel' dimension
#out_array = out_array.astype(np.float16)
out_nii = nib.Nifti1Image(out_array, affine=np.eye(4))
nib.save(out_nii, os.path.join(results_dir,out_fname))
return
def get_file_list(f_list_path,data_dir):
if isinstance(f_list_path, str):
#read file in path and extract file names
with open(f_list_path, 'r') as f:
#for item in list_i:
file_list = f.readlines()
elif isinstance(f_list_path, list) :
#alternatively accept list of file names
file_list = f_list_path
else:
raise ValueError("Unexpected type for train_list, must be list or path to text file")
file_list = [os.path.join(data_dir,f_i.split('\n')[0]) for f_i in file_list]
return file_list
def preprocess_func(image):
#for now, do nothing
return image
def _read_nib_func(img_path,exception_size,primary_axis,loading_label):
img_path = str(img_path.numpy(), 'utf-8')#.numpy extracts the string from the tensor
#try block handles case where label does not exist, returns zero-filled volume
#if not explicitly loading a label, raise error as per normal
try:
img_file = nib.load(img_path)
img_decoded = img_file.get_fdata() # access images
img_decoded = img_decoded.astype(np.float32)#match tensor type expected
except FileNotFoundError as e: #Exception as e:
if loading_label:
print('Assuming zero-filled data: '+str(e))
img_decoded = np.zeros(exception_size).astype(np.float32)
else:
raise e
#primary axis will be put first, 2 for brain, 1 for abdomen
img_decoded = np.rollaxis(img_decoded,primary_axis)#change axes
if len(np.shape(img_decoded)) <4:
img_decoded = img_decoded[...,None]#add 'channel' dimension
return img_decoded
def create_dataset(file_list,batch_dim,primary_axis,preproc_f = preprocess_func,shuffle_order=True,loading_label=False,normalize=True):
#create dataset
data_from_list = tf.data.Dataset.from_tensor_slices(file_list)
if shuffle_order:
data_from_list = data_from_list.shuffle(len(file_list))
#use non-tensorflow function to read nifti file, flat_map combines slices from volumes
#size and label arguments used to return zero-filled volume for non-existent labels
data_from_list = data_from_list.flat_map(lambda f_path:
tf.data.Dataset.from_tensor_slices(tuple(
tf.py_function(lambda x:_read_nib_func(x,
batch_dim[:-1],#exception size default volume
primary_axis,#axis in volume to put first
loading_label),#if loading label give option of defaulting to zeros-filled volume
[f_path],[tf.float32]))))#inputs and outputs of py_func
#data_from_list = data_from_list.map(_read_nib_func)
#without preproc_f it seems to return volume instead of flattened
data_from_list = data_from_list.map(preproc_f, num_parallel_calls=4)
data_from_list = data_from_list.batch(batch_dim[0])
data_from_list = data_from_list.prefetch(1)
return data_from_list
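# Usage sketch (hedged; the file names and directory are hypothetical):
#   frame = data_frame(batch_dim=[256, 256, 256, 1], primary_axis=2)
#   frame.load_data(['subj01.nii.gz', 'subj02.nii.gz'], '/data/train', shuffle_order=True)
#   for batch in frame.tf_dataset:
#       ...  # up to batch_dim[0] slices per batch, each of shape (256, 256, 1)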
| 5,318 | 35.9375 | 135 |
py
|
FPI
|
FPI-master/self_sup_task.py
|
import numpy as np
import tensorflow as tf
to_categorical = tf.keras.utils.to_categorical
'''
def to_categorical(y,num_classes):
onehot = np.zeros((len(y), num_classes))
onehot[np.arange(len(y)),y] = 1
return onehot
'''
def create_interp_mask(ima,patch_center,patch_width,patch_interp):
dims=np.shape(ima)
mask_i = np.zeros_like(ima)
for frame_ind in range(dims[0]):
coor_min = patch_center[frame_ind]-patch_width[frame_ind]
coor_max = patch_center[frame_ind]+patch_width[frame_ind]
#clip coordinates to within image dims
coor_min = np.clip(coor_min,0,dims[1:3])
coor_max = np.clip(coor_max,0,dims[1:3])
mask_i[frame_ind,
coor_min[0]:coor_max[0],
coor_min[1]:coor_max[1]] = patch_interp[frame_ind]
return mask_i
def patch_ex(ima1,ima2,num_classes=None,core_percent=0.8,tolerance=None):
#exchange patches between two image arrays based on a random interpolation factor
#create random anomaly
dims = np.array(np.shape(ima1))
core = core_percent*dims#width of core region
offset = (1-core_percent)*dims/2#offset to center core
min_width = np.round(0.05*dims[1])
max_width = np.round(0.2*dims[1])
center_dim1 = np.random.randint(offset[1],offset[1]+core[1],size=dims[0])
center_dim2 = np.random.randint(offset[2],offset[2]+core[2],size=dims[0])
patch_center = np.stack((center_dim1,center_dim2),1)
patch_width = np.random.randint(min_width,max_width,size=dims[0])
if num_classes == None:
#interpolation factor between 5 and 95%
patch_interp = np.random.uniform(0.05,0.95,size=dims[0])
else:
#interpolation between 0 and 1, num class options
patch_interp = np.random.choice(num_classes-1,size=dims[0])/(num_classes-1)#subtract 1 to exclude default class
offset = 1E-5#offset to separate 0 patches from background
mask_i = create_interp_mask(ima1,patch_center,patch_width,patch_interp+offset)
patch_mask = np.clip(np.ceil(mask_i),0,1)#all patches set to 1
mask_i = mask_i-patch_mask*offset#get rid of offset
mask_inv = patch_mask-mask_i
zero_mask = 1-patch_mask#zero in the region of the patch
patch_set1 = mask_i*ima1 + mask_inv*ima2 #interpolate between patches
patch_set2 = mask_inv*ima1 + mask_i*ima2
patchex1 = ima1*zero_mask + patch_set1
patchex2 = ima2*zero_mask + patch_set2
if tolerance:
valid_label = np.any(
np.floor(patch_mask*ima1*tolerance**-1)*tolerance != \
np.floor(patch_mask*ima2*tolerance**-1)*tolerance,
axis=3)
else:
valid_label = np.any(patch_mask*ima1 != patch_mask*ima2, axis=3)
label = valid_label[...,None]*mask_inv
if num_classes is not None:
label = label*(num_classes-1)
label = to_categorical(label,num_classes)
return (patchex1,label), (patchex2, label)
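# Usage sketch (hedged; the shapes are illustrative): for two batches of slices
# ima1, ima2 of shape (8, 256, 256, 1),
#   (pex1, lab1), (pex2, lab2) = patch_ex(ima1, ima2)
# returns each batch with one randomly sized, randomly placed patch blended with the
# other batch; the pixel-wise label is the blending proportion of the foreign image
# inside the patch (zero outside, and zero where the two images are identical), or a
# one-hot map over the discretised factors when num_classes is given.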
| 2,950 | 34.987805 | 119 |
py
|
FPI
|
FPI-master/fpiSubmit.py
|
import numpy as np
import itertools
import copy
from datetime import datetime
import os
import pickle
from sklearn.metrics import average_precision_score
import tensorflow as tf
import readData
import self_sup_task
from models.wide_residual_network import create_wide_residual_network_selfsup
from scipy.signal import savgol_filter
from utils import save_roc_pr_curve_data
import gc
def train_folder(input_dir,output_dir,mode,data):
gpu = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpu[0], True)
data_frame = get_data_frame(data,input_dir,shuffle_order=True)
mdl = get_mdl(data,data_frame,restore=False)
submit_train(mdl,data_frame,output_dir,data)
return
def predict_folder(input_dir,output_dir,mode,data):
#K.manual_variable_initialization(True)
gpu = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpu[0], True)
data_frame = get_data_frame(data,input_dir,shuffle_order=False)
mdl = get_mdl(data,data_frame,restore=True)
submit_test(mdl,data_frame,output_dir,mode)
return
def get_data_frame(data,input_dir,shuffle_order=False,load_labels=False):
if 'brain' in data:
batch_dim = [256,256,256,1]
primary_axis = 2
elif 'abdom' in data:
batch_dim = [512,512,512,1]
primary_axis = 1
else:
raise ValueError("data type not correctly defined. Either choose 'brain','abdom', or add a new definition")
data_frame = readData.data_frame(batch_dim,primary_axis)
input_list = os.listdir(input_dir)
data_frame.load_data(input_list,input_dir,shuffle_order=shuffle_order,load_labels=load_labels)
return data_frame
def get_mdl(data,data_frame,restore=False):
if 'brain' in data:
n, k = (16,4)#network size
net_f='create_wide_residual_network_dec'
n_classes = 1
model_dir = '/workspace/restore_dir/brain/'
elif 'abdom' in data:
n, k = (19,4)#network size
net_f='create_wide_residual_network_decdeeper'
n_classes = 5
model_dir = '/workspace/restore_dir/abdom/'
else:
raise ValueError("data type not correctly defined. Either choose 'brain','abdom', or add a new definition")
if restore:
#grab weights and build model
model_fnames = os.listdir(model_dir)
model_fnames = [fn for fn in model_fnames if 'weights' in fn][0]
model_path = os.path.join(model_dir,model_fnames)
print(model_path)
mdl = tf.keras.models.load_model(model_path)
else:
#build new model
mdl = create_wide_residual_network_selfsup(data_frame.batch_dim[1:],
n_classes, n, k, net_f=net_f)
return mdl
@tf.function
def train_step(mdl,x, y):
loss_fn = mdl.compiled_loss
with tf.GradientTape() as tape:
logits = mdl(x, training=True)
loss_value = loss_fn(y, logits)
grads = tape.gradient(loss_value, mdl.trainable_weights)
mdl.optimizer.apply_gradients(zip(grads, mdl.trainable_weights))
mdl.compiled_metrics.update_state(y, logits)
return loss_value
@tf.function
def test_step(mdl,x, y):
loss_fn = mdl.compiled_loss
logits = mdl(x, training=False)
loss_value = loss_fn(y, logits)
return loss_value
@tf.function
def pred_step(mdl,x):
pred = mdl(x, training=False)
return pred
def grouped(iterable, n):
#get n elements at a time
return zip(*[iter(iterable)]*n)
def submit_train(mdl,data_frame,output_dir,data,epochs=50,cyclic_epochs=0,save_name='selfsup_mdl',training_batch_size=32):
print('training start: {}'.format(datetime.now().strftime('%Y-%m-%d-%H%M')))
num_classes = mdl.output_shape[-1]
num_classes = None if num_classes <= 1 else num_classes
fpi_args = {'num_classes':num_classes,
'core_percent':0.5 if 'brain' in data else 0.8,
'tolerance': None if 'brain' in data else 1E-3
}
elem_in_epoch = len(data_frame.file_list)
if cyclic_epochs>0:
half_cycle_len = elem_in_epoch//4
lr_min = 1E-4
lr_max = 1E-1
half1 = np.linspace(lr_min,lr_max,half_cycle_len)
half2 = np.linspace(lr_max,lr_min,half_cycle_len)
lr_cycle = np.concatenate((half1,half2),0)
for epoch_i in range(epochs+cyclic_epochs):
        if epoch_i > epochs and (epoch_i - epochs) < len(lr_cycle):
            # cyclic training portion: step the learning rate along the triangular schedule
            tf.keras.backend.set_value(mdl.optimizer.lr, lr_cycle[epoch_i - epochs])
#get subjects in pairs for mixing
for batch_in,batch_in2 in grouped(data_frame.tf_dataset,2):
#apply fpi on batch
pex1,pex2 = self_sup_task.patch_ex(batch_in,batch_in2,**fpi_args)
ind_sampler = index_sampling(len(pex1[0]))#randomize slices in batch
for _ in range(len(pex1[0])//training_batch_size):
cur_inds = ind_sampler.get_inds(training_batch_size)
train_step(mdl,tf.gather(pex1[0],cur_inds),tf.gather(pex1[1],cur_inds))
train_step(mdl,tf.gather(pex2[0],cur_inds),tf.gather(pex2[1],cur_inds))
print('epoch {}: {}'.format(str(epoch_i),datetime.now().strftime('%Y-%m-%d-%H%M')))
#measure loss
for batch_in,batch_in2 in grouped(data_frame.tf_dataset,2):
break
pex1,pex2 = self_sup_task.patch_ex(batch_in,batch_in2,**fpi_args)
avg_loss = []
ind_sampler = index_sampling(len(pex1[0]))#randomize slices in batch
for _ in range(len(pex1[0])//training_batch_size):
cur_inds = ind_sampler.get_inds(training_batch_size)
avg_loss.append(test_step(mdl,tf.gather(pex1[0],cur_inds),tf.gather(pex1[1],cur_inds)))
avg_loss.append(test_step(mdl,tf.gather(pex2[0],cur_inds),tf.gather(pex2[1],cur_inds)))
avg_loss = np.mean(avg_loss)
print('Avg loss: {}'.format(avg_loss))
if epoch_i == 0:
best_loss = avg_loss
elif avg_loss < best_loss:
best_loss = avg_loss
print('new best loss')
save_model(mdl,output_dir,save_name+'_bestLoss',time_stamp=False)
if epoch_i % 10 == 0 or epoch_i>epochs:
#save every 10 epochs or every epoch in cyclic mode
save_model(mdl,output_dir,save_name)
#save final model
save_model(mdl,output_dir,save_name+'_final')
return
def submit_test(mdl,data_frame,output_dir,mode,batch_size=1,save_name='selfsup_mdl'):
print('testing start: {}'.format(datetime.now().strftime('%Y-%m-%d-%H%M')))
nii_file = 0
for batch_in in data_frame.tf_dataset:
#predict for subject
pred = np.zeros(np.shape(batch_in))
for ind in range(len(batch_in)//batch_size):
            pred[ind*batch_size:(ind+1)*batch_size] = pred_step(mdl,batch_in[ind*batch_size:(ind+1)*batch_size])
output_chan = np.shape(pred)[-1]
if output_chan > 1:
pred *= np.arange(output_chan)/(output_chan-1)
pred = np.sum(pred,-1,keepdims=True)
#save output as nifti and label with label suffix
#print(data_frame.file_list[0])#only data, not label names
fname_i = data_frame.file_list[nii_file].split('/')[-1]
if 'sample' in mode:
#derive subject-level score
im_level_score = np.mean(pred,axis=(1,2,3))
window_size = int((len(im_level_score)*0.1)//2)*2+1#take 10% sliding filter window
im_level_score_f = savgol_filter(im_level_score,window_size,3)#order 3 polynomial
im_level_score_s = sorted(im_level_score_f)
im_level_score_s = im_level_score_s[int(len(im_level_score_s)*0.75):]
sample_score = np.mean(im_level_score_s)#mean of top quartile values
with open(os.path.join(output_dir,fname_i + ".txt"), "w") as write_file:
write_file.write(str(sample_score))
if 'pixel' in mode:
data_frame.save_nii(pred,output_dir,fname_i)
nii_file += 1
return
def save_model(mdl,results_dir,fname,time_stamp=True):
#save model
if time_stamp:
#mdl_weights_name = fname+'_{}_weights.h5'.format(datetime.now().strftime('%Y-%m-%d-%H%M'))
mdl_weights_name = fname+'_{}_weights'.format(datetime.now().strftime('%Y-%m-%d-%H%M'))
else:
#mdl_weights_name = fname+'_weights.h5'
mdl_weights_name = fname+'_weights'
mdl_weights_path = os.path.join(results_dir, mdl_weights_name)
mdl.save(mdl_weights_path)
return
class index_sampling(object):
def __init__(self,total_len):
self.total_len = total_len
self.ind_generator = rand_ind_fisheryates(self.total_len)
def get_inds(self,batch_size):
cur_inds = list(itertools.islice(self.ind_generator,batch_size))
if len(cur_inds) < batch_size:
#end of iterator - reset/shuffle
self.ind_generator = rand_ind_fisheryates(self.total_len)
cur_inds = list(itertools.islice(self.ind_generator,batch_size))
return cur_inds
    def reset(self):
self.ind_generator = rand_ind_fisheryates(self.total_len)
return
def rand_ind_fisheryates(num_inds):
numbers=np.arange(num_inds,dtype=np.uint32)
for ind_i in range(num_inds):
j=np.random.randint(ind_i,num_inds)
numbers[ind_i],numbers[j]=numbers[j],numbers[ind_i]
yield numbers[ind_i]
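# Usage sketch (hedged): draw shuffled mini-batch indices without replacement,
# reshuffling automatically once the pool is exhausted.
#   sampler = index_sampling(256)
#   inds = sampler.get_inds(32)   # 32 distinct indices from range(256)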
| 9,435 | 34.078067 | 122 |
py
|
FPI
|
FPI-master/utils.py
|
import os
import numpy as np
from sklearn.metrics import roc_curve, precision_recall_curve, auc, average_precision_score
def save_roc_pr_curve_data(scores, labels, file_path=None):
scores = scores.flatten()
labels = labels.flatten()
scores_pos = scores[labels == 1]
scores_neg = scores[labels != 1]
truth = np.concatenate((np.zeros_like(scores_neg), np.ones_like(scores_pos)))
preds = np.concatenate((scores_neg, scores_pos))
fpr, tpr, roc_thresholds = roc_curve(truth, preds)
roc_auc = auc(fpr, tpr)
# pr curve where "normal" is the positive class
precision_norm, recall_norm, pr_thresholds_norm = precision_recall_curve(truth, preds)
pr_auc_norm = auc(recall_norm, precision_norm)
# pr curve where "anomaly" is the positive class
precision_anom, recall_anom, pr_thresholds_anom = precision_recall_curve(truth, -preds, pos_label=0)
pr_auc_anom = auc(recall_anom, precision_anom)
ap = average_precision_score(truth, preds)
if file_path:
#save complete record
np.savez_compressed(file_path,
preds=preds, truth=truth,
fpr=fpr, tpr=tpr, roc_thresholds=roc_thresholds, roc_auc=roc_auc,
precision_norm=precision_norm, recall_norm=recall_norm,
pr_thresholds_norm=pr_thresholds_norm, pr_auc_norm=pr_auc_norm,
precision_anom=precision_anom, recall_anom=recall_anom,
pr_thresholds_anom=pr_thresholds_anom, pr_auc_anom=pr_auc_anom,
ap=ap)
else:
#just return scores
score_dict = {'roc_auc':roc_auc,'pr_auc_norm':pr_auc_norm,
'pr_auc_anom':pr_auc_anom,'ap':ap}
return score_dict
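# Usage sketch (hedged, toy data): without a file_path the function just returns the
# summary scores, e.g.
#   s = np.random.rand(100); y = (np.random.rand(100) > 0.5).astype(int)
#   save_roc_pr_curve_data(s, y)
#   -> {'roc_auc': ..., 'pr_auc_norm': ..., 'pr_auc_anom': ..., 'ap': ...}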
| 1,690 | 32.82 | 104 |
py
|
FPI
|
FPI-master/train_simple.py
|
import os
import nibabel as nib
import numpy as np
import fpiSubmit
def train_folder(input_dir,output_dir,mode,data):
fpiSubmit.train_folder(input_dir,output_dir,mode,data)
return
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", required=True, type=str)
parser.add_argument("-o", "--output", required=True, type=str)
parser.add_argument("-mode", type=str, default="pixel", help="can be either 'pixel' or 'sample'.", required=False)
parser.add_argument("-d","--data", type=str, help="can be either 'brain' or 'abdom'.", required=True)
args = parser.parse_args()
input_dir = args.input
output_dir = args.output
mode = args.mode
data = args.data
train_folder(input_dir, output_dir, mode, data)
| 825 | 24.030303 | 118 |
py
|
FPI
|
FPI-master/evalresults.py
|
import json
import os
import random
import traceback
import nibabel as nib
import numpy as np
from sklearn import metrics
class InvalidPredictionException(Exception):
pass
class CouldNotProcessException(Exception):
pass
def process_file_pixelwise(pred_path, label_path):
pred_list, label_list = [], []
label_appended, pred_appended = False, False
try:
label_nimg = nib.load(label_path)
        label_array = np.rint(label_nimg.get_fdata()).astype(int)
# should already be in that interval but just be sure
label_array = np.clip(label_array, a_max=1, a_min=0)
label_array = label_array == 1
label_list = label_array.flatten()
label_appended = True
if os.path.exists(pred_path):
pred_nimg = nib.load(pred_path)
pred_array = pred_nimg.get_fdata(dtype=np.float16)
if pred_array.shape != label_array.shape:
raise InvalidPredictionException("Array shapes do not match", pred_path)
            # predictions should also be in [0, 1]
pred_array = np.clip(pred_array, a_max=1.0, a_min=0.0)
pred_list = pred_array.flatten()
pred_appended = True
else:
raise InvalidPredictionException("Prediction file not found", pred_path)
except InvalidPredictionException:
pred_array = np.zeros_like(label_array)
pred_list = pred_array.flatten()
except Exception:
if label_appended and not pred_appended:
pred_array = np.zeros_like(label_array)
pred_list = pred_array.flatten()
else:
raise CouldNotProcessException("CouldNotProcessException")
return pred_list, label_list
def process_file_samplewise(pred_path, label_path):
label_appended, pred_appended = False, False
try:
with open(label_path, "r") as val_fl:
val_str = val_fl.readline()
label = int(val_str)
label_appended = True
if os.path.exists(pred_path):
with open(pred_path, "r") as pred_fl:
pred_str = pred_fl.readline()
pred = float(pred_str)
            # predictions should also be in [0, 1]
pred = np.clip(pred, a_max=1.0, a_min=0.0)
pred_appended = True
else:
raise InvalidPredictionException("Prediction file not found", pred_path)
except InvalidPredictionException:
pred = 0.0
except Exception:
if label_appended and not pred_appended:
pred = 0.0
else:
traceback.print_exc()
raise CouldNotProcessException("CouldNotProcessException")
return [pred], [label]
def eval_list(pred_file_list, label_file_list, mode="pixel"):
label_vals = []
pred_vals = []
for pred_path, label_path in zip(pred_file_list, label_file_list):
try:
if mode == "pixel":
pred_list, label_list = process_file_pixelwise(pred_path, label_path)
elif mode == "sample":
pred_list, label_list = process_file_samplewise(pred_path, label_path)
else:
                pred_list, label_list = [], []
pred_vals.append(pred_list)
label_vals.append(label_list)
except Exception:
print(f"Smth went fundamentally wrong with {pred_path}")
label_vals = np.concatenate(label_vals, axis=0)
pred_vals = np.concatenate(pred_vals, axis=0)
return metrics.average_precision_score(label_vals, pred_vals)
def eval_dir(pred_dir, label_dir, mode="pixel", save_file=None):
pred_file_list = []
label_file_list = []
for f_name in sorted(os.listdir(label_dir)):
pred_file_path = os.path.join(pred_dir, f_name)
label_file_path = os.path.join(label_dir, f_name)
pred_file_list.append(pred_file_path)
label_file_list.append(label_file_path)
score = eval_list(pred_file_list, label_file_list, mode=mode)
if save_file is not None:
with open(save_file, "w") as outfile:
json.dump(score, outfile)
return score
def bootstrap_dir(
pred_dir, label_dir, splits_file=None, n_runs=10, n_files=2, save_dir=None, seed=123, mode="pixel",
):
random.seed(seed)
all_preds_file_list = []
all_labels_file_list = []
for f_name in sorted(os.listdir(label_dir)):
pred_file_path = os.path.join(pred_dir, f_name)
label_file_path = os.path.join(label_dir, f_name)
all_preds_file_list.append(pred_file_path)
all_labels_file_list.append(label_file_path)
all_preds_file_list = np.array(all_preds_file_list)
all_labels_file_list = np.array(all_labels_file_list)
scores = []
if splits_file is not None:
with open(splits_file, "r") as json_file:
split_list = json.load(json_file)
else:
split_list = []
idx_list = list(range(len(all_labels_file_list)))
split_list = [random.sample(idx_list, k=n_files) for r in range(n_runs)]
for idx_sub_list in split_list:
scores.append(eval_list(all_preds_file_list[idx_sub_list], all_labels_file_list[idx_sub_list], mode=mode,))
if save_dir is not None:
with open(os.path.join(save_dir, "splits.json"), "w") as outfile:
json.dump(split_list, outfile)
with open(os.path.join(save_dir, "scores.json"), "w") as outfile:
json.dump(scores, outfile)
return np.mean(scores)
def bootstrap_list(
eval_lists, save_file=None, mode="pixel", base_pred_dir=None, base_label_dir=None,
):
scores = []
for pl_list in eval_lists:
pred_lists, label_lists = zip(*pl_list)
if base_pred_dir is not None:
if mode == "pixel":
pred_lists = [os.path.join(base_pred_dir, el) for el in pred_lists]
if mode == "sample":
pred_lists = [os.path.join(base_pred_dir, el + ".txt") for el in pred_lists]
if base_label_dir is not None:
label_lists = [os.path.join(base_label_dir, el) for el in label_lists]
score = eval_list(pred_lists, label_lists, mode=mode,)
if not np.isfinite(score):
score = 0
scores.append(score)
if save_file is not None:
with open(save_file, "w") as outfile:
            json.dump([float(s) for s in scores], outfile)  # cast numpy scalars for JSON
return np.mean(scores)
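# Hedged usage sketch (editor addition, paths are hypothetical): repeated
# evaluation on random subsets of the cases, e.g.
#     mean_ap = bootstrap_dir("toy_preds", "toy_labels", n_runs=10, n_files=2,
#                             mode="sample", save_dir="toy_scores")
# or, with precomputed lists of (pred_path, label_path) pairs per split,
#     mean_ap = bootstrap_list(split_pairs, mode="sample")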
| 6,387 | 28.437788 | 115 |
py
|
FPI
|
FPI-master/var_ops.py
|
"""
Tools for manipulating sets of variables.
"""
import numpy as np
from keras import backend as K
import tensorflow as tf
import copy
def interpolate_vars(old_vars, new_vars, epsilon):
"""
Interpolate between two sequences of variables.
"""
return add_vars(old_vars, scale_vars(subtract_vars(new_vars, old_vars), epsilon))
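# Hedged example (editor addition): with plain numpy arrays standing in for a
# model's weights, epsilon=0.5 moves the old variables halfway to the new ones:
#     interpolate_vars([np.zeros(3)], [np.ones(3)], 0.5)  ->  [array([0.5, 0.5, 0.5])]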
def average_vars(var_seqs):
"""
Average a sequence of variable sequences.
"""
res = []
for variables in zip(*var_seqs):
res.append(np.mean(variables, axis=0))
return res
def subtract_vars(var_seq_1, var_seq_2):
"""
Subtract one variable sequence from another.
"""
return [v1 - v2 for v1, v2 in zip(var_seq_1, var_seq_2)]
def add_vars(var_seq_1, var_seq_2):
"""
Add two variable sequences.
"""
return [v1 + v2 for v1, v2 in zip(var_seq_1, var_seq_2)]
def scale_vars(var_seq, scale):
"""
Scale a variable sequence.
"""
return [v * scale for v in var_seq]
def update_aux(var_seq_train,var_seq_aux,var_list,tvar_list):
"""
Copy aux variables into the train set
return variable list with
trainable values from train and
auxiliary from aux
"""
var_seq = [var_train_i if lyr_i in tvar_list else var_aux_i for var_train_i,var_aux_i,lyr_i in zip(var_seq_train,var_seq_aux,var_list)]
return var_seq
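# Hedged example (editor addition, names are illustrative only): with
#     var_list  = ["conv1", "bn1", "fc"]   # every layer, in order
#     tvar_list = ["conv1", "fc"]          # the trainable subset
# update_aux(train_weights, aux_weights, var_list, tvar_list) returns the
# weights of "conv1" and "fc" from train_weights and of "bn1" from aux_weights.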
| 1,366 | 23.410714 | 143 |
py
|
FPI
|
FPI-master/pred_simple.py
|
import os
import nibabel as nib
import numpy as np
import fpiSubmit
def predict_folder(input_dir,output_dir,mode,data):
fpiSubmit.predict_folder(input_dir,output_dir,mode,data)
return
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", required=True, type=str)
parser.add_argument("-o", "--output", required=True, type=str)
parser.add_argument("-mode", type=str, default="pixel", help="can be either 'pixel' or 'sample'.", required=False)
parser.add_argument("-d","--data", type=str, help="can be either 'brain' or 'abdom'.", required=True)
args = parser.parse_args()
input_dir = args.input
output_dir = args.output
mode = args.mode
data = args.data
predict_folder(input_dir, output_dir, mode, data)
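# Hedged CLI sketch (editor addition, paths are hypothetical):
#     python pred_simple.py -i /data/toy_input -o /data/toy_output -mode pixel -d brain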
| 831 | 24.212121 | 118 |
py
|
FPI
|
FPI-master/eval_simple.py
|
import os
import nibabel as nib
import numpy as np
import fpiSubmit
import evalresults
def eval_folder(output_dir,label_dir,mode,data):
evalresults.eval_dir(output_dir,label_dir,mode=mode,save_file=os.path.join(output_dir,data+"_"+mode+"_score.txt"))
return
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--label", required=True, type=str)
parser.add_argument("-o", "--output", required=True, type=str)
parser.add_argument("-mode", type=str, default="pixel", help="can be either 'pixel' or 'sample'.", required=False)
parser.add_argument("-d","--data", type=str, help="can be either 'brain' or 'abdom'.", required=True)
args = parser.parse_args()
label_dir = args.label
output_dir = args.output
mode = args.mode
data = args.data
eval_folder(output_dir, label_dir, mode, data)
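# Hedged CLI sketch (editor addition, paths are hypothetical):
#     python eval_simple.py -o /data/toy_output -l /data/toy_labels -mode pixel -d brain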
| 902 | 25.558824 | 118 |
py
|
FPI
|
FPI-master/models/wide_residual_network.py
|
#using code from https://github.com/asmith26/wide_resnets_keras.git
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import os
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
#sys.stdout = sys.stderr
# Prevent reaching to maximum recursion depth in `theano.tensor.grad`
#sys.setrecursionlimit(2 ** 20)
import numpy as np
np.random.seed(2 ** 10)
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, AveragePooling2D, BatchNormalization, Dropout, Input, Activation, Add, Dense, Flatten, UpSampling2D, Lambda, Concatenate
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras import losses
from tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import backend as K
import tensorflow as tf
from functools import partial
USE_BIAS = False # no bias in conv
WEIGHT_INIT = "he_normal"
WEIGHT_DECAY = 0.0005
CHANNEL_AXIS = -1
# Wide residual network http://arxiv.org/abs/1605.07146
def _wide_basic(n_input_plane, n_output_plane, stride, dropout_probability=0.0, direction='down'):
def f(net):
# format of conv_params:
# [ [nb_col="kernel width", nb_row="kernel height",
# subsample="(stride_vertical,stride_horizontal)",
# border_mode="same" or "valid"] ]
        # B(3,3): original <<basic>> block
if direction == 'up':
conv_params = [ [3,3,(1,1),"same"],
[3,3,(1,1),"same"] ]
else:
conv_params = [ [3,3,stride,"same"],
[3,3,(1,1),"same"] ]
n_bottleneck_plane = n_output_plane
# Residual block
for i, v in enumerate(conv_params):
if i == 0:
if n_input_plane != n_output_plane:
net = BatchNormalization(axis=CHANNEL_AXIS)(net)
net = Activation("relu")(net)
convs = net
else:
convs = BatchNormalization(axis=CHANNEL_AXIS)(net)
convs = Activation("relu")(convs)
convs = Conv2D(n_bottleneck_plane,
(v[0],v[1]),
strides=v[2],
padding=v[3],
kernel_initializer=WEIGHT_INIT,
kernel_regularizer=l2(WEIGHT_DECAY),
use_bias=USE_BIAS)(convs)
if direction == 'up':
convs = UpSampling2D(stride)(convs)
else:
convs = BatchNormalization(axis=CHANNEL_AXIS)(convs)
convs = Activation("relu")(convs)
if dropout_probability > 0:
convs = Dropout(dropout_probability)(convs)
convs = Conv2D(n_bottleneck_plane,
(v[0],v[1]),
strides=v[2],
padding=v[3],
kernel_initializer=WEIGHT_INIT,
kernel_regularizer=l2(WEIGHT_DECAY),
use_bias=USE_BIAS)(convs)
        # Shortcut Connection: identity function or 1x1 convolutional
# (depends on difference between input & output shape - this
# corresponds to whether we are using the first block in each
# group; see _layer() ).
if n_input_plane != n_output_plane:
shortcut_stride = 1 if direction == 'up' else stride
shortcut = Conv2D(n_output_plane,
(1,1),
strides=shortcut_stride,
padding="same",
kernel_initializer=WEIGHT_INIT,
kernel_regularizer=l2(WEIGHT_DECAY),
use_bias=USE_BIAS)(net)
if direction == 'up':
shortcut = UpSampling2D(stride)(shortcut)
else:
if stride == 1:
shortcut = net
elif direction == 'up':
shortcut = UpSampling2D(stride)(net)
else:
shortcut = AveragePooling2D(stride)(net)
return Add()([convs, shortcut])
return f
# "Stacking Residual Units on the same stage"
def _layer(block, n_input_plane, n_output_plane, count, stride, **kwargs):
def f(net):
net = block(n_input_plane, n_output_plane, stride, **kwargs)(net)
for i in range(2,int(count+1)):
net = block(n_output_plane, n_output_plane, stride=(1,1), **kwargs)(net)
return net
return f
def create_model(input_shape, nb_classes, depth, k=4):
    # Classification variant of the WRN. The original body referenced
    # module-level names (input_shape, depth, k, nb_classes, weight_init, ...)
    # that are not defined in this file; they are taken as explicit parameters
    # here and the shared constants above are used instead.
    logging.debug("Creating model...")
    assert((depth - 4) % 6 == 0), 'depth should be 6n+4'
    n = (depth - 4) // 6
inputs = Input(shape=input_shape)
n_stages=[16, 16*k, 32*k, 64*k]
conv1 = Conv2D(n_stages[0],
(3, 3),
strides=1,
padding="same",
                   kernel_initializer=WEIGHT_INIT,
                   kernel_regularizer=l2(WEIGHT_DECAY),
                   use_bias=USE_BIAS)(inputs) # "One conv at the beginning (spatial size: 32x32)"
# Add wide residual blocks
block_fn = _wide_basic
conv2 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1,1))(conv1)# "Stage 1 (spatial size: 32x32)"
conv3 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2,2))(conv2)# "Stage 2 (spatial size: 16x16)"
conv4 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2,2))(conv3)# "Stage 3 (spatial size: 8x8)"
batch_norm = BatchNormalization(axis=CHANNEL_AXIS)(conv4)
relu = Activation("relu")(batch_norm)
# Classifier block
pool = AveragePooling2D(pool_size=(8, 8), strides=(1, 1), padding="same")(relu)
flatten = Flatten()(pool)
    predictions = Dense(units=nb_classes, kernel_initializer=WEIGHT_INIT, use_bias=USE_BIAS,
                        kernel_regularizer=l2(WEIGHT_DECAY), activation="softmax")(flatten)
model = Model(inputs=inputs, outputs=predictions)
return model
def create_wide_residual_network_dec(input_shape,num_classes,depth,k=4,dropout_probability=0.0,final_activation=None):
if final_activation is None:#unspecified
final_activation = 'softmax' if num_classes > 1 \
else 'sigmoid'
assert((depth - 6) % 10 == 0), 'depth should be 10n+6'
n = (depth - 6) // 10
inputs = Input(shape=input_shape)
n_stages=[16, 16*k, 32*k, 64*k, 64*k, 64*k]
conv1 = Conv2D(n_stages[0],
(3, 3),
strides=1,
padding="same",
kernel_initializer=WEIGHT_INIT,
kernel_regularizer=l2(WEIGHT_DECAY),
use_bias=USE_BIAS)(inputs) # "One conv at the beginning (spatial size: 32x32)"
# Add wide residual blocks
block_fn = _wide_basic
conv2 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1,1))(conv1)# "Stage 1 (spatial size: 32x32)"
conv3 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2,2))(conv2)# "Stage 2 (spatial size: 16x16)"
conv4 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2,2))(conv3)# "Stage 3 (spatial size: 8x8)"
conv5 = _layer(block_fn, n_input_plane=n_stages[3], n_output_plane=n_stages[4], count=n, stride=(2,2))(conv4)# "Stage 4 (spatial size: 4x4)"
conv6 = _layer(block_fn, n_input_plane=n_stages[4], n_output_plane=n_stages[5], count=n, stride=(2,2))(conv5)# "Stage 5 (spatial size: 2x2)"
block_fn = partial(_wide_basic,direction='up')#decoder blocks,keep n=1
upconv1 = _layer(block_fn, n_input_plane=n_stages[5], n_output_plane=n_stages[2], count=1, stride=(2,2))(conv6)# "Stage 1up (spatial size: 4x4)"
upconv2 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[1], count=1, stride=(2,2))(upconv1)# "Stage 2up (spatial size: 8x8)"
upconv3 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[0], count=1, stride=(2,2))(upconv2)# "Stage 3up (spatial size: 16x16)"
upconv4 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=num_classes, count=1, stride=(2,2))(upconv3)# "Stage 4up (spatial size: 32x32)"
logit = Lambda(lambda x:x,name='logit')(upconv4)
if final_activation == 'linear':
outputs = logit
else:
outputs = Activation(final_activation)(logit)
loss_f = 'categorical_crossentropy' if num_classes > 1 \
else 'binary_crossentropy'
return Model(inputs, outputs), loss_f
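# Hedged usage sketch (editor addition): an encoder-decoder WRN for 32x32
# single-channel inputs producing a sigmoid anomaly map; depth=16 satisfies the
# depth == 10n + 6 check, the other values are illustrative only.
#     model, loss_name = create_wide_residual_network_dec((32, 32, 1), num_classes=1, depth=16, k=4)
#     model.compile(Adam(lr=1e-3), loss_name, metrics=['acc'])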
def create_wide_residual_network_decdeeper(input_shape,num_classes,depth,k=4,dropout_probability=0.0,final_activation=None):
if final_activation is None:#unspecified
final_activation = 'softmax' if num_classes > 1 \
else 'sigmoid'
assert((depth - 7) % 12 == 0), 'depth should be 12n+7'
n = (depth - 7) // 12
inputs = Input(shape=input_shape)
n_stages=[16, 16*k, 32*k, 64*k, 64*k, 64*k, 64*k]
conv1 = Conv2D(n_stages[0],
(3, 3),
strides=1,
padding="same",
kernel_initializer=WEIGHT_INIT,
kernel_regularizer=l2(WEIGHT_DECAY),
use_bias=USE_BIAS)(inputs) # "One conv at the beginning (spatial size: 32x32)"
# Add wide residual blocks
block_fn = _wide_basic
conv2 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1,1))(conv1)# "Stage 1 (spatial size: 32x32)"
conv3 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2,2))(conv2)# "Stage 2 (spatial size: 16x16)"
conv4 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2,2))(conv3)# "Stage 3 (spatial size: 8x8)"
conv5 = _layer(block_fn, n_input_plane=n_stages[3], n_output_plane=n_stages[4], count=n, stride=(2,2))(conv4)# "Stage 4 (spatial size: 4x4)"
conv6 = _layer(block_fn, n_input_plane=n_stages[4], n_output_plane=n_stages[5], count=n, stride=(2,2))(conv5)# "Stage 5 (spatial size: 2x2)"
conv7 = _layer(block_fn, n_input_plane=n_stages[5], n_output_plane=n_stages[6], count=n, stride=(2,2))(conv6)# "Stage 6 (spatial size: 1x1)"
block_fn = partial(_wide_basic,direction='up')#decoder blocks,keep n=1
upconv1 = _layer(block_fn, n_input_plane=n_stages[6], n_output_plane=n_stages[2], count=1, stride=(2,2))(conv7)# "Stage 1up (spatial size: 2x2)"
    upconv2 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[2], count=1, stride=(2,2))(upconv1)# "Stage 2up (spatial size: 4x4)"
    upconv3 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[1], count=1, stride=(2,2))(upconv2)# "Stage 3up (spatial size: 8x8)"
    upconv4 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[0], count=1, stride=(2,2))(upconv3)# "Stage 4up (spatial size: 16x16)"
    upconv5 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=num_classes, count=1, stride=(2,2))(upconv4)# "Stage 5up (spatial size: 32x32)"
logit = Lambda(lambda x:x,name='logit')(upconv5)
if final_activation == 'linear':
outputs = logit
else:
outputs = Activation(final_activation)(logit)
loss_f = 'categorical_crossentropy' if num_classes > 1 \
else 'binary_crossentropy'
return Model(inputs, outputs), loss_f
def create_wide_residual_network_selfsup(input_shape,*args,**kwargs):
if 'net_f' in kwargs:
net_f = globals()[kwargs['net_f']]
del kwargs['net_f']
else:
net_f = create_wide_residual_network_dec
print('Building with network: ' + net_f.__name__+ '\n')
net_ss,loss_f = net_f(input_shape,*args,**kwargs)
optim = Adam(lr=0.001)
#optim = SGD(lr=0.001)
#optim = SGD(lr=0.1, momentum=0.9, nesterov=True)
net_ss.compile(optim,[loss_f],['acc'])
return net_ss
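# Hedged usage sketch (editor addition): building and compiling the
# self-supervised model with the deeper decoder variant selected by name;
# depth=19 satisfies its depth == 12n + 7 check, the input shape is illustrative.
#     net = create_wide_residual_network_selfsup((32, 32, 1), 1, depth=19, k=4,
#                                                net_f="create_wide_residual_network_decdeeper")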
| 12,583 | 43.624113 | 164 |
py
|