repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class: py) |
---|---|---|---|---|---|---|
flowseq | flowseq-master/flownmt/modules/priors/length_predictors/predictor.py | from typing import Dict, Tuple
import torch
import torch.nn as nn
class LengthPredictor(nn.Module):
"""
Length Predictor
"""
_registry = dict()
def __init__(self):
super(LengthPredictor, self).__init__()
self.length_unit = None
def set_length_unit(self, length_unit):
self.length_unit = length_unit
def loss(self, ctx: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
tgt_mask: Tensor
tensor for target mask [batch, tgt_length]
Returns: Tensor
tensor for loss [batch]
"""
raise NotImplementedError
    def predict(self, ctx: torch.Tensor, src_mask: torch.Tensor, topk: int = 1) -> Tuple[torch.Tensor, torch.LongTensor]:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
topk: int (default 1)
return top k length candidates for each src sentence
        Returns: Tensor1, LongTensor2
            Tensor1: log probs for each length [batch, topk]
            LongTensor2: tensor for lengths [batch, topk]
"""
raise NotImplementedError
@classmethod
def register(cls, name: str):
LengthPredictor._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return LengthPredictor._registry[name]
@classmethod
def from_params(cls, params: Dict):
raise NotImplementedError
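# Note on the registry above (an explanatory sketch, not part of the original file):
# concrete predictors call `LengthPredictor.register(<name>)` at import time, so a
# configuration can refer to them by name and build them with
# `LengthPredictor.by_name(<name>).from_params(<param dict>)`.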
| 1,712 | 26.629032 | 120 | py |
flowseq | flowseq-master/flownmt/modules/priors/length_predictors/utils.py | from typing import Tuple
import numpy as np
import torch
import torch.nn.functional as F
def discretized_mix_logistic_loss(x, means, logscales, logit_probs,
bin_size, lower, upper) -> torch.Tensor:
"""
loss for discretized mixture logistic distribution
Args:
        x: [batch]
means: [batch, nmix]
logscales: [batch, nmix]
        logit_probs: [batch, nmix]
bin_size: float
            The segment for the cdf is [x - bin_size, x + bin_size]
lower: float
upper: float
Returns:
loss [batch]
"""
eps = 1e-12
# [batch, 1]
x = x.unsqueeze(1)
# [batch, nmix]
centered_x = x - means
if isinstance(logscales, float):
inv_stdv = np.exp(-logscales)
else:
inv_stdv = torch.exp(-logscales)
# [batch, nmix]
min_in = inv_stdv * (centered_x - bin_size)
plus_in = inv_stdv * (centered_x + bin_size)
x_in = inv_stdv * centered_x
# [batch, nmix]
cdf_min = torch.sigmoid(min_in)
cdf_plus = torch.sigmoid(plus_in)
# lower < x < upper
cdf_delta = cdf_plus - cdf_min
log_cdf_mid = torch.log(cdf_delta + eps)
log_cdf_approx = x_in - logscales - 2. * F.softplus(x_in) + np.log(2 * bin_size)
# x < lower
log_cdf_low = plus_in - F.softplus(plus_in)
# x > upper
log_cdf_up = -F.softplus(min_in)
# [batch, nmix]
log_cdf = torch.where(cdf_delta.gt(1e-5), log_cdf_mid, log_cdf_approx)
log_cdf = torch.where(x.ge(lower), log_cdf, log_cdf_low)
log_cdf = torch.where(x.le(upper), log_cdf, log_cdf_up)
# [batch]
loss = torch.logsumexp(log_cdf + logit_probs, dim=1) * -1.
return loss
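# A hedged usage sketch of the loss above (shapes and values are assumptions for
# illustration only, not taken from a flowseq config):
#
#   batch, nmix, diff_range = 16, 4, 50
#   x = torch.empty(batch).uniform_(-1, 1)
#   means = torch.zeros(batch, nmix)
#   logscales = torch.zeros(batch, nmix)
#   logit_probs = torch.log_softmax(torch.zeros(batch, nmix), dim=1)
#   bin_size = 0.5 / diff_range
#   nll = discretized_mix_logistic_loss(x, means, logscales, logit_probs,
#                                       bin_size, bin_size - 1.0, 1.0 - bin_size)
#   # nll has shape [batch]; it is the negative log-likelihood under the mixture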
def discretized_mix_logistic_topk(means, logscales, logit_probs,
range, bin_size, lower, upper, topk=1) -> Tuple[torch.Tensor, torch.LongTensor]:
"""
topk for discretized mixture logistic distribution
Args:
means: [batch, nmix]
logscales: [batch, nmix]
        logit_probs: [batch, nmix]
range: int
range of x
bin_size: float
            The segment for the cdf is [x - bin_size, x + bin_size]
lower: float
upper: float
topk: int
Returns: Tensor1, Tensor2
Tensor1: log probs [batch, topk]
Tensor2: indexes for top k [batch, topk]
"""
eps = 1e-12
# [batch, 1, nmix]
means = means.unsqueeze(1)
logscales = logscales.unsqueeze(1)
logit_probs = logit_probs.unsqueeze(1)
# [1, 2 * range + 1, 1]
x = torch.arange(-range, range + 1, 1., device=means.device).unsqueeze(0).unsqueeze(2)
x = x.div(range)
# [batch, 2 * range + 1, nmix]
centered_x = x - means
if isinstance(logscales, float):
inv_stdv = np.exp(-logscales)
else:
inv_stdv = torch.exp(-logscales)
# [batch, 2 * range + 1, nmix]
min_in = inv_stdv * (centered_x - bin_size)
plus_in = inv_stdv * (centered_x + bin_size)
x_in = inv_stdv * centered_x
# [batch, 2 * range + 1, nmix]
cdf_min = torch.sigmoid(min_in)
cdf_plus = torch.sigmoid(plus_in)
# lower < x < upper
cdf_delta = cdf_plus - cdf_min
log_cdf_mid = torch.log(cdf_delta + eps)
log_cdf_approx = x_in - logscales - 2. * F.softplus(x_in) + np.log(2 * bin_size)
# x < lower
log_cdf_low = plus_in - F.softplus(plus_in)
# x > upper
log_cdf_up = -F.softplus(min_in)
# [batch, 2 * range + 1, nmix]
log_cdf = torch.where(cdf_delta.gt(1e-5), log_cdf_mid, log_cdf_approx)
log_cdf = torch.where(x.ge(lower), log_cdf, log_cdf_low)
log_cdf = torch.where(x.le(upper), log_cdf, log_cdf_up)
# [batch, 2 * range + 1]
log_probs = torch.logsumexp(log_cdf + logit_probs, dim=2)
log_probs, idx = log_probs.topk(topk, dim=1)
return log_probs, idx - range
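# A hedged example of the top-k routine above (values are illustrative): the returned
# indexes are already shifted into [-range, range], i.e. they are length *differences*
# rather than absolute lengths.
#
#   bin_size = 0.5 / 50
#   log_probs, diffs = discretized_mix_logistic_topk(means, logscales, logit_probs,
#                                                    range=50, bin_size=bin_size,
#                                                    lower=bin_size - 1.0,
#                                                    upper=1.0 - bin_size, topk=5)
#   # log_probs: [batch, 5], diffs: [batch, 5] with integer values in [-50, 50]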
| 3,823 | 29.592 | 114 | py |
flowseq | flowseq-master/flownmt/modules/priors/length_predictors/diff_discretized_mix_logistic.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.modules.priors.length_predictors.predictor import LengthPredictor
from flownmt.modules.priors.length_predictors.utils import discretized_mix_logistic_loss, discretized_mix_logistic_topk
class DiffDiscreteMixLogisticLengthPredictor(LengthPredictor):
def __init__(self, features, max_src_length, diff_range, nmix=1, dropout=0.0):
super(DiffDiscreteMixLogisticLengthPredictor, self).__init__()
self.max_src_length = max_src_length
self.range = diff_range
self.nmix = nmix
self.features = features
self.dropout = dropout
self.ctx_proj = None
self.diff = None
def set_length_unit(self, length_unit):
self.length_unit = length_unit
self.ctx_proj = nn.Sequential(nn.Linear(self.features, self.features), nn.ELU())
self.diff = nn.Linear(self.features, 3 * self.nmix)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.ctx_proj[0].bias, 0.)
nn.init.uniform_(self.diff.weight, -0.1, 0.1)
nn.init.constant_(self.diff.bias, 0.)
def forward(self, ctx):
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
# [batch, 3 * nmix]
coeffs = self.diff(ctx)
# [batch, nmix]
logit_probs = F.log_softmax(coeffs[:, :self.nmix], dim=1)
mu = coeffs[:, self.nmix:self.nmix * 2]
log_scale = coeffs[:, self.nmix * 2:]
return mu, log_scale, logit_probs
@overrides
def loss(self, ctx: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
tgt_mask: Tensor
tensor for target mask [batch, tgt_length]
Returns: Tensor
tensor for loss [batch]
"""
src_lengths = src_mask.sum(dim=1).float()
tgt_lengths = tgt_mask.sum(dim=1).float()
        mu, log_scale, logit_probs = self(ctx)
x = (tgt_lengths - src_lengths).div(self.range).clamp(min=-1, max=1)
bin_size = 0.5 / self.range
lower = bin_size - 1.0
upper = 1.0 - bin_size
loss = discretized_mix_logistic_loss(x, mu, log_scale, logit_probs, bin_size, lower, upper)
return loss
@overrides
def predict(self, ctx: torch.Tensor, src_mask:torch.Tensor, topk: int = 1) -> Tuple[torch.Tensor, torch.LongTensor]:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
topk: int (default 1)
return top k length candidates for each src sentence
Returns: Tensor1, LongTensor2
Tensor1: log probs for each length
LongTensor2: tensor for lengths [batch, topk]
"""
bin_size = 0.5 / self.range
lower = bin_size - 1.0
upper = 1.0 - bin_size
# [batch]
src_lengths = src_mask.sum(dim=1).long()
        mu, log_scale, logit_probs = self(ctx)
# [batch, topk]
log_probs, diffs = discretized_mix_logistic_topk(mu, log_scale, logit_probs,
self.range, bin_size, lower, upper, topk=topk)
lengths = (diffs + src_lengths.unsqueeze(1)).clamp(min=self.length_unit)
res = lengths.fmod(self.length_unit)
padding = (self.length_unit - res).fmod(self.length_unit)
lengths = lengths + padding
return log_probs, lengths
@classmethod
def from_params(cls, params: Dict) -> 'DiffDiscreteMixLogisticLengthPredictor':
return DiffDiscreteMixLogisticLengthPredictor(**params)
DiffDiscreteMixLogisticLengthPredictor.register('diff_logistic')
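# A minimal end-to-end sketch of this predictor (hyper-parameter values below are
# assumptions for illustration, not taken from a flowseq config):
#
#   predictor = LengthPredictor.by_name('diff_logistic').from_params(
#       {'features': 256, 'max_src_length': 100, 'diff_range': 20, 'nmix': 1})
#   predictor.set_length_unit(4)
#   # ctx: [batch, 256], src_mask: [batch, src_length]
#   log_probs, lengths = predictor.predict(ctx, src_mask, topk=3)
#   # predicted lengths are rounded up to multiples of length_unit (see predict above)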
| 4,046 | 39.069307 | 120 | py |
flowseq | flowseq-master/flownmt/modules/priors/length_predictors/__init__.py | from flownmt.modules.priors.length_predictors.predictor import LengthPredictor
from flownmt.modules.priors.length_predictors.diff_discretized_mix_logistic import DiffDiscreteMixLogisticLengthPredictor
from flownmt.modules.priors.length_predictors.diff_softmax import DiffSoftMaxLengthPredictor
| 294 | 72.75 | 121 | py |
flowseq | flowseq-master/flownmt/modules/encoders/encoder.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
class Encoder(nn.Module):
"""
Src Encoder to encode source sentence
"""
_registry = dict()
def __init__(self, vocab_size, embed_dim, padding_idx):
super(Encoder, self).__init__()
self.embed = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.embed.weight, -0.1, 0.1)
if self.embed.padding_idx is not None:
with torch.no_grad():
self.embed.weight[self.embed.padding_idx].fill_(0)
@overrides
def forward(self, src_sents, masks=None) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Encoding src sentences into src encoding representations.
Args:
src_sents: Tensor [batch, length]
masks: Tensor or None [batch, length]
Returns: Tensor1, Tensor2
Tensor1: tensor for src encoding [batch, length, hidden_size]
Tensor2: tensor for global state [batch, hidden_size]
"""
raise NotImplementedError
def init(self, src_sents, masks=None, init_scale=1.0) -> torch.Tensor:
raise NotImplementedError
@classmethod
def register(cls, name: str):
Encoder._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return Encoder._registry[name]
@classmethod
def from_params(cls, params: Dict):
raise NotImplementedError
| 1,549 | 28.245283 | 82 | py |
flowseq | flowseq-master/flownmt/modules/encoders/transformer.py | from overrides import overrides
from typing import Dict, Tuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.modules.encoders.encoder import Encoder
from flownmt.nnet.transformer import TransformerEncoderLayer
from flownmt.nnet.positional_encoding import PositionalEncoding
class TransformerCore(nn.Module):
def __init__(self, embed, num_layers, latent_dim, hidden_size, heads, dropout=0.0, max_length=100):
super(TransformerCore, self).__init__()
self.embed = embed
self.padding_idx = embed.padding_idx
embed_dim = embed.embedding_dim
self.embed_scale = math.sqrt(embed_dim)
assert embed_dim == latent_dim
layers = [TransformerEncoderLayer(latent_dim, hidden_size, heads, dropout=dropout) for _ in range(num_layers)]
self.layers = nn.ModuleList(layers)
self.pos_enc = PositionalEncoding(latent_dim, self.padding_idx, max_length + 1)
self.reset_parameters()
def reset_parameters(self):
pass
@overrides
def forward(self, src_sents, masks) -> Tuple[torch.Tensor, torch.Tensor]:
        # [batch, length, embed_dim]
x = self.embed_scale * self.embed(src_sents)
x += self.pos_enc(src_sents)
x = F.dropout(x, p=0.2, training=self.training)
        # [batch, length, latent_dim]
key_mask = masks.eq(0)
if not key_mask.any():
key_mask = None
for layer in self.layers:
x = layer(x, key_mask)
x *= masks.unsqueeze(2)
# [batch, latent_dim]
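        # use the encoder state at the last non-padded position of each sentence
        # as the sentence-level context vector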
batch = src_sents.size(0)
idx = masks.sum(dim=1).long() - 1
batch_idx = torch.arange(0, batch).long().to(idx.device)
ctx = x[batch_idx, idx]
return x, ctx
class TransformerEncoder(Encoder):
"""
Src Encoder to encode source sentence with Transformer
"""
def __init__(self, vocab_size, embed_dim, padding_idx, num_layers, latent_dim, hidden_size, heads, dropout=0.0, max_length=100):
super(TransformerEncoder, self).__init__(vocab_size, embed_dim, padding_idx)
self.core = TransformerCore(self.embed, num_layers, latent_dim, hidden_size, heads, dropout=dropout, max_length=max_length)
@overrides
def forward(self, src_sents, masks=None) -> Tuple[torch.Tensor, torch.Tensor]:
src_enc, ctx = self.core(src_sents, masks=masks)
return src_enc, ctx
def init(self, src_sents, masks=None, init_scale=1.0) -> torch.Tensor:
with torch.no_grad():
src_enc, _ = self.core(src_sents, masks=masks)
return src_enc
@classmethod
def from_params(cls, params: Dict) -> "TransformerEncoder":
return TransformerEncoder(**params)
TransformerEncoder.register('transformer')
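# A hedged construction sketch (sizes are illustrative): the encoder is normally built
# through the registry rather than instantiated directly, e.g.
#
#   enc = Encoder.by_name('transformer').from_params(
#       {'vocab_size': 32000, 'embed_dim': 256, 'padding_idx': 0,
#        'num_layers': 4, 'latent_dim': 256, 'hidden_size': 512, 'heads': 4})
#   src_enc, ctx = enc(src_sents, masks=src_masks)
#   # src_enc: [batch, src_length, 256], ctx: [batch, 256]
#   # note embed_dim must equal latent_dim (see the assert in TransformerCore)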
| 2,782 | 34.227848 | 132 | py |
flowseq | flowseq-master/flownmt/modules/encoders/__init__.py | from flownmt.modules.encoders.encoder import Encoder
from flownmt.modules.encoders.rnn import RecurrentEncoder
from flownmt.modules.encoders.transformer import TransformerEncoder
| 179 | 44 | 67 | py |
flowseq | flowseq-master/flownmt/modules/encoders/rnn.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.modules.encoders.encoder import Encoder
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
class RecurrentCore(nn.Module):
def __init__(self, embed, rnn_mode, num_layers, latent_dim, hidden_size, dropout=0.0):
super(RecurrentCore, self).__init__()
self.embed = embed
if rnn_mode == 'RNN':
RNN = nn.RNN
elif rnn_mode == 'LSTM':
RNN = nn.LSTM
elif rnn_mode == 'GRU':
RNN = nn.GRU
else:
raise ValueError('Unknown RNN mode: %s' % rnn_mode)
assert hidden_size % 2 == 0
self.rnn = RNN(embed.embedding_dim, hidden_size // 2,
num_layers=num_layers, batch_first=True, bidirectional=True)
self.enc_proj = nn.Sequential(nn.Linear(hidden_size, latent_dim), nn.ELU())
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.enc_proj[0].bias, 0.)
@overrides
def forward(self, src_sents, masks) -> Tuple[torch.Tensor, torch.Tensor]:
word_embed = F.dropout(self.embed(src_sents), p=0.2, training=self.training)
lengths = masks.sum(dim=1).long()
packed_embed = pack_padded_sequence(word_embed, lengths, batch_first=True, enforce_sorted=False)
packed_enc, _ = self.rnn(packed_embed)
# [batch, length, hidden_size]
src_enc, _ = pad_packed_sequence(packed_enc, batch_first=True, total_length=masks.size(1))
# [batch, length, latent_dim]
src_enc = self.enc_proj(src_enc).mul(masks.unsqueeze(2))
# [batch, latent_dim]
batch = src_sents.size(0)
idx = lengths - 1
batch_idx = torch.arange(0, batch).long().to(idx.device)
ctx = src_enc[batch_idx, idx]
return src_enc, ctx
class RecurrentEncoder(Encoder):
"""
Src Encoder to encode source sentence with Recurrent Neural Networks
"""
def __init__(self, vocab_size, embed_dim, padding_idx, rnn_mode, num_layers, latent_dim, hidden_size, dropout=0.0):
super(RecurrentEncoder, self).__init__(vocab_size, embed_dim, padding_idx)
self.core = RecurrentCore(self.embed, rnn_mode, num_layers, latent_dim, hidden_size, dropout=dropout)
@overrides
def forward(self, src_sents, masks=None) -> Tuple[torch.Tensor, torch.Tensor]:
src_enc, ctx = self.core(src_sents, masks=masks)
return src_enc, ctx
def init(self, src_sents, masks=None, init_scale=1.0) -> torch.Tensor:
with torch.no_grad():
src_enc, _ = self.core(src_sents, masks=masks)
return src_enc
@classmethod
def from_params(cls, params: Dict) -> "RecurrentEncoder":
return RecurrentEncoder(**params)
RecurrentEncoder.register('rnn')
| 2,897 | 36.153846 | 119 | py |
flowseq | flowseq-master/flownmt/modules/posteriors/shift_rnn.py | from overrides import overrides
from typing import Tuple, Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from flownmt.nnet.weightnorm import LinearWeightNorm
from flownmt.modules.posteriors.posterior import Posterior
from flownmt.nnet.attention import GlobalAttention
class ShiftRecurrentCore(nn.Module):
def __init__(self, embed, rnn_mode, num_layers, latent_dim, hidden_size, bidirectional=True, use_attn=False, dropout=0.0, dropword=0.0):
super(ShiftRecurrentCore, self).__init__()
if rnn_mode == 'RNN':
RNN = nn.RNN
elif rnn_mode == 'LSTM':
RNN = nn.LSTM
elif rnn_mode == 'GRU':
RNN = nn.GRU
else:
raise ValueError('Unknown RNN mode: %s' % rnn_mode)
assert hidden_size % 2 == 0
self.tgt_embed = embed
assert num_layers == 1
self.bidirectional = bidirectional
if bidirectional:
self.rnn = RNN(embed.embedding_dim, hidden_size // 2, num_layers=1, batch_first=True, bidirectional=True)
else:
self.rnn = RNN(embed.embedding_dim, hidden_size, num_layers=1, batch_first=True, bidirectional=False)
self.use_attn = use_attn
if use_attn:
self.attn = GlobalAttention(latent_dim, hidden_size, hidden_size)
self.ctx_proj = nn.Sequential(nn.Linear(hidden_size * 2, hidden_size), nn.ELU())
else:
self.ctx_proj = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.ELU())
self.dropout = dropout
self.dropout2d = nn.Dropout2d(dropword) if dropword > 0. else None # drop entire tokens
self.mu = LinearWeightNorm(hidden_size, latent_dim, bias=True)
self.logvar = LinearWeightNorm(hidden_size, latent_dim, bias=True)
@overrides
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
tgt_embed = self.tgt_embed(tgt_sents)
if self.dropout2d is not None:
tgt_embed = self.dropout2d(tgt_embed)
lengths = tgt_masks.sum(dim=1).long()
packed_embed = pack_padded_sequence(tgt_embed, lengths, batch_first=True, enforce_sorted=False)
packed_enc, _ = self.rnn(packed_embed)
tgt_enc, _ = pad_packed_sequence(packed_enc, batch_first=True, total_length=tgt_masks.size(1))
if self.bidirectional:
# split into fwd and bwd
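            # shift the forward states one step right and the backward states one step
            # left before re-concatenating, so the representation at position t only
            # depends on tokens before and after t, never on the t-th token itself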
fwd_tgt_enc, bwd_tgt_enc = tgt_enc.chunk(2, dim=2) # (batch_size, seq_len, hidden_size // 2)
pad_vector = fwd_tgt_enc.new_zeros((fwd_tgt_enc.size(0), 1, fwd_tgt_enc.size(2)))
pad_fwd_tgt_enc = torch.cat([pad_vector, fwd_tgt_enc], dim=1)
pad_bwd_tgt_enc = torch.cat([bwd_tgt_enc, pad_vector], dim=1)
tgt_enc = torch.cat([pad_fwd_tgt_enc[:, :-1], pad_bwd_tgt_enc[:, 1:]], dim=2)
else:
pad_vector = tgt_enc.new_zeros((tgt_enc.size(0), 1, tgt_enc.size(2)))
tgt_enc = torch.cat([pad_vector, tgt_enc], dim=1)[:, :-1]
if self.use_attn:
ctx = self.attn(tgt_enc, src_enc, key_mask=src_masks.eq(0))
ctx = torch.cat([tgt_enc, ctx], dim=2)
else:
ctx = tgt_enc
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
mu = self.mu(ctx) * tgt_masks.unsqueeze(2)
logvar = self.logvar(ctx) * tgt_masks.unsqueeze(2)
return mu, logvar
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True):
with torch.no_grad():
tgt_embed = self.tgt_embed(tgt_sents)
if self.dropout2d is not None:
tgt_embed = self.dropout2d(tgt_embed)
lengths = tgt_masks.sum(dim=1).long()
packed_embed = pack_padded_sequence(tgt_embed, lengths, batch_first=True, enforce_sorted=False)
packed_enc, _ = self.rnn(packed_embed)
tgt_enc, _ = pad_packed_sequence(packed_enc, batch_first=True, total_length=tgt_masks.size(1))
if self.bidirectional:
fwd_tgt_enc, bwd_tgt_enc = tgt_enc.chunk(2, dim=2) # (batch_size, seq_len, hidden_size // 2)
pad_vector = fwd_tgt_enc.new_zeros((fwd_tgt_enc.size(0), 1, fwd_tgt_enc.size(2)))
pad_fwd_tgt_enc = torch.cat([pad_vector, fwd_tgt_enc], dim=1)
pad_bwd_tgt_enc = torch.cat([bwd_tgt_enc, pad_vector], dim=1)
tgt_enc = torch.cat([pad_fwd_tgt_enc[:, :-1], pad_bwd_tgt_enc[:, 1:]], dim=2)
else:
pad_vector = tgt_enc.new_zeros((tgt_enc.size(0), 1, tgt_enc.size(2)))
tgt_enc = torch.cat([pad_vector, tgt_enc], dim=1)[:, :-1]
if self.use_attn:
ctx = self.attn.init(tgt_enc, src_enc, key_mask=src_masks.eq(0), init_scale=init_scale)
ctx = torch.cat([tgt_enc, ctx], dim=2)
else:
ctx = tgt_enc
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
mu = self.mu.init(ctx, init_scale=0.05 * init_scale) if init_mu else self.mu(ctx)
logvar = self.logvar.init(ctx, init_scale=0.05 * init_scale) if init_var else self.logvar(ctx)
mu = mu * tgt_masks.unsqueeze(2)
logvar = logvar * tgt_masks.unsqueeze(2)
return mu, logvar
class ShiftRecurrentPosterior(Posterior):
"""
Posterior with Recurrent Neural Networks
"""
def __init__(self, vocab_size, embed_dim, padding_idx, rnn_mode, num_layers, latent_dim, hidden_size,
bidirectional=True, use_attn=False, dropout=0.0, dropword=0.0, _shared_embed=None):
super(ShiftRecurrentPosterior, self).__init__(vocab_size, embed_dim, padding_idx, _shared_embed=_shared_embed)
self.core = ShiftRecurrentCore(self.tgt_embed, rnn_mode, num_layers, latent_dim, hidden_size,
bidirectional=bidirectional, use_attn=use_attn, dropout=dropout, dropword=dropword)
def target_embed_weight(self):
if isinstance(self.core, nn.DataParallel):
            return self.core.module.tgt_embed.weight
else:
return self.core.tgt_embed.weight
@overrides
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
return self.core(tgt_sents, tgt_masks, src_enc, src_masks)
@overrides
def sample(self, tgt_sents: torch.Tensor, tgt_masks: torch.Tensor,
src_enc: torch.Tensor, src_masks: torch.Tensor,
nsamples: int =1, random=True) -> Tuple[torch.Tensor, torch.Tensor]:
mu, logvar = self.core(tgt_sents, tgt_masks, src_enc, src_masks)
z, eps = Posterior.reparameterize(mu, logvar, tgt_masks, nsamples=nsamples, random=random)
log_probs = Posterior.log_probability(z, eps, mu, logvar, tgt_masks)
return z, log_probs
@overrides
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True) -> Tuple[torch.Tensor, torch.Tensor]:
mu, logvar = self.core.init(tgt_sents, tgt_masks, src_enc, src_masks,
init_scale=init_scale, init_mu=init_mu, init_var=init_var)
z, eps = Posterior.reparameterize(mu, logvar, tgt_masks, random=True)
log_probs = Posterior.log_probability(z, eps, mu, logvar, tgt_masks)
z = z.squeeze(1)
log_probs = log_probs.squeeze(1)
return z, log_probs
@classmethod
def from_params(cls, params: Dict) -> "ShiftRecurrentPosterior":
return ShiftRecurrentPosterior(**params)
ShiftRecurrentPosterior.register('shift_rnn')
| 7,669 | 49.460526 | 143 | py |
flowseq | flowseq-master/flownmt/modules/posteriors/transformer.py | from overrides import overrides
from typing import Tuple, Dict
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.nnet.weightnorm import LinearWeightNorm
from flownmt.nnet.transformer import TransformerDecoderLayer
from flownmt.nnet.positional_encoding import PositionalEncoding
from flownmt.modules.posteriors.posterior import Posterior
class TransformerCore(nn.Module):
def __init__(self, embed, num_layers, latent_dim, hidden_size, heads, dropout=0.0, dropword=0.0, max_length=100):
super(TransformerCore, self).__init__()
self.tgt_embed = embed
self.padding_idx = embed.padding_idx
embed_dim = embed.embedding_dim
self.embed_scale = math.sqrt(embed_dim)
assert embed_dim == latent_dim
layers = [TransformerDecoderLayer(latent_dim, hidden_size, heads, dropout=dropout) for _ in range(num_layers)]
self.layers = nn.ModuleList(layers)
self.pos_enc = PositionalEncoding(latent_dim, self.padding_idx, max_length + 1)
self.dropword = dropword # drop entire tokens
self.mu = LinearWeightNorm(latent_dim, latent_dim, bias=True)
self.logvar = LinearWeightNorm(latent_dim, latent_dim, bias=True)
self.reset_parameters()
def reset_parameters(self):
pass
@overrides
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
x = self.embed_scale * self.tgt_embed(tgt_sents)
x = F.dropout2d(x, p=self.dropword, training=self.training)
x += self.pos_enc(tgt_sents)
x = F.dropout(x, p=0.2, training=self.training)
mask = tgt_masks.eq(0)
key_mask = src_masks.eq(0)
for layer in self.layers:
x = layer(x, mask, src_enc, key_mask)
mu = self.mu(x) * tgt_masks.unsqueeze(2)
logvar = self.logvar(x) * tgt_masks.unsqueeze(2)
return mu, logvar
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True):
with torch.no_grad():
x = self.embed_scale * self.tgt_embed(tgt_sents)
x = F.dropout2d(x, p=self.dropword, training=self.training)
x += self.pos_enc(tgt_sents)
x = F.dropout(x, p=0.2, training=self.training)
mask = tgt_masks.eq(0)
key_mask = src_masks.eq(0)
for layer in self.layers:
x = layer.init(x, mask, src_enc, key_mask, init_scale=init_scale)
x = x * tgt_masks.unsqueeze(2)
mu = self.mu.init(x, init_scale=0.05 * init_scale) if init_mu else self.mu(x)
logvar = self.logvar.init(x, init_scale=0.05 * init_scale) if init_var else self.logvar(x)
mu = mu * tgt_masks.unsqueeze(2)
logvar = logvar * tgt_masks.unsqueeze(2)
return mu, logvar
class TransformerPosterior(Posterior):
"""
Posterior with Transformer
"""
def __init__(self, vocab_size, embed_dim, padding_idx, num_layers, latent_dim, hidden_size, heads,
dropout=0.0, dropword=0.0, max_length=100, _shared_embed=None):
super(TransformerPosterior, self).__init__(vocab_size, embed_dim, padding_idx, _shared_embed=_shared_embed)
self.core = TransformerCore(self.tgt_embed, num_layers, latent_dim, hidden_size, heads,
dropout=dropout, dropword=dropword, max_length=max_length)
def target_embed_weight(self):
if isinstance(self.core, nn.DataParallel):
            return self.core.module.tgt_embed.weight
else:
return self.core.tgt_embed.weight
@overrides
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
return self.core(tgt_sents, tgt_masks, src_enc, src_masks)
@overrides
def sample(self, tgt_sents: torch.Tensor, tgt_masks: torch.Tensor,
src_enc: torch.Tensor, src_masks: torch.Tensor,
nsamples: int =1, random=True) -> Tuple[torch.Tensor, torch.Tensor]:
mu, logvar = self.core(tgt_sents, tgt_masks, src_enc, src_masks)
z, eps = Posterior.reparameterize(mu, logvar, tgt_masks, nsamples=nsamples, random=random)
log_probs = Posterior.log_probability(z, eps, mu, logvar, tgt_masks)
return z, log_probs
@overrides
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True) -> Tuple[torch.Tensor, torch.Tensor]:
mu, logvar = self.core.init(tgt_sents, tgt_masks, src_enc, src_masks,
init_scale=init_scale, init_mu=init_mu, init_var=init_var)
z, eps = Posterior.reparameterize(mu, logvar, tgt_masks, random=True)
log_probs = Posterior.log_probability(z, eps, mu, logvar, tgt_masks)
z = z.squeeze(1)
log_probs = log_probs.squeeze(1)
return z, log_probs
@classmethod
def from_params(cls, params: Dict) -> "TransformerPosterior":
return TransformerPosterior(**params)
TransformerPosterior.register('transformer')
| 5,026 | 42.713043 | 143 | py |
flowseq | flowseq-master/flownmt/modules/posteriors/__init__.py | from flownmt.modules.posteriors.posterior import Posterior
from flownmt.modules.posteriors.rnn import RecurrentPosterior
from flownmt.modules.posteriors.shift_rnn import ShiftRecurrentPosterior
from flownmt.modules.posteriors.transformer import TransformerPosterior
| 266 | 52.4 | 72 | py |
flowseq | flowseq-master/flownmt/modules/posteriors/rnn.py | from overrides import overrides
from typing import Tuple, Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from flownmt.nnet.weightnorm import LinearWeightNorm
from flownmt.modules.posteriors.posterior import Posterior
from flownmt.nnet.attention import GlobalAttention
class RecurrentCore(nn.Module):
def __init__(self, embed, rnn_mode, num_layers, latent_dim, hidden_size, use_attn=False, dropout=0.0, dropword=0.0):
super(RecurrentCore, self).__init__()
if rnn_mode == 'RNN':
RNN = nn.RNN
elif rnn_mode == 'LSTM':
RNN = nn.LSTM
elif rnn_mode == 'GRU':
RNN = nn.GRU
else:
raise ValueError('Unknown RNN mode: %s' % rnn_mode)
assert hidden_size % 2 == 0
self.tgt_embed = embed
self.rnn = RNN(embed.embedding_dim, hidden_size // 2,
num_layers=num_layers, batch_first=True, bidirectional=True)
self.use_attn = use_attn
if use_attn:
self.attn = GlobalAttention(latent_dim, hidden_size, hidden_size, hidden_features=hidden_size)
self.ctx_proj = nn.Sequential(nn.Linear(hidden_size * 2, hidden_size), nn.ELU())
else:
self.ctx_proj = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.ELU())
self.dropout = dropout
self.dropout2d = nn.Dropout2d(dropword) if dropword > 0. else None # drop entire tokens
self.mu = LinearWeightNorm(hidden_size, latent_dim, bias=True)
self.logvar = LinearWeightNorm(hidden_size, latent_dim, bias=True)
@overrides
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
tgt_embed = self.tgt_embed(tgt_sents)
if self.dropout2d is not None:
tgt_embed = self.dropout2d(tgt_embed)
lengths = tgt_masks.sum(dim=1).long()
packed_embed = pack_padded_sequence(tgt_embed, lengths, batch_first=True, enforce_sorted=False)
packed_enc, _ = self.rnn(packed_embed)
tgt_enc, _ = pad_packed_sequence(packed_enc, batch_first=True, total_length=tgt_masks.size(1))
if self.use_attn:
ctx = self.attn(tgt_enc, src_enc, key_mask=src_masks.eq(0))
ctx = torch.cat([tgt_enc, ctx], dim=2)
else:
ctx = tgt_enc
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
mu = self.mu(ctx) * tgt_masks.unsqueeze(2)
logvar = self.logvar(ctx) * tgt_masks.unsqueeze(2)
return mu, logvar
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True):
with torch.no_grad():
tgt_embed = self.tgt_embed(tgt_sents)
if self.dropout2d is not None:
tgt_embed = self.dropout2d(tgt_embed)
lengths = tgt_masks.sum(dim=1).long()
packed_embed = pack_padded_sequence(tgt_embed, lengths, batch_first=True, enforce_sorted=False)
packed_enc, _ = self.rnn(packed_embed)
tgt_enc, _ = pad_packed_sequence(packed_enc, batch_first=True, total_length=tgt_masks.size(1))
if self.use_attn:
ctx = self.attn.init(tgt_enc, src_enc, key_mask=src_masks.eq(0), init_scale=init_scale)
ctx = torch.cat([tgt_enc, ctx], dim=2)
else:
ctx = tgt_enc
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
mu = self.mu.init(ctx, init_scale=0.05 * init_scale) if init_mu else self.mu(ctx)
logvar = self.logvar.init(ctx, init_scale=0.05 * init_scale) if init_var else self.logvar(ctx)
mu = mu * tgt_masks.unsqueeze(2)
logvar = logvar * tgt_masks.unsqueeze(2)
return mu, logvar
class RecurrentPosterior(Posterior):
"""
Posterior with Recurrent Neural Networks
"""
def __init__(self, vocab_size, embed_dim, padding_idx, rnn_mode, num_layers, latent_dim, hidden_size,
use_attn=False, dropout=0.0, dropword=0.0, _shared_embed=None):
super(RecurrentPosterior, self).__init__(vocab_size, embed_dim, padding_idx, _shared_embed=_shared_embed)
self.core = RecurrentCore(self.tgt_embed, rnn_mode, num_layers, latent_dim, hidden_size,
use_attn=use_attn, dropout=dropout, dropword=dropword)
def target_embed_weight(self):
if isinstance(self.core, nn.DataParallel):
            return self.core.module.tgt_embed.weight
else:
return self.core.tgt_embed.weight
@overrides
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
return self.core(tgt_sents, tgt_masks, src_enc, src_masks)
@overrides
def sample(self, tgt_sents: torch.Tensor, tgt_masks: torch.Tensor,
src_enc: torch.Tensor, src_masks: torch.Tensor,
nsamples: int =1, random=True) -> Tuple[torch.Tensor, torch.Tensor]:
mu, logvar = self.core(tgt_sents, tgt_masks, src_enc, src_masks)
z, eps = Posterior.reparameterize(mu, logvar, tgt_masks, nsamples=nsamples, random=random)
log_probs = Posterior.log_probability(z, eps, mu, logvar, tgt_masks)
return z, log_probs
@overrides
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True) -> Tuple[torch.Tensor, torch.Tensor]:
mu, logvar = self.core.init(tgt_sents, tgt_masks, src_enc, src_masks,
init_scale=init_scale, init_mu=init_mu, init_var=init_var)
z, eps = Posterior.reparameterize(mu, logvar, tgt_masks, random=True)
log_probs = Posterior.log_probability(z, eps, mu, logvar, tgt_masks)
z = z.squeeze(1)
log_probs = log_probs.squeeze(1)
return z, log_probs
@classmethod
def from_params(cls, params: Dict) -> "RecurrentPosterior":
return RecurrentPosterior(**params)
RecurrentPosterior.register('rnn')
| 6,032 | 47.264 | 143 | py |
flowseq | flowseq-master/flownmt/modules/posteriors/posterior.py | import math
from typing import Dict, Tuple
import torch
import torch.nn as nn
class Posterior(nn.Module):
"""
posterior class
"""
_registry = dict()
def __init__(self, vocab_size, embed_dim, padding_idx, _shared_embed=None):
super(Posterior, self).__init__()
if _shared_embed is None:
self.tgt_embed = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
self.reset_parameters()
else:
self.tgt_embed = _shared_embed
def reset_parameters(self):
nn.init.uniform_(self.tgt_embed.weight, -0.1, 0.1)
if self.tgt_embed.padding_idx is not None:
with torch.no_grad():
self.tgt_embed.weight[self.tgt_embed.padding_idx].fill_(0)
def target_embed_weight(self):
raise NotImplementedError
@staticmethod
def reparameterize(mu, logvar, mask, nsamples=1, random=True):
# [batch, length, dim]
size = mu.size()
std = logvar.mul(0.5).exp()
# [batch, nsamples, length, dim]
if random:
eps = torch.randn(size[0], nsamples, *size[1:], device=mu.device)
eps *= mask.view(size[0], 1, size[1], 1)
else:
eps = mu.new_zeros(size[0], nsamples, *size[1:])
return eps.mul(std.unsqueeze(1)).add(mu.unsqueeze(1)), eps
@staticmethod
def log_probability(z, eps, mu, logvar, mask):
size = eps.size()
nz = size[3]
# [batch, nsamples, length, nz]
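        # since z = mu + std * eps, (z - mu)^2 / var equals eps^2, so each unmasked
        # position contributes -0.5 * (logvar + eps^2 + log(2*pi)) per dimension;
        # padded positions contribute zero because mu, logvar and eps are masked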
log_probs = logvar.unsqueeze(1) + eps.pow(2)
# [batch, 1]
cc = mask.sum(dim=1, keepdim=True) * (math.log(math.pi * 2.) * nz)
# [batch, nsamples, length * nz] --> [batch, nsamples]
log_probs = log_probs.view(size[0], size[1], -1).sum(dim=2) + cc
return log_probs * -0.5
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
raise NotImplementedError
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
def sample(self, tgt_sents: torch.Tensor, tgt_masks: torch.Tensor,
src_enc: torch.Tensor, src_masks: torch.Tensor,
nsamples: int =1, random=True) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
tgt_sents: Tensor [batch, tgt_length]
tensor for target sentences
tgt_masks: Tensor [batch, tgt_length]
tensor for target masks
src_enc: Tensor [batch, src_length, hidden_size]
tensor for source encoding
src_masks: Tensor [batch, src_length]
tensor for source masks
nsamples: int
number of samples
random: bool
if True, perform random sampling. Otherwise, return mean.
Returns: Tensor1, Tensor2
Tensor1: samples from the posterior [batch, nsamples, tgt_length, nz]
Tensor2: log probabilities [batch, nsamples]
"""
raise NotImplementedError
@classmethod
def register(cls, name: str):
Posterior._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return Posterior._registry[name]
@classmethod
def from_params(cls, params: Dict):
raise NotImplementedError
| 3,375 | 33.10101 | 143 | py |
flowseq | flowseq-master/flownmt/flows/nmt.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
from flownmt.flows.flow import Flow
from flownmt.flows.actnorm import ActNormFlow
from flownmt.flows.linear import InvertibleMultiHeadFlow
from flownmt.flows.couplings.coupling import NICE
from flownmt.utils import squeeze, unsqueeze, split, unsplit
class NMTFlowPOSAttnUnit(Flow):
"""
Unit for POS Attention
"""
def __init__(self, features, src_features, hidden_features=None, inverse=False,
transform='affine', heads=1, max_length=100, dropout=0.0):
super(NMTFlowPOSAttnUnit, self).__init__(inverse)
self.actnorm = ActNormFlow(features, inverse=inverse)
self.coupling_up = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='continuous', order='up',
transform=transform, type='self_attn', heads=heads,
dropout=dropout, pos_enc='attn', max_length=max_length)
self.coupling_down = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='continuous', order='down',
transform=transform, type='self_attn', heads=heads,
dropout=dropout, pos_enc='add', max_length=max_length)
@overrides
def forward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
out, logdet_accum = self.actnorm.forward(input, tgt_mask)
out, logdet = self.coupling_up.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling_down.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def backward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# block1 dim=2, type=continuous
out, logdet_accum = self.coupling_down.backward(input, tgt_mask, src, src_mask)
out, logdet = self.coupling_up.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.actnorm.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def init(self, data: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
out, logdet_accum = self.actnorm.init(data, tgt_mask, init_scale=init_scale)
out, logdet = self.coupling_up.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling_down.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
class NMTFlowUnit(Flow):
"""
One Unit of NMTFlowStep
"""
def __init__(self, features, src_features, hidden_features=None, inverse=False, transform='affine',
coupling_type='conv', kernel_size=3, rnn_mode='LSTM', heads=1, max_length=100,
dropout=0.0, split_timestep=True):
super(NMTFlowUnit, self).__init__(inverse)
# dim=2, type=continuous
self.coupling1_up = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='continuous', order='up',
transform=transform, type=coupling_type, kernel=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, pos_enc='add', max_length=max_length)
self.coupling1_down = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='continuous', order='down',
transform=transform, type=coupling_type, kernel=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, pos_enc='add', max_length=max_length)
self.actnorm1 = ActNormFlow(features, inverse=inverse)
# dim=2, type=skip
self.coupling2_up = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='skip', order='up',
transform=transform, type=coupling_type, kernel=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, pos_enc='add', max_length=max_length)
self.coupling2_down = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='skip', order='down',
transform=transform, type=coupling_type, kernel=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, pos_enc='add', max_length=max_length)
self.split_timestep = split_timestep
if split_timestep:
self.actnorm2 = ActNormFlow(features, inverse=inverse)
# dim=1, type=skip
self.coupling3_up = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=1, split_type='skip', order='up',
transform=transform, type=coupling_type, kernel=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, pos_enc='add', max_length=max_length)
self.coupling3_down = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=1, split_type='skip', order='down',
transform=transform, type=coupling_type, kernel=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, pos_enc='add', max_length=max_length)
else:
self.actnorm2 = None
self.coupling3_up = None
self.coupling3_down = None
@overrides
def forward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# block1 dim=2, type=continuous
out, logdet_accum = self.coupling1_up.forward(input, tgt_mask, src, src_mask)
out, logdet = self.coupling1_down.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
out, logdet = self.actnorm1.forward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
# block2 dim=2, type=skip
out, logdet = self.coupling2_up.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling2_down.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
if self.split_timestep:
# ================================================================================
out, logdet = self.actnorm2.forward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
# block3 dim=1, type=skip
out, logdet = self.coupling3_up.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling3_down.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def backward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
if self.split_timestep:
# block3 dim=1, type=skip
out, logdet_accum = self.coupling3_down.backward(input, tgt_mask, src, src_mask)
out, logdet = self.coupling3_up.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
out, logdet = self.actnorm2.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
else:
out, logdet_accum = input, 0
# block2 dim=2, type=skip
out, logdet = self.coupling2_down.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling2_up.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
out, logdet = self.actnorm1.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
# block1 dim=2, type=continuous
out, logdet = self.coupling1_down.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling1_up.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def init(self, data: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
# block1 dim=2, type=continuous
out, logdet_accum = self.coupling1_up.init(data, tgt_mask, src, src_mask, init_scale=init_scale)
out, logdet = self.coupling1_down.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
# ================================================================================
out, logdet = self.actnorm1.init(out, tgt_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
# ================================================================================
# block2 dim=2, type=skip
out, logdet = self.coupling2_up.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling2_down.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
if self.split_timestep:
# ================================================================================
out, logdet = self.actnorm2.init(out, tgt_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
# ================================================================================
# block3 dim=1, type=skip
out, logdet = self.coupling3_up.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling3_down.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
class NMTFlowStep(Flow):
"""
One step of NMTFlow
"""
def __init__(self, features, src_features, hidden_features=None, inverse=False, transform='affine',
coupling_type='conv', kernel_size=3, rnn_mode='LSTM', heads=1, max_length=100,
dropout=0.0, split_timestep=True):
super(NMTFlowStep, self).__init__(inverse)
self.actnorm1 = ActNormFlow(features, inverse=inverse)
self.linear1 = InvertibleMultiHeadFlow(features, type='A', inverse=inverse)
self.unit1 = NMTFlowUnit(features, src_features, hidden_features=hidden_features, inverse=inverse,
transform=transform, coupling_type=coupling_type, kernel_size=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, max_length=max_length, split_timestep=split_timestep)
self.actnorm2 = ActNormFlow(features, inverse=inverse)
self.linear2 = InvertibleMultiHeadFlow(features, type='B', inverse=inverse)
self.unit2 = NMTFlowUnit(features, src_features, hidden_features=hidden_features, inverse=inverse,
transform=transform, coupling_type=coupling_type, kernel_size=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, max_length=max_length, split_timestep=split_timestep)
def sync(self):
self.linear1.sync()
self.linear2.sync()
@overrides
def forward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
out, logdet_accum = self.actnorm1.forward(input, tgt_mask)
out, logdet = self.linear1.forward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.unit1.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.actnorm2.forward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.linear2.forward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.unit2.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def backward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
out, logdet_accum = self.unit2.backward(input, tgt_mask, src, src_mask)
out, logdet = self.linear2.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.actnorm2.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.unit1.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.linear1.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.actnorm1.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def init(self, data: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
out, logdet_accum = self.actnorm1.init(data, tgt_mask, init_scale=init_scale)
out, logdet = self.linear1.init(out, tgt_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.unit1.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.actnorm2.init(out, tgt_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.linear2.init(out, tgt_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.unit2.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
class NMTFlowBlock(Flow):
"""
NMT Flow Block
"""
def __init__(self, num_steps, features, src_features, hidden_features=None, inverse=False, prior=False, factor=2,
transform='affine', coupling_type='conv', kernel_size=3, rnn_mode='LSTM', heads=1, max_length=100,
dropout=0.0, pos_attn=False):
super(NMTFlowBlock, self).__init__(inverse)
if pos_attn:
self.pos_attn = NMTFlowPOSAttnUnit(features, src_features,hidden_features=hidden_features,
inverse=inverse, transform=transform, heads=heads,
max_length=max_length, dropout=dropout)
else:
self.pos_attn = None
steps = [NMTFlowStep(features, src_features, hidden_features=hidden_features, inverse=inverse,
transform=transform, coupling_type=coupling_type, kernel_size=kernel_size,
rnn_mode=rnn_mode, heads=heads, max_length=max_length,
dropout=dropout, split_timestep=prior) for _ in range(num_steps)]
self.steps = nn.ModuleList(steps)
if prior:
            assert features % factor == 0, 'features {} should be divisible by factor {}'.format(features, factor)
self.prior = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='continuous', order='up', factor=factor,
transform=transform, type=coupling_type, kernel=kernel_size,
heads=heads, rnn_mode=rnn_mode, pos_enc='add', max_length=max_length, dropout=dropout)
self.z_features = features - features // factor
assert self.z_features == self.prior.z1_channels
else:
self.prior = None
self.z_features = features
def sync(self):
for step in self.steps:
step.sync()
@overrides
def forward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# [batch]
if self.pos_attn is None:
logdet_accum = input.new_zeros(input.size(0))
out = input
else:
out, logdet_accum = self.pos_attn.forward(input, tgt_mask, src, src_mask)
for step in self.steps:
out, logdet = step.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
if self.prior is not None:
out, logdet = self.prior.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def backward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
if self.prior is None:
logdet_accum = input.new_zeros(input.size(0))
out = input
else:
out, logdet_accum = self.prior.backward(input, tgt_mask, src, src_mask)
for step in reversed(self.steps):
out, logdet = step.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
if self.pos_attn is not None:
out, logdet = self.pos_attn.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def init(self, data: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
# [batch]
if self.pos_attn is None:
logdet_accum = data.new_zeros(data.size(0))
out = data
else:
out, logdet_accum = self.pos_attn.init(data, tgt_mask, src, src_mask, init_scale=init_scale)
for step in self.steps:
out, logdet = step.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
if self.prior is not None:
out, logdet = self.prior.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
class NMTFlow(Flow):
"""
NMT Flow
"""
def __init__(self, levels, num_steps, features, src_features, factors, hidden_features=None, inverse=False,
transform='affine', coupling_type='conv', kernel_size=3, rnn_mode='LSTM', heads=1, pos_enc='add', max_length=100, dropout=0.0):
super(NMTFlow, self).__init__(inverse)
assert levels == len(num_steps)
assert levels == len(factors) + 1
blocks = []
self.levels = levels
self.features = features
pos_attn = coupling_type == 'self_attn' and pos_enc == 'attn'
for level in range(levels):
if level == levels - 1:
block = NMTFlowBlock(num_steps[level], features, src_features, hidden_features=hidden_features,
inverse=inverse, prior=False, coupling_type=coupling_type, transform=transform,
kernel_size=kernel_size, rnn_mode=rnn_mode, heads=heads, max_length=max_length,
dropout=dropout, pos_attn=pos_attn)
else:
factor = factors[level]
block = NMTFlowBlock(num_steps[level], features, src_features, hidden_features=hidden_features,
inverse=inverse, prior=True, factor=factor, coupling_type=coupling_type,
transform=transform,kernel_size=kernel_size, rnn_mode=rnn_mode, heads=heads,
max_length=max_length, dropout=dropout, pos_attn=pos_attn)
features = block.z_features * 2
blocks.append(block)
self.blocks = nn.ModuleList(blocks)
def sync(self):
for block in self.blocks:
block.sync()
@overrides
def forward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
logdet_accum = input.new_zeros(input.size(0))
out = input
outputs = []
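        # multi-scale structure: at every level except the last, the features are split,
        # one part is stored as a latent output and the other is squeezed (adjacent time
        # steps merged) before entering the next block; the second loop below reverses
        # the squeezing and splitting so the output has the same shape as the input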
for i, block in enumerate(self.blocks):
out, logdet = block.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
if i < self.levels - 1:
out1, out2 = split(out, block.z_features)
outputs.append(out2)
out, tgt_mask = squeeze(out1, tgt_mask)
for _ in range(self.levels - 1):
out2 = outputs.pop()
out = unsqueeze(out)
out = unsplit([out, out2])
assert len(outputs) == 0
return out, logdet_accum
@overrides
def backward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
outputs = []
masks = []
out = input
for i in range(self.levels - 1):
out1, out2 = split(out, self.blocks[i].z_features)
outputs.append(out2)
masks.append(tgt_mask)
out, tgt_mask = squeeze(out1, tgt_mask)
logdet_accum = input.new_zeros(input.size(0))
for i, block in enumerate(reversed(self.blocks)):
if i > 0:
out2 = outputs.pop()
tgt_mask = masks.pop()
out1 = unsqueeze(out)
out = unsplit([out1, out2])
out, logdet = block.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
assert len(outputs) == 0
assert len(masks) == 0
return out, logdet_accum
@overrides
def init(self, data: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
logdet_accum = data.new_zeros(data.size(0))
out = data
outputs = []
for i, block in enumerate(self.blocks):
out, logdet = block.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
if i < self.levels - 1:
out1, out2 = split(out, block.z_features)
outputs.append(out2)
out, tgt_mask = squeeze(out1, tgt_mask)
for _ in range(self.levels - 1):
out2 = outputs.pop()
out = unsqueeze(out)
out = unsplit([out, out2])
assert len(outputs) == 0
return out, logdet_accum
@classmethod
def from_params(cls, params: Dict) -> "NMTFlow":
return NMTFlow(**params)
NMTFlow.register('nmt')
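# A hedged construction sketch (hyper-parameters below are illustrative; the real
# values come from a flowseq config file):
#
#   flow = NMTFlow.from_params({
#       'levels': 3, 'num_steps': [4, 4, 2], 'factors': [2, 2],
#       'features': 256, 'src_features': 256, 'hidden_features': 512,
#       'inverse': True, 'coupling_type': 'self_attn', 'heads': 4, 'pos_enc': 'attn'})
#   # levels must equal len(num_steps) and len(factors) + 1 (see the asserts in __init__)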
| 24,648 | 44.815985 | 144 | py |
flowseq | flowseq-master/flownmt/flows/flow.py | from typing import Dict, Tuple
import torch
import torch.nn as nn
class Flow(nn.Module):
"""
Normalizing Flow base class
"""
_registry = dict()
def __init__(self, inverse):
super(Flow, self).__init__()
self.inverse = inverse
def forward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
*input: input [batch, *input_size]
Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch]
out, the output of the flow
logdet, the log determinant of :math:`\partial output / \partial input`
"""
raise NotImplementedError
def backward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
*input: input [batch, *input_size]
Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch]
out, the output of the flow
logdet, the log determinant of :math:`\partial output / \partial input`
"""
raise NotImplementedError
def init(self, *input, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
def fwdpass(self, x: torch.Tensor, *h, init=False, init_scale=1.0, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
x: Tensor
The random variable before flow
h: list of object
other conditional inputs
init: bool
perform initialization or not (default: False)
init_scale: float
initial scale (default: 1.0)
Returns: y: Tensor, logdet: Tensor
y, the random variable after flow
logdet, the log determinant of :math:`\partial y / \partial x`
Then the density :math:`\log(p(y)) = \log(p(x)) - logdet`
"""
if self.inverse:
if init:
                raise RuntimeError('inverse flow should be initialized with backward pass')
else:
return self.backward(x, *h, **kwargs)
else:
if init:
return self.init(x, *h, init_scale=init_scale, **kwargs)
else:
return self.forward(x, *h, **kwargs)
def bwdpass(self, y: torch.Tensor, *h, init=False, init_scale=1.0, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
y: Tensor
The random variable after flow
h: list of object
other conditional inputs
init: bool
perform initialization or not (default: False)
init_scale: float
initial scale (default: 1.0)
Returns: x: Tensor, logdet: Tensor
x, the random variable before flow
logdet, the log determinant of :math:`\partial x / \partial y`
Then the density :math:`\log(p(y)) = \log(p(x)) + logdet`
"""
if self.inverse:
if init:
return self.init(y, *h, init_scale=init_scale, **kwargs)
else:
return self.forward(y, *h, **kwargs)
else:
if init:
                raise RuntimeError('forward flow should be initialized with forward pass')
else:
return self.backward(y, *h, **kwargs)
@classmethod
def register(cls, name: str):
Flow._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return Flow._registry[name]
@classmethod
def from_params(cls, params: Dict):
raise NotImplementedError
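# Hedged usage sketch (added for illustration; not part of the original module):
# a minimal Flow subclass showing the forward/backward/logdet contract and how
# fwdpass/bwdpass dispatch. The class name and the fixed scale are invented for
# this example only.
if __name__ == '__main__':
    import math
    class _ScaleFlow(Flow):
        def __init__(self, inverse=False):
            super(_ScaleFlow, self).__init__(inverse)
            self.log_scale = math.log(2.0)
        def forward(self, x):
            # y = 2 * x, so logdet = D * log(2) for a [batch, D] input
            logdet = x.new_full((x.size(0),), self.log_scale * x.size(1))
            return x * 2.0, logdet
        def backward(self, y):
            logdet = y.new_full((y.size(0),), -self.log_scale * y.size(1))
            return y / 2.0, logdet
        def init(self, x, init_scale=1.0):
            return self.forward(x)
    flow = _ScaleFlow()
    x = torch.randn(4, 8)
    y, logdet = flow.fwdpass(x)
    x_rec, logdet_inv = flow.bwdpass(y)
    # reconstruction is exact and the two log determinants cancel
    print(torch.allclose(x, x_rec), (logdet + logdet_inv).abs().max().item())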
| 3,608 | 30.657895 | 118 | py |
flowseq | flowseq-master/flownmt/flows/actnorm.py | from overrides import overrides
from typing import Dict, Tuple
import numpy as np
import torch
import torch.nn as nn
from torch.nn import Parameter
from flownmt.flows.flow import Flow
class ActNormFlow(Flow):
def __init__(self, in_features, inverse=False):
super(ActNormFlow, self).__init__(inverse)
self.in_features = in_features
self.log_scale = Parameter(torch.Tensor(in_features))
self.bias = Parameter(torch.Tensor(in_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.log_scale, mean=0, std=0.05)
nn.init.constant_(self.bias, 0.)
@overrides
def forward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, N1, N2, ..., Nl, in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
dim = input.dim()
out = input * self.log_scale.exp() + self.bias
out = out * mask.unsqueeze(dim - 1)
logdet = self.log_scale.sum(dim=0, keepdim=True)
if dim > 2:
num = mask.view(out.size(0), -1).sum(dim=1)
logdet = logdet * num
return out, logdet
@overrides
def backward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, N1, N2, ..., Nl, in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
dim = input.dim()
out = (input - self.bias) * mask.unsqueeze(dim - 1)
out = out.div(self.log_scale.exp() + 1e-8)
logdet = self.log_scale.sum(dim=0, keepdim=True) * -1.0
if dim > 2:
num = mask.view(out.size(0), -1).sum(dim=1)
logdet = logdet * num
return out, logdet
@overrides
def init(self, data: torch.Tensor, mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
data: input: Tensor
input tensor [batch, N1, N2, ..., in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
init_scale: float
initial scale
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
with torch.no_grad():
out, _ = self.forward(data, mask)
mean = out.view(-1, self.in_features).mean(dim=0)
std = out.view(-1, self.in_features).std(dim=0)
inv_stdv = init_scale / (std + 1e-6)
self.log_scale.add_(inv_stdv.log())
self.bias.add_(-mean).mul_(inv_stdv)
return self.forward(data, mask)
@overrides
def extra_repr(self):
return 'inverse={}, in_features={}'.format(self.inverse, self.in_features)
@classmethod
def from_params(cls, params: Dict) -> "ActNormFlow":
return ActNormFlow(**params)
ActNormFlow.register('actnorm')
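# Hedged usage sketch (added for illustration; not part of the original module):
# data-dependent initialization of ActNormFlow followed by a forward/backward
# round trip. Tensor shapes follow the docstrings above; the sizes are arbitrary.
if __name__ == '__main__':
    torch.manual_seed(0)
    batch, length, features = 4, 7, 16
    flow = ActNormFlow(features)
    x = torch.randn(batch, length, features)
    mask = torch.ones(batch, length)
    with torch.no_grad():
        flow.init(x, mask, init_scale=1.0)
        y, logdet = flow.forward(x, mask)
        x_rec, logdet_inv = flow.backward(y, mask)
    print(torch.allclose(x, x_rec, atol=1e-4), (logdet + logdet_inv).abs().max().item())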
| 3,655 | 32.851852 | 112 | py |
flowseq | flowseq-master/flownmt/flows/linear.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from flownmt.flows.flow import Flow
class InvertibleLinearFlow(Flow):
def __init__(self, in_features, inverse=False):
super(InvertibleLinearFlow, self).__init__(inverse)
self.in_features = in_features
self.weight = Parameter(torch.Tensor(in_features, in_features))
self.register_buffer('weight_inv', self.weight.data.clone())
self.reset_parameters()
def reset_parameters(self):
nn.init.orthogonal_(self.weight)
self.sync()
def sync(self):
self.weight_inv.copy_(self.weight.data.inverse())
@overrides
def forward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, N1, N2, ..., Nl, in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
dim = input.dim()
# [batch, N1, N2, ..., in_features]
out = F.linear(input, self.weight)
_, logdet = torch.slogdet(self.weight)
if dim > 2:
num = mask.view(out.size(0), -1).sum(dim=1)
logdet = logdet * num
return out, logdet
@overrides
def backward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, N1, N2, ..., Nl, in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
dim = input.dim()
# [batch, N1, N2, ..., in_features]
out = F.linear(input, self.weight_inv)
_, logdet = torch.slogdet(self.weight_inv)
if dim > 2:
num = mask.view(out.size(0), -1).sum(dim=1)
logdet = logdet * num
return out, logdet
@overrides
def init(self, data: torch.Tensor, mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
with torch.no_grad():
            return self.forward(data, mask)
@overrides
def extra_repr(self):
return 'inverse={}, in_features={}'.format(self.inverse, self.in_features)
@classmethod
def from_params(cls, params: Dict) -> "InvertibleLinearFlow":
return InvertibleLinearFlow(**params)
class InvertibleMultiHeadFlow(Flow):
@staticmethod
def _get_heads(in_features):
units = [32, 16, 8]
for unit in units:
if in_features % unit == 0:
return in_features // unit
assert in_features < 8, 'features={}'.format(in_features)
return 1
def __init__(self, in_features, heads=None, type='A', inverse=False):
super(InvertibleMultiHeadFlow, self).__init__(inverse)
self.in_features = in_features
if heads is None:
heads = InvertibleMultiHeadFlow._get_heads(in_features)
self.heads = heads
self.type = type
assert in_features % heads == 0, 'features ({}) should be divided by heads ({})'.format(in_features, heads)
assert type in ['A', 'B'], 'type should belong to [A, B]'
self.weight = Parameter(torch.Tensor(in_features // heads, in_features // heads))
self.register_buffer('weight_inv', self.weight.data.clone())
self.reset_parameters()
def reset_parameters(self):
nn.init.orthogonal_(self.weight)
self.sync()
def sync(self):
self.weight_inv.copy_(self.weight.data.inverse())
@overrides
def forward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, N1, N2, ..., Nl, in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
size = input.size()
dim = input.dim()
# [batch, N1, N2, ..., heads, in_features/ heads]
if self.type == 'A':
out = input.view(*size[:-1], self.heads, self.in_features // self.heads)
else:
out = input.view(*size[:-1], self.in_features // self.heads, self.heads).transpose(-2, -1)
out = F.linear(out, self.weight)
if self.type == 'B':
out = out.transpose(-2, -1).contiguous()
out = out.view(*size)
_, logdet = torch.slogdet(self.weight)
if dim > 2:
num = mask.view(size[0], -1).sum(dim=1) * self.heads
logdet = logdet * num
return out, logdet
@overrides
def backward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, N1, N2, ..., Nl, in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
size = input.size()
dim = input.dim()
# [batch, N1, N2, ..., heads, in_features/ heads]
if self.type == 'A':
out = input.view(*size[:-1], self.heads, self.in_features // self.heads)
else:
out = input.view(*size[:-1], self.in_features // self.heads, self.heads).transpose(-2, -1)
out = F.linear(out, self.weight_inv)
if self.type == 'B':
out = out.transpose(-2, -1).contiguous()
out = out.view(*size)
_, logdet = torch.slogdet(self.weight_inv)
if dim > 2:
num = mask.view(size[0], -1).sum(dim=1) * self.heads
logdet = logdet * num
return out, logdet
@overrides
def init(self, data: torch.Tensor, mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
with torch.no_grad():
return self.forward(data, mask)
@overrides
def extra_repr(self):
return 'inverse={}, in_features={}, heads={}, type={}'.format(self.inverse, self.in_features, self.heads, self.type)
@classmethod
def from_params(cls, params: Dict) -> "InvertibleMultiHeadFlow":
return InvertibleMultiHeadFlow(**params)
InvertibleLinearFlow.register('invertible_linear')
InvertibleMultiHeadFlow.register('invertible_multihead')
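# Hedged usage sketch (added for illustration; not part of the original module):
# invertibility check for the two invertible linear flows. All sizes are arbitrary.
if __name__ == '__main__':
    torch.manual_seed(0)
    batch, length, features = 2, 5, 32
    x = torch.randn(batch, length, features)
    mask = torch.ones(batch, length)
    for flow in [InvertibleLinearFlow(features), InvertibleMultiHeadFlow(features, type='A')]:
        with torch.no_grad():
            y, logdet = flow.forward(x, mask)
            flow.sync()  # refresh weight_inv (a no-op here, but required after gradient updates)
            x_rec, logdet_inv = flow.backward(y, mask)
        print(type(flow).__name__, torch.allclose(x, x_rec, atol=1e-4), (logdet + logdet_inv).abs().max().item())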
| 7,141 | 34.356436 | 124 | py |
flowseq | flowseq-master/flownmt/flows/__init__.py | from flownmt.flows.flow import Flow
from flownmt.flows.actnorm import ActNormFlow
from flownmt.flows.parallel import *
from flownmt.flows.linear import InvertibleMultiHeadFlow, InvertibleLinearFlow
from flownmt.flows.couplings import *
from flownmt.flows.nmt import NMTFlow
| 274 | 38.285714 | 78 | py |
flowseq | flowseq-master/flownmt/flows/parallel/data_parallel.py | from overrides import overrides
from typing import Tuple
import torch
from torch.nn.parallel.replicate import replicate
from flownmt.flows.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from torch.nn.parallel.data_parallel import _check_balance
from flownmt.flows.flow import Flow
class DataParallelFlow(Flow):
"""
Implements data parallelism at the flow level.
"""
def __init__(self, flow: Flow, device_ids=None, output_device=None, dim=0):
super(DataParallelFlow, self).__init__(flow.inverse)
if not torch.cuda.is_available():
self.flow = flow
self.device_ids = []
return
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if output_device is None:
output_device = device_ids[0]
self.dim = dim
self.flow = flow
self.device_ids = device_ids
self.output_device = output_device
_check_balance(self.device_ids)
if len(self.device_ids) == 1:
self.flow.cuda(device_ids[0])
@overrides
def forward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
if not self.device_ids:
return self.flow.forward(*inputs, **kwargs)
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.flow.forward(*inputs[0], **kwargs[0])
replicas = self.replicate(self.flow, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return self.gather(outputs, self.output_device)
@overrides
def backward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
if not self.device_ids:
return self.flow.backward(*inputs, **kwargs)
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.flow.backward(*inputs[0], **kwargs[0])
replicas = self.replicate(self.flow, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs, backward=True)
return self.gather(outputs, self.output_device)
@overrides
def init(self, *input, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
return self.flow.init(*input, **kwargs)
def replicate(self, flow, device_ids):
return replicate(flow, device_ids)
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def parallel_apply(self, replicas, inputs, kwargs, backward=False):
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)], backward=backward)
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
| 2,891 | 37.56 | 107 | py |
flowseq | flowseq-master/flownmt/flows/parallel/__init__.py | from flownmt.flows.parallel.data_parallel import DataParallelFlow
| 66 | 32.5 | 65 | py |
flowseq | flowseq-master/flownmt/flows/parallel/parallel_apply.py | import threading
import torch
def get_a_var(obj):
if isinstance(obj, torch.Tensor):
return obj
if isinstance(obj, list) or isinstance(obj, tuple):
for result in map(get_a_var, obj):
if isinstance(result, torch.Tensor):
return result
if isinstance(obj, dict):
for result in map(get_a_var, obj.items()):
if isinstance(result, torch.Tensor):
return result
return None
def parallel_apply(flows, inputs, kwargs_tup=None, devices=None, backward=False):
r"""Applies each `module` in :attr:`modules` in parallel on arguments
contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
on each of :attr:`devices`.
:attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
:attr:`devices` (if given) should all have same length. Moreover, each
element of :attr:`inputs` can either be a single object as the only argument
to a module, or a collection of positional arguments.
"""
assert len(flows) == len(inputs)
if kwargs_tup is not None:
assert len(flows) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(flows)
if devices is not None:
assert len(flows) == len(devices)
else:
devices = [None] * len(flows)
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(i, flow, input, kwargs, device=None, back=False):
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
output = flow.backward(*input, **kwargs) if back else flow.forward(*input, **kwargs)
with lock:
results[i] = output
except Exception as e:
with lock:
results[i] = e
if len(flows) > 1:
threads = [threading.Thread(target=_worker,
args=(i, flow, input, kwargs, device, backward))
for i, (flow, input, kwargs, device) in
enumerate(zip(flows, inputs, kwargs_tup, devices))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, flows[0], inputs[0], kwargs_tup[0], devices[0], backward)
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, Exception):
raise output
outputs.append(output)
return outputs
| 2,756 | 33.4625 | 100 | py |
flowseq | flowseq-master/flownmt/flows/couplings/transform.py | import math
from overrides import overrides
from typing import Tuple
import torch
class Transform():
@staticmethod
def fwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
@staticmethod
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
class Additive(Transform):
@staticmethod
@overrides
def fwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
mu = params
z = (z + mu).mul(mask.unsqueeze(2))
logdet = z.new_zeros(z.size(0))
return z, logdet
@staticmethod
@overrides
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
mu = params
z = (z - mu).mul(mask.unsqueeze(2))
logdet = z.new_zeros(z.size(0))
return z, logdet
class Affine(Transform):
@staticmethod
@overrides
def fwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
mu, log_scale = params.chunk(2, dim=2)
scale = log_scale.add_(2.0).sigmoid_()
z = (scale * z + mu).mul(mask.unsqueeze(2))
logdet = scale.log().mul(mask.unsqueeze(2)).view(z.size(0), -1).sum(dim=1)
return z, logdet
@staticmethod
@overrides
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
mu, log_scale = params.chunk(2, dim=2)
scale = log_scale.add_(2.0).sigmoid_()
z = (z - mu).div(scale + 1e-12).mul(mask.unsqueeze(2))
logdet = scale.log().mul(mask.unsqueeze(2)).view(z.size(0), -1).sum(dim=1) * -1.0
return z, logdet
def arccosh(x):
return torch.log(x + torch.sqrt(x.pow(2) - 1))
def arcsinh(x):
return torch.log(x + torch.sqrt(x.pow(2) + 1))
class NLSQ(Transform):
# A = 8 * math.sqrt(3) / 9 - 0.05 # 0.05 is a small number to prevent exactly 0 slope
logA = math.log(8 * math.sqrt(3) / 9 - 0.05) # 0.05 is a small number to prevent exactly 0 slope
@staticmethod
def get_pseudo_params(params):
a, logb, cprime, logd, g = params.chunk(5, dim=2)
# for stability
logb = logb.mul_(0.4)
cprime = cprime.mul_(0.3)
logd = logd.mul_(0.4)
# b = logb.add_(2.0).sigmoid_()
# d = logd.add_(2.0).sigmoid_()
# c = (NLSQ.A * b / d).mul(cprime.tanh_())
c = (NLSQ.logA + logb - logd).exp_().mul(cprime.tanh_())
b = logb.exp_()
d = logd.exp_()
return a, b, c, d, g
@staticmethod
@overrides
def fwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
a, b, c, d, g = NLSQ.get_pseudo_params(params)
arg = (d * z).add_(g)
denom = arg.pow(2).add_(1)
c = c / denom
z = (b * z + a + c).mul(mask.unsqueeze(2))
logdet = torch.log(b - 2 * c * d * arg / denom)
logdet = logdet.mul(mask.unsqueeze(2)).view(z.size(0), -1).sum(dim=1)
return z, logdet
@staticmethod
@overrides
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
a, b, c, d, g = NLSQ.get_pseudo_params(params)
# double needed for stability. No effect on overall speed
a = a.double()
b = b.double()
c = c.double()
d = d.double()
g = g.double()
z = z.double()
aa = -b * d.pow(2)
bb = (z - a) * d.pow(2) - 2 * b * d * g
cc = (z - a) * 2 * d * g - b * (1 + g.pow(2))
dd = (z - a) * (1 + g.pow(2)) - c
p = (3 * aa * cc - bb.pow(2)) / (3 * aa.pow(2))
q = (2 * bb.pow(3) - 9 * aa * bb * cc + 27 * aa.pow(2) * dd) / (27 * aa.pow(3))
t = -2 * torch.abs(q) / q * torch.sqrt(torch.abs(p) / 3)
inter_term1 = -3 * torch.abs(q) / (2 * p) * torch.sqrt(3 / torch.abs(p))
inter_term2 = 1 / 3 * arccosh(torch.abs(inter_term1 - 1) + 1)
t = t * torch.cosh(inter_term2)
tpos = -2 * torch.sqrt(torch.abs(p) / 3)
inter_term1 = 3 * q / (2 * p) * torch.sqrt(3 / torch.abs(p))
inter_term2 = 1 / 3 * arcsinh(inter_term1)
tpos = tpos * torch.sinh(inter_term2)
t[p > 0] = tpos[p > 0]
z = t - bb / (3 * aa)
arg = d * z + g
denom = arg.pow(2) + 1
logdet = torch.log(b - 2 * c * d * arg / denom.pow(2))
z = z.float().mul(mask.unsqueeze(2))
logdet = logdet.float().mul(mask.unsqueeze(2)).view(z.size(0), -1).sum(dim=1) * -1.0
return z, logdet
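# Hedged usage sketch (added for illustration; not part of the original module):
# a round trip through the Affine transform. Note that fwd/bwd modify `params`
# in place (the log-scale half is squashed through a sigmoid), so a fresh copy
# is passed to each call. Shapes and sizes are arbitrary.
if __name__ == '__main__':
    torch.manual_seed(0)
    batch, length, features = 3, 6, 8
    z = torch.randn(batch, length, features)
    mask = torch.ones(batch, length)
    # params packs [mu, log_scale] along the feature dimension
    params = torch.randn(batch, length, 2 * features)
    y, logdet = Affine.fwd(z, mask, params.clone())
    z_rec, logdet_inv = Affine.bwd(y, mask, params.clone())
    print(torch.allclose(z, z_rec, atol=1e-5), (logdet + logdet_inv).abs().max().item())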
| 4,619 | 32.478261 | 101 | py |
flowseq | flowseq-master/flownmt/flows/couplings/__init__.py | from flownmt.flows.couplings.coupling import NICE
| 50 | 24.5 | 49 | py |
flowseq | flowseq-master/flownmt/flows/couplings/coupling.py | from overrides import overrides
from typing import Tuple, Dict
import torch
from flownmt.flows.couplings.blocks import NICEConvBlock, NICERecurrentBlock, NICESelfAttnBlock
from flownmt.flows.flow import Flow
from flownmt.flows.couplings.transform import Transform, Additive, Affine, NLSQ
class NICE(Flow):
"""
NICE Flow
"""
def __init__(self, src_features, features, hidden_features=None, inverse=False, split_dim=2, split_type='continuous', order='up', factor=2,
transform='affine', type='conv', kernel=3, rnn_mode='LSTM', heads=1, dropout=0.0, pos_enc='add', max_length=100):
super(NICE, self).__init__(inverse)
self.features = features
assert split_dim in [1, 2]
assert split_type in ['continuous', 'skip']
if split_dim == 1:
assert split_type == 'skip'
if factor != 2:
assert split_type == 'continuous'
assert order in ['up', 'down']
self.split_dim = split_dim
self.split_type = split_type
self.up = order == 'up'
if split_dim == 2:
out_features = features // factor
in_features = features - out_features
self.z1_channels = in_features if self.up else out_features
else:
in_features = features
out_features = features
self.z1_channels = None
assert transform in ['additive', 'affine', 'nlsq']
if transform == 'additive':
self.transform = Additive
elif transform == 'affine':
self.transform = Affine
out_features = out_features * 2
elif transform == 'nlsq':
self.transform = NLSQ
out_features = out_features * 5
else:
raise ValueError('unknown transform: {}'.format(transform))
if hidden_features is None:
hidden_features = min(2 * in_features, 1024)
assert type in ['conv', 'self_attn', 'rnn']
if type == 'conv':
self.net = NICEConvBlock(src_features, in_features, out_features, hidden_features, kernel_size=kernel, dropout=dropout)
elif type == 'rnn':
self.net = NICERecurrentBlock(rnn_mode, src_features, in_features, out_features, hidden_features, dropout=dropout)
else:
self.net = NICESelfAttnBlock(src_features, in_features, out_features, hidden_features,
heads=heads, dropout=dropout, pos_enc=pos_enc, max_length=max_length)
def split(self, z, mask):
split_dim = self.split_dim
split_type = self.split_type
dim = z.size(split_dim)
if split_type == 'continuous':
return z.split([self.z1_channels, dim - self.z1_channels], dim=split_dim), mask
elif split_type == 'skip':
idx1 = torch.tensor(list(range(0, dim, 2))).to(z.device)
idx2 = torch.tensor(list(range(1, dim, 2))).to(z.device)
z1 = z.index_select(split_dim, idx1)
z2 = z.index_select(split_dim, idx2)
if split_dim == 1:
mask = mask.index_select(split_dim, idx1)
return (z1, z2), mask
else:
raise ValueError('unknown split type: {}'.format(split_type))
def unsplit(self, z1, z2):
split_dim = self.split_dim
split_type = self.split_type
if split_type == 'continuous':
return torch.cat([z1, z2], dim=split_dim)
elif split_type == 'skip':
z = torch.cat([z1, z2], dim=split_dim)
dim = z1.size(split_dim)
idx = torch.tensor([i // 2 if i % 2 == 0 else i // 2 + dim for i in range(dim * 2)]).to(z.device)
return z.index_select(split_dim, idx)
else:
raise ValueError('unknown split type: {}'.format(split_type))
def calc_params(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor):
params = self.net(z, mask, src, src_mask)
return params
def init_net(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0):
params = self.net.init(z, mask, src, src_mask, init_scale=init_scale)
return params
@overrides
def forward(self, input: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, length, in_features]
mask: Tensor
mask tensor [batch, length]
src: Tensor
source input tensor [batch, src_length, src_features]
src_mask: Tensor
source mask tensor [batch, src_length]
Returns: out: Tensor , logdet: Tensor
out: [batch, length, in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
# [batch, length, in_channels]
(z1, z2), mask = self.split(input, mask)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
params = self.calc_params(z, mask, src, src_mask)
zp, logdet = self.transform.fwd(zp, mask, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet
@overrides
def backward(self, input: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, length, in_features]
mask: Tensor
mask tensor [batch, length]
src: Tensor
source input tensor [batch, src_length, src_features]
src_mask: Tensor
source mask tensor [batch, src_length]
Returns: out: Tensor , logdet: Tensor
out: [batch, length, in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
# [batch, length, in_channels]
(z1, z2), mask = self.split(input, mask)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
params = self.calc_params(z, mask, src, src_mask)
zp, logdet = self.transform.bwd(zp, mask, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet
@overrides
def init(self, data: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
# [batch, length, in_channels]
(z1, z2), mask = self.split(data, mask)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
params = self.init_net(z, mask, src, src_mask, init_scale=init_scale)
zp, logdet = self.transform.fwd(zp, mask, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet
@overrides
def extra_repr(self):
        return 'inverse={}, features={}, split_dim={}, split_type={}, order={}'.format(self.inverse, self.features, self.split_dim, self.split_type, 'up' if self.up else 'down')
@classmethod
def from_params(cls, params: Dict) -> "NICE":
return NICE(**params)
NICE.register('nice')
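# Hedged usage sketch (added for illustration; not part of the original module):
# a conv-based NICE coupling layer conditioned on a source sequence, with a
# forward/backward invertibility check. All sizes are arbitrary; hidden_features
# is set to half of `features` so it matches the untransformed half.
if __name__ == '__main__':
    torch.manual_seed(0)
    batch, length, features = 2, 10, 64
    src_length, src_features = 12, 48
    flow = NICE(src_features, features, hidden_features=32, transform='affine', type='conv', kernel=3)
    z = torch.randn(batch, length, features)
    mask = torch.ones(batch, length)
    src = torch.randn(batch, src_length, src_features)
    src_mask = torch.ones(batch, src_length)
    with torch.no_grad():
        y, logdet = flow.forward(z, mask, src, src_mask)
        z_rec, logdet_inv = flow.backward(y, mask, src, src_mask)
    print(torch.allclose(z, z_rec, atol=1e-4), (logdet + logdet_inv).abs().max().item())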
| 7,316 | 40.573864 | 155 | py |
flowseq | flowseq-master/flownmt/flows/couplings/blocks.py | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from flownmt.nnet.weightnorm import Conv1dWeightNorm, LinearWeightNorm
from flownmt.nnet.attention import GlobalAttention, MultiHeadAttention
from flownmt.nnet.positional_encoding import PositionalEncoding
from flownmt.nnet.transformer import TransformerDecoderLayer
class NICEConvBlock(nn.Module):
def __init__(self, src_features, in_features, out_features, hidden_features, kernel_size, dropout=0.0):
super(NICEConvBlock, self).__init__()
self.conv1 = Conv1dWeightNorm(in_features, hidden_features, kernel_size=kernel_size, padding=kernel_size // 2, bias=True)
self.conv2 = Conv1dWeightNorm(hidden_features, hidden_features, kernel_size=kernel_size, padding=kernel_size // 2, bias=True)
self.activation = nn.ELU(inplace=True)
self.attn = GlobalAttention(src_features, hidden_features, hidden_features, dropout=dropout)
        self.linear = LinearWeightNorm(in_features + hidden_features, out_features, bias=True)  # input is cat([x, attn_out])
def forward(self, x, mask, src, src_mask):
"""
Args:
x: Tensor
input tensor [batch, length, in_features]
mask: Tensor
x mask tensor [batch, length]
src: Tensor
source input tensor [batch, src_length, src_features]
src_mask: Tensor
source mask tensor [batch, src_length]
Returns: Tensor
out tensor [batch, length, out_features]
"""
out = self.activation(self.conv1(x.transpose(1, 2)))
out = self.activation(self.conv2(out)).transpose(1, 2) * mask.unsqueeze(2)
out = self.attn(out, src, key_mask=src_mask.eq(0))
out = self.linear(torch.cat([x, out], dim=2))
return out
def init(self, x, mask, src, src_mask, init_scale=1.0):
out = self.activation(self.conv1.init(x.transpose(1, 2), init_scale=init_scale))
out = self.activation(self.conv2.init(out, init_scale=init_scale)).transpose(1, 2) * mask.unsqueeze(2)
out = self.attn.init(out, src, key_mask=src_mask.eq(0), init_scale=init_scale)
out = self.linear.init(torch.cat([x, out], dim=2), init_scale=0.0)
return out
class NICERecurrentBlock(nn.Module):
def __init__(self, rnn_mode, src_features, in_features, out_features, hidden_features, dropout=0.0):
super(NICERecurrentBlock, self).__init__()
if rnn_mode == 'RNN':
RNN = nn.RNN
elif rnn_mode == 'LSTM':
RNN = nn.LSTM
elif rnn_mode == 'GRU':
RNN = nn.GRU
else:
raise ValueError('Unknown RNN mode: %s' % rnn_mode)
self.rnn = RNN(in_features, hidden_features // 2, batch_first=True, bidirectional=True)
self.attn = GlobalAttention(src_features, hidden_features, hidden_features, dropout=dropout)
self.linear = LinearWeightNorm(in_features + hidden_features, out_features, bias=True)
def forward(self, x, mask, src, src_mask):
lengths = mask.sum(dim=1).long()
packed_out = pack_padded_sequence(x, lengths, batch_first=True, enforce_sorted=False)
packed_out, _ = self.rnn(packed_out)
out, _ = pad_packed_sequence(packed_out, batch_first=True, total_length=mask.size(1))
# [batch, length, out_features]
out = self.attn(out, src, key_mask=src_mask.eq(0))
out = self.linear(torch.cat([x, out], dim=2))
return out
def init(self, x, mask, src, src_mask, init_scale=1.0):
lengths = mask.sum(dim=1).long()
packed_out = pack_padded_sequence(x, lengths, batch_first=True, enforce_sorted=False)
packed_out, _ = self.rnn(packed_out)
out, _ = pad_packed_sequence(packed_out, batch_first=True, total_length=mask.size(1))
# [batch, length, out_features]
out = self.attn.init(out, src, key_mask=src_mask.eq(0), init_scale=init_scale)
out = self.linear.init(torch.cat([x, out], dim=2), init_scale=0.0)
return out
class NICESelfAttnBlock(nn.Module):
def __init__(self, src_features, in_features, out_features, hidden_features, heads, dropout=0.0,
pos_enc='add', max_length=100):
super(NICESelfAttnBlock, self).__init__()
assert pos_enc in ['add', 'attn']
self.src_proj = nn.Linear(src_features, in_features, bias=False) if src_features != in_features else None
self.pos_enc = PositionalEncoding(in_features, padding_idx=None, init_size=max_length + 1)
self.pos_attn = MultiHeadAttention(in_features, heads, dropout=dropout) if pos_enc == 'attn' else None
self.transformer = TransformerDecoderLayer(in_features, hidden_features, heads, dropout=dropout)
self.linear = LinearWeightNorm(in_features, out_features, bias=True)
def forward(self, x, mask, src, src_mask):
if self.src_proj is not None:
src = self.src_proj(src)
key_mask = mask.eq(0)
pos_enc = self.pos_enc(x) * mask.unsqueeze(2)
if self.pos_attn is None:
x = x + pos_enc
else:
x = self.pos_attn(pos_enc, x, x, key_mask)
x = self.transformer(x, key_mask, src, src_mask.eq(0))
return self.linear(x)
def init(self, x, mask, src, src_mask, init_scale=1.0):
if self.src_proj is not None:
src = self.src_proj(src)
key_mask = mask.eq(0)
pos_enc = self.pos_enc(x) * mask.unsqueeze(2)
if self.pos_attn is None:
x = x + pos_enc
else:
x = self.pos_attn(pos_enc, x, x, key_mask)
x = self.transformer.init(x, key_mask, src, src_mask.eq(0), init_scale=init_scale)
x = x * mask.unsqueeze(2)
return self.linear.init(x, init_scale=0.0)
| 5,809 | 44.748031 | 133 | py |
flowseq | flowseq-master/flownmt/optim/lr_scheduler.py | from torch.optim.optimizer import Optimizer
class _LRScheduler(object):
def __init__(self, optimizer, last_epoch=-1):
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
if last_epoch == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
last_epoch = 0
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_lr(self):
raise NotImplementedError
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
class InverseSquareRootScheduler(_LRScheduler):
"""
Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from zero until the configured learning rate (``--lr``).
Thereafter we decay proportional to the number of
updates, with a decay factor set to align with the configured learning rate.
During warmup::
lrs = torch.linspace(0, args.lr, args.warmup_updates)
lr = lrs[update_num]
After warmup::
decay_factor = args.lr * sqrt(args.warmup_updates)
lr = decay_factor / sqrt(update_num)
"""
def __init__(self, optimizer, warmup_steps, init_lr, last_epoch=-1):
assert warmup_steps > 0, 'warmup steps should be larger than 0.'
super(InverseSquareRootScheduler, self).__init__(optimizer, last_epoch)
self.warmup_steps = float(warmup_steps)
self.init_lr = init_lr
self.lr_steps = [(base_lr - init_lr) / warmup_steps for base_lr in self.base_lrs]
self.decay_factor = self.warmup_steps ** 0.5
if last_epoch == -1:
last_epoch = 0
self.step(last_epoch)
def get_lr(self):
if self.last_epoch < self.warmup_steps:
return [self.init_lr + lr_step * self.last_epoch for lr_step in self.lr_steps]
else:
lr_factor = self.decay_factor * self.last_epoch**-0.5
return [base_lr * lr_factor for base_lr in self.base_lrs]
class ExponentialScheduler(_LRScheduler):
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma every epoch. When last_epoch=-1, sets initial lr as lr.
We also support a warmup phase where we linearly increase the learning rate
from zero until the configured learning rate (``--lr``).
Args:
optimizer (Optimizer): Wrapped optimizer.
gamma (float): Multiplicative factor of learning rate decay.
        warmup_steps (int): Warmup steps.
last_epoch (int): The index of last epoch. Default: -1.
"""
def __init__(self, optimizer, gamma, warmup_steps, init_lr, last_epoch=-1):
super(ExponentialScheduler, self).__init__(optimizer, last_epoch)
self.gamma = gamma
# handle warmup <= 0
self.warmup_steps = max(1, warmup_steps)
self.init_lr = init_lr
self.lr_steps = [(base_lr - init_lr) / self.warmup_steps for base_lr in self.base_lrs]
if last_epoch == -1:
last_epoch = 0
self.step(last_epoch)
def get_lr(self):
if self.last_epoch < self.warmup_steps:
return [self.init_lr + lr_step * self.last_epoch for lr_step in self.lr_steps]
else:
lr_factor = self.gamma ** (self.last_epoch - self.warmup_steps)
return [base_lr * lr_factor for base_lr in self.base_lrs]
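# Hedged usage sketch (added for illustration; not part of the original module):
# linear warmup followed by inverse-square-root decay. The optimizer is a plain
# SGD over a dummy parameter; step counts and learning rates are arbitrary.
if __name__ == '__main__':
    import torch
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=1e-3)
    scheduler = InverseSquareRootScheduler(optimizer, warmup_steps=4000, init_lr=1e-7)
    for step in [1, 1000, 4000, 16000, 64000]:
        scheduler.step(step)
        print(step, optimizer.param_groups[0]['lr'])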
| 4,603 | 40.477477 | 94 | py |
flowseq | flowseq-master/flownmt/optim/adamw.py | import math
import torch
from torch.optim.optimizer import Optimizer
class AdamW(Optimizer):
r"""Implements AdamW algorithm.
This implementation is modified from torch.optim.Adam based on:
`Fixed Weight Decay Regularization in Adam`
(see https://arxiv.org/abs/1711.05101)
Adam has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'] * group['lr'], p.data)
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
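# Hedged usage sketch (added for illustration; not part of the original module):
# a single AdamW step on a toy regression problem. All sizes and
# hyper-parameters are arbitrary.
if __name__ == '__main__':
    torch.manual_seed(0)
    model = torch.nn.Linear(8, 1)
    optimizer = AdamW(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-2)
    x, y = torch.randn(16, 8), torch.randn(16, 1)
    loss = torch.nn.functional.mse_loss(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print(loss.item())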
| 4,811 | 41.584071 | 116 | py |
flowseq | flowseq-master/flownmt/optim/__init__.py | from flownmt.optim.adamw import AdamW
from flownmt.optim.lr_scheduler import InverseSquareRootScheduler, ExponentialScheduler
| 126 | 41.333333 | 87 | py |
flowseq | flowseq-master/flownmt/nnet/weightnorm.py | from overrides import overrides
import torch
import torch.nn as nn
class LinearWeightNorm(nn.Module):
"""
Linear with weight normalization
"""
def __init__(self, in_features, out_features, bias=True):
super(LinearWeightNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.linear.weight, mean=0.0, std=0.05)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
self.linear = nn.utils.weight_norm(self.linear)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
            self.linear.in_features, self.linear.out_features, self.linear.bias is not None
)
def init(self, x, init_scale=1.0):
with torch.no_grad():
# [batch, out_features]
out = self(x).view(-1, self.linear.out_features)
# [out_features]
mean = out.mean(dim=0)
std = out.std(dim=0)
inv_stdv = init_scale / (std + 1e-6)
self.linear.weight_g.mul_(inv_stdv.unsqueeze(1))
if self.linear.bias is not None:
self.linear.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.linear(input)
class Conv1dWeightNorm(nn.Module):
"""
Conv1d with weight normalization
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(Conv1dWeightNorm, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.conv.weight, mean=0.0, std=0.05)
if self.conv.bias is not None:
nn.init.constant_(self.conv.bias, 0)
self.conv = nn.utils.weight_norm(self.conv)
def init(self, x, init_scale=1.0):
with torch.no_grad():
# [batch, n_channels, L]
out = self(x)
n_channels = out.size(1)
out = out.transpose(0, 1).contiguous().view(n_channels, -1)
# [n_channels]
mean = out.mean(dim=1)
std = out.std(dim=1)
inv_stdv = init_scale / (std + 1e-6)
self.conv.weight_g.mul_(inv_stdv.view(n_channels, 1, 1))
if self.conv.bias is not None:
self.conv.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.conv(input)
@overrides
def extra_repr(self):
return self.conv.extra_repr()
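# Hedged usage sketch (added for illustration; not part of the original module):
# data-dependent initialization of the weight-normalized layers, so that the
# initial outputs have roughly zero mean and init_scale standard deviation.
if __name__ == '__main__':
    torch.manual_seed(0)
    linear = LinearWeightNorm(20, 10)
    out = linear.init(torch.randn(64, 20), init_scale=1.0)
    print(out.mean().item(), out.std().item())
    conv = Conv1dWeightNorm(8, 16, kernel_size=3, padding=1)
    out = conv.init(torch.randn(32, 8, 50), init_scale=1.0)
    print(out.mean().item(), out.std().item())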
| 2,806 | 32.819277 | 91 | py |
flowseq | flowseq-master/flownmt/nnet/transformer.py | import torch.nn as nn
from flownmt.nnet.attention import MultiHeadAttention, PositionwiseFeedForward
class TransformerEncoderLayer(nn.Module):
def __init__(self, model_dim, hidden_dim, heads, dropout=0.0, mask_diag=False):
super(TransformerEncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(model_dim, heads, dropout=dropout, mask_diag=mask_diag)
self.pos_ffn = PositionwiseFeedForward(model_dim, hidden_dim, dropout=dropout)
def forward(self, x, mask):
out = self.slf_attn(x, x, x, key_mask=mask)
out = self.pos_ffn(out)
return out
def init(self, x, mask, init_scale=1.0):
out = self.slf_attn.init(x, x, x, key_mask=mask, init_scale=init_scale)
out = self.pos_ffn.init(out, init_scale=init_scale)
return out
class TransformerDecoderLayer(nn.Module):
def __init__(self, model_dim, hidden_dim, heads, dropout=0.0, mask_diag=False):
super(TransformerDecoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(model_dim, heads, dropout=dropout, mask_diag=mask_diag)
self.enc_attn = MultiHeadAttention(model_dim, heads, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(model_dim, hidden_dim, dropout=dropout)
def forward(self, x, mask, src, src_mask):
out = self.slf_attn(x, x, x, key_mask=mask)
out = self.enc_attn(out, src, src, key_mask=src_mask)
out = self.pos_ffn(out)
return out
def init(self, x, mask, src, src_mask, init_scale=1.0):
out = self.slf_attn.init(x, x, x, key_mask=mask, init_scale=init_scale)
out = self.enc_attn.init(out, src, src, key_mask=src_mask, init_scale=init_scale)
out = self.pos_ffn.init(out, init_scale=init_scale)
return out
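# Hedged usage sketch (added for illustration; not part of the original module):
# one decoder layer over random states. The mask arguments follow the
# MultiHeadAttention convention, where padding positions are marked with 1
# (hence the .eq(0) on the usual length masks). All sizes are arbitrary.
if __name__ == '__main__':
    import torch
    torch.manual_seed(0)
    layer = TransformerDecoderLayer(model_dim=64, hidden_dim=128, heads=4)
    x = torch.randn(2, 7, 64)       # target states
    src = torch.randn(2, 9, 64)     # source states (already projected to model_dim)
    tgt_mask = torch.ones(2, 7)     # 1 = real token
    src_mask = torch.ones(2, 9)
    src_mask[:, -2:] = 0            # last two source positions are padding
    out = layer(x, tgt_mask.eq(0), src, src_mask.eq(0))
    print(out.shape)                # torch.Size([2, 7, 64])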
| 1,784 | 42.536585 | 98 | py |
flowseq | flowseq-master/flownmt/nnet/positional_encoding.py | import math
import torch
import torch.nn as nn
from flownmt.utils import make_positions
class PositionalEncoding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, encoding_dim, padding_idx, init_size=1024):
super().__init__()
self.encoding_dim = encoding_dim
self.padding_idx = padding_idx
self.weights = PositionalEncoding.get_embedding(
init_size,
encoding_dim,
padding_idx,
)
self.register_buffer('_float_tensor', torch.FloatTensor(1))
@staticmethod
def get_embedding(num_encodings, encoding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = encoding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_encodings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_encodings, -1)
if encoding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_encodings, 1)], dim=1)
emb[0, :] = 0
return emb
def forward(self, x):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = x.size()[:2]
max_pos = seq_len + 1
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = PositionalEncoding.get_embedding(
max_pos,
                self.encoding_dim,
self.padding_idx,
)
self.weights = self.weights.type_as(self._float_tensor)
if self.padding_idx is None:
return self.weights[1:seq_len + 1].detach()
else:
positions = make_positions(x, self.padding_idx)
return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
def max_positions(self):
"""Maximum number of supported positions."""
return int(1e5) # an arbitrary large number
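# Hedged usage sketch (added for illustration; not part of the original module):
# sinusoidal encodings for a padded batch of token ids. With padding_idx set,
# positions are counted per sentence and padded positions get a zero vector.
if __name__ == '__main__':
    pad_idx = 0
    enc = PositionalEncoding(encoding_dim=16, padding_idx=pad_idx, init_size=64)
    tokens = torch.tensor([[5, 6, 7, pad_idx], [8, 9, pad_idx, pad_idx]])
    print(enc(tokens).shape)  # torch.Size([2, 4, 16])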
| 2,348 | 36.285714 | 99 | py |
flowseq | flowseq-master/flownmt/nnet/__init__.py | from flownmt.nnet.weightnorm import LinearWeightNorm, Conv1dWeightNorm
from flownmt.nnet.attention import GlobalAttention, MultiHeadAttention, PositionwiseFeedForward
from flownmt.nnet.transformer import TransformerEncoderLayer, TransformerDecoderLayer
from flownmt.nnet.layer_norm import LayerNorm
from flownmt.nnet.positional_encoding import PositionalEncoding
from flownmt.nnet.criterion import LabelSmoothedCrossEntropyLoss
| 428 | 60.285714 | 95 | py |
flowseq | flowseq-master/flownmt/nnet/layer_norm.py | import torch
import torch.nn as nn
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
if not export and torch.cuda.is_available():
try:
from apex.normalization import FusedLayerNorm
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
except ImportError:
pass
return nn.LayerNorm(normalized_shape, eps, elementwise_affine)
| 428 | 32 | 81 | py |
flowseq | flowseq-master/flownmt/nnet/criterion.py | import torch.nn.functional as F
import torch.nn as nn
class LabelSmoothedCrossEntropyLoss(nn.Module):
"""
Cross Entropy loss with label smoothing.
For training, the loss is smoothed with parameter eps, while for evaluation, the smoothing is disabled.
"""
def __init__(self, label_smoothing):
super(LabelSmoothedCrossEntropyLoss, self).__init__()
self.eps = label_smoothing
def forward(self, input, target):
# [batch, c, d1, ..., dk]
loss = F.log_softmax(input, dim=1) * -1.
# [batch, d1, ..., dk]
nll_loss = loss.gather(dim=1, index=target.unsqueeze(1)).squeeze(1)
if self.training:
# [batch, c, d1, ..., dk]
inf_mask = loss.eq(float('inf'))
# [batch, d1, ..., dk]
smooth_loss = loss.masked_fill(inf_mask, 0.).sum(dim=1)
eps_i = self.eps / (1.0 - inf_mask.float()).sum(dim=1)
return nll_loss * (1. - self.eps) + smooth_loss * eps_i
else:
return nll_loss
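# Hedged usage sketch (added for illustration; not part of the original module):
# label-smoothed loss over a batch of token predictions. Logits are
# [batch, classes, length] and targets are [batch, length], matching the
# gather along dim=1 above. All sizes are arbitrary.
if __name__ == '__main__':
    import torch
    torch.manual_seed(0)
    criterion = LabelSmoothedCrossEntropyLoss(label_smoothing=0.1)
    logits = torch.randn(4, 100, 12)          # [batch, vocab, length]
    target = torch.randint(0, 100, (4, 12))   # [batch, length]
    criterion.train()
    smoothed = criterion(logits, target).sum(dim=1).mean()
    criterion.eval()
    nll = criterion(logits, target).sum(dim=1).mean()
    print(smoothed.item(), nll.item())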
| 1,029 | 35.785714 | 107 | py |
flowseq | flowseq-master/flownmt/nnet/attention.py | from overrides import overrides
import torch
from torch.nn import Parameter
import torch.nn as nn
import torch.nn.functional as F
from flownmt.nnet.layer_norm import LayerNorm
class GlobalAttention(nn.Module):
"""
Global Attention between encoder and decoder
"""
def __init__(self, key_features, query_features, value_features, hidden_features=None, dropout=0.0):
"""
Args:
key_features: int
dimension of keys
query_features: int
dimension of queries
value_features: int
dimension of values (outputs)
hidden_features: int
dimension of hidden states (default value_features)
dropout: float
dropout rate
"""
super(GlobalAttention, self).__init__()
if hidden_features is None:
hidden_features = value_features
self.key_proj = nn.Linear(key_features, 2 * hidden_features, bias=True)
self.query_proj = nn.Linear(query_features, hidden_features, bias=True)
self.dropout = dropout
self.fc = nn.Linear(hidden_features, value_features)
self.hidden_features = hidden_features
self.reset_parameters()
def reset_parameters(self):
# key proj
nn.init.xavier_uniform_(self.key_proj.weight)
nn.init.constant_(self.key_proj.bias, 0)
# query proj
nn.init.xavier_uniform_(self.query_proj.weight)
nn.init.constant_(self.query_proj.bias, 0)
# fc
nn.init.xavier_uniform_(self.fc.weight)
nn.init.constant_(self.fc.bias, 0)
@overrides
def forward(self, query, key, key_mask=None):
"""
Args:
query: Tensor
query tensor [batch, query_length, query_features]
key: Tensor
key tensor [batch, key_length, key_features]
key_mask: ByteTensor or None
binary ByteTensor [batch, src_len] padding elements are indicated by 1s.
Returns: Tensor
value tensor [batch, query_length, value_features]
"""
bs, timesteps, _ = key.size()
dim = self.hidden_features
# [batch, query_length, dim]
query = self.query_proj(query)
# [batch, key_length, 2 * dim]
c = self.key_proj(key)
# [batch, key_length, 2, dim]
c = c.view(bs, timesteps, 2, dim)
# [batch, key_length, dim]
key = c[:, :, 0]
value = c[:, :, 1]
# attention weights [batch, query_length, key_length]
attn_weights = torch.bmm(query, key.transpose(1, 2))
if key_mask is not None:
attn_weights = attn_weights.masked_fill(key_mask.unsqueeze(1), float('-inf'))
attn_weights = F.softmax(attn_weights.float(), dim=-1,
dtype=torch.float32 if attn_weights.dtype == torch.float16 else attn_weights.dtype)
# values [batch, query_length, dim]
out = torch.bmm(attn_weights, value)
out = F.dropout(self.fc(out), p=self.dropout, training=self.training)
return out
def init(self, query, key, key_mask=None, init_scale=1.0):
with torch.no_grad():
return self(query, key, key_mask=key_mask)
class MultiHeadAttention(nn.Module):
"""
Multi-head Attention
"""
def __init__(self, model_dim, heads, dropout=0.0, mask_diag=False):
"""
Args:
model_dim: int
the input dimension for keys, queries and values
heads: int
number of heads
dropout: float
dropout rate
"""
super(MultiHeadAttention, self).__init__()
self.model_dim = model_dim
self.head_dim = model_dim // heads
self.heads = heads
self.dropout = dropout
self.mask_diag = mask_diag
assert self.head_dim * heads == self.model_dim, "model_dim must be divisible by number of heads"
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.empty(3 * model_dim, model_dim))
self.in_proj_bias = Parameter(torch.empty(3 * model_dim))
self.layer_norm = LayerNorm(model_dim)
self.reset_parameters()
def reset_parameters(self):
# in proj
nn.init.xavier_uniform_(self.in_proj_weight[:self.model_dim, :])
nn.init.xavier_uniform_(self.in_proj_weight[self.model_dim:(self.model_dim * 2), :])
nn.init.xavier_uniform_(self.in_proj_weight[(self.model_dim * 2):, :])
nn.init.constant_(self.in_proj_bias, 0.)
def forward(self, query, key, value, key_mask=None):
"""
Args:
            query: Tensor
[batch, tgt_len, model_dim]
key: Tensor
[batch, src_len, model_dim]
value: Tensor
[batch, src_len, model_dim]
key_mask: ByteTensor or None
binary ByteTensor [batch, src_len] padding elements are indicated by 1s.
Returns:
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
bs, src_len, model_dim = key.size()
tgt_len = query.size(1)
heads = self.heads
residual = query
# k, v: [bs, src_len, model_dim]
# q: [bs, tgt_len, model_dim]
if qkv_same:
# self-attention
q, k, v = self._in_proj_qkv(query)
elif kv_same:
# encoder-decoder attention
q = self._in_proj_q(query)
k, v = self._in_proj_kv(key)
else:
q = self._in_proj_q(query)
k = self._in_proj_k(key)
v = self._in_proj_v(value)
q *= self.scaling
model_dim = q.size(2)
dim = model_dim // heads
        # [batch, len, model_dim] -> [len, batch, model_dim] -> [len, batch * heads, dim] -> [batch * heads, len, dim]
q = q.transpose(0, 1).contiguous().view(tgt_len, bs * heads, dim).transpose(0, 1)
k = k.transpose(0, 1).contiguous().view(src_len, bs * heads, dim).transpose(0, 1)
v = v.transpose(0, 1).contiguous().view(src_len, bs * heads, dim).transpose(0, 1)
# attention weights [batch * heads, tgt_len, src_len]
attn_weights = torch.bmm(q, k.transpose(1, 2))
if key_mask is not None:
attn_weights = attn_weights.view(bs, heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(key_mask.unsqueeze(1).unsqueeze(2), float('-inf'))
attn_weights = attn_weights.view(bs * heads, tgt_len, src_len)
if self.mask_diag:
assert tgt_len == src_len
# [1, tgt_len, tgt_len]
diag_mask = torch.eye(tgt_len, device=query.device, dtype=torch.uint8).unsqueeze(0)
attn_weights = attn_weights.masked_fill(diag_mask, float('-inf'))
attn_weights = F.softmax(attn_weights.float(), dim=-1,
dtype=torch.float32 if attn_weights.dtype == torch.float16 else attn_weights.dtype)
# outputs [batch * heads, tgt_len, dim]
out = torch.bmm(attn_weights, v)
# merge heads
# [batch, heads, tgt_len, dim] -> [batch, tgt_len, heads, dim]
# -> [batch, tgt_len, model_dim]
out = out.view(bs, heads, tgt_len, dim).transpose(1, 2).contiguous().view(bs, tgt_len, model_dim)
out = F.dropout(out, p=self.dropout, training=self.training)
out = self.layer_norm(out + residual)
return out
def init(self, query, key, value, key_mask=None, init_scale=1.0):
with torch.no_grad():
return self(query, key, value, key_mask=key_mask)
def _in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def _in_proj_kv(self, key):
return self._in_proj(key, start=self.model_dim).chunk(2, dim=-1)
def _in_proj_q(self, query):
return self._in_proj(query, end=self.model_dim)
def _in_proj_k(self, key):
return self._in_proj(key, start=self.model_dim, end=2 * self.model_dim)
def _in_proj_v(self, value):
return self._in_proj(value, start=2 * self.model_dim)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
class PositionwiseFeedForward(nn.Module):
def __init__(self, features, hidden_features, dropout=0.0):
super(PositionwiseFeedForward, self).__init__()
self.linear1 = nn.Linear(features, hidden_features)
self.dropout = dropout
self.linear2 = nn.Linear(hidden_features, features)
self.layer_norm = LayerNorm(features)
def forward(self, x):
residual = x
x = F.relu(self.linear1(x), inplace=True)
x = F.dropout(x, p=self.dropout, training=self.training)
x = F.dropout(self.linear2(x), p=self.dropout, training=self.training)
x = self.layer_norm(residual + x)
return x
def init(self, x, init_scale=1.0):
with torch.no_grad():
return self(x)
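# Hedged usage sketch (added for illustration; not part of the original module):
# the two attention modules with a padding mask (1 marks a padded position).
# All sizes are arbitrary.
if __name__ == '__main__':
    torch.manual_seed(0)
    query = torch.randn(2, 5, 32)
    key = torch.randn(2, 8, 48)
    key_mask = torch.zeros(2, 8, dtype=torch.bool)
    key_mask[:, -2:] = True  # last two source positions are padding
    global_attn = GlobalAttention(key_features=48, query_features=32, value_features=32)
    print(global_attn(query, key, key_mask=key_mask).shape)      # torch.Size([2, 5, 32])
    self_attn = MultiHeadAttention(model_dim=32, heads=4)
    print(self_attn(query, query, query, key_mask=None).shape)   # torch.Size([2, 5, 32])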
| 9,245 | 35.401575 | 116 | py |
flowseq | flowseq-master/flownmt/data/dataloader.py | import codecs
import math
import random
from collections import defaultdict
import numpy as np
import torch
import os
def get_sorted_wordlist(path):
freqs = defaultdict(lambda: 0)
with codecs.open(path, "r", encoding="utf-8") as fin:
for line in fin:
words = line.strip().split()
for word in words:
freqs[word] += 1
sorted_words = sorted(freqs, key=freqs.get, reverse=True)
wordlist = [word for word in sorted_words]
return wordlist
UNK = "<unk>"
EOS = "<eos>"
PAD = "<pad>"
SRC_PAD = PAD
TGT_PAD = PAD
class NMTDataSet():
def __init__(self, data_path, src_lang, tgt_lang, src_vocab_path, tgt_vocab_path, src_max_vocab, tgt_max_vocab,
subword, create_vocab):
self.train_src_path = os.path.join(data_path, 'train.{}'.format(src_lang))
self.train_tgt_path = os.path.join(data_path, 'train.{}'.format(tgt_lang))
self.dev_src_path = os.path.join(data_path, 'dev.{}'.format(src_lang))
self.dev_tgt_path = os.path.join(data_path, 'dev.{}'.format(tgt_lang))
self.test_src_path = os.path.join(data_path, 'test.{}'.format(src_lang))
self.test_tgt_path = os.path.join(data_path, 'test.{}'.format(tgt_lang))
self.subword = subword
if "bpe" in subword:
self.dev_tgt_path_ori = os.path.join(data_path, 'dev.{}.ori'.format(tgt_lang))
self.test_tgt_path_ori = os.path.join(data_path, 'test.{}.ori'.format(tgt_lang))
else:
self.dev_tgt_path_ori = self.dev_tgt_path
self.test_tgt_path_ori = self.test_tgt_path
if not create_vocab:
assert src_vocab_path is not None and tgt_vocab_path is not None and os.path.exists(src_vocab_path) and os.path.exists(tgt_vocab_path)
self.src_word2id, self.src_id2word = self.load_vocab(src_vocab_path)
self.tgt_word2id, self.tgt_id2word = self.load_vocab(tgt_vocab_path)
else:
if subword == "joint-bpe":
joint_path = os.path.join(data_path, "joint.tmp")
os.system("cat %s %s > %s" % (self.train_src_path, self.train_tgt_path, joint_path))
assert src_max_vocab == tgt_max_vocab, "src max vocab size != tgt max vocab size"
word2id, id2word = self.get_vocab(joint_path, src_max_vocab, has_pad=True)
os.remove(joint_path)
self.src_word2id = self.tgt_word2id = word2id
self.src_id2word = self.tgt_id2word = id2word
else:
if subword == "sep-bpe":
assert src_max_vocab == tgt_max_vocab, "src max vocab size != tgt max vocab size"
self.src_word2id, self.src_id2word = self.get_vocab(self.train_src_path, src_max_vocab, has_pad=True)
self.tgt_word2id, self.tgt_id2word = self.get_vocab(self.train_tgt_path, tgt_max_vocab, has_pad=True)
if src_vocab_path is not None and tgt_vocab_path is not None:
self.save_vocab(self.src_id2word, src_vocab_path)
self.save_vocab(self.tgt_id2word, tgt_vocab_path)
self.src_vocab_size = len(self.src_word2id)
self.tgt_vocab_size = len(self.tgt_word2id)
self.src_pad_idx = self.src_word2id[SRC_PAD]
self.tgt_pad_idx = self.tgt_word2id[TGT_PAD]
print(f"Source vocab size={len(self.src_word2id)}, target vocab size={len(self.tgt_word2id)}")
def load_vocab(self, path):
word2id = {}
i = 0
with codecs.open(path, "r", "utf-8") as fin:
for line in fin:
word2id[line.strip()] = i
i += 1
id2word = {v: k for k, v in word2id.items()}
return word2id, id2word
def save_vocab(self, id2word, path):
print(f"Saving vocab to {path}")
with codecs.open(path, "w", encoding="utf-8") as fout:
for i in range(len(id2word)):
fout.write(id2word[i] + "\n")
def get_vocab(self, path, max_vocab=-1, has_pad=True):
if max_vocab > 0:
max_vocab = max_vocab - 3 if has_pad else max_vocab - 2
wordlist = get_sorted_wordlist(path)
if max_vocab > 0:
wordlist = wordlist[:max_vocab]
word2id = {}
if has_pad:
word2id[PAD] = 0
word2id[UNK] = len(word2id)
word2id[EOS] = len(word2id)
for word in wordlist:
word2id[word] = len(word2id)
id2word = {i: word for word, i in word2id.items()}
return word2id, id2word
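# Resulting index layout (with has_pad=True): 0 -> <pad>, 1 -> <unk>, 2 -> <eos>, followed by the
# corpus words in descending frequency. For the hypothetical wordlist ["a", "b"] above this gives
# word2id == {"<pad>": 0, "<unk>": 1, "<eos>": 2, "a": 3, "b": 4}; note that max_vocab reserves
# the slots for these special symbols before truncating the wordlist.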
def dump_to_file(self, ms, lengths, path, post_edit=True):
# ms: list of (batch_size, sent_len)
with codecs.open(path, "w", encoding="utf-8") as fout:
for m, length in zip(ms, lengths):
m = m.cpu().numpy()
length = length.cpu().numpy()
for line, l in zip(m, length):
sent = []
for w in line[:l]:
word = self.tgt_id2word[w]
if word == EOS:
break
sent.append(word)
if post_edit and (self.subword == "sep-bpe" or self.subword == "joint-bpe"):
line = ' '.join(sent)
line = line.replace('@@ ', '').strip()
if line.endswith("@@"):
line = line[:-2]  # strip a dangling "@@" left at the end of the line
elif post_edit and (self.subword == "joint-spm"):
line = ''.join(sent)
line = line.replace('▁', ' ').strip()
else:
line = " ".join(sent)
fout.write(line + "\n")
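# Post-editing sketch (hypothetical tokens): with sep-bpe/joint-bpe the subword sequence
# ["We@@", "ather", "re@@", "port"] is joined to "We@@ ather re@@ port" and de-segmented to
# "Weather report"; with joint-spm, ["▁Weather", "▁re", "port"] is concatenated and the "▁"
# markers are turned back into spaces, giving "Weather report".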
def max_tok_len(example, count):
"""
In the token-batching scheme, the number of sequences in a batch is limited
so that the total number of src/tgt tokens (including padding)
in the batch is <= batch_size.
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch, max_tgt_in_batch # this is a hack
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
# Src: [w1 ... wN <eos>]
max_src_in_batch = max(max_src_in_batch, len(example[0]) + 1)
# Tgt: [w1 ... wM <eos>]
max_tgt_in_batch = max(max_tgt_in_batch, len(example[1]) + 1)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
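# Worked example (hypothetical lengths): if the current batch already holds count=10 examples
# and the longest source/target seen so far are 20/25 tokens (21/26 once <eos> is counted),
# max_tok_len returns max(10 * 21, 10 * 26) = 260, i.e. the number of (padded) tokens the
# batch would occupy, which is then compared against the token budget `batch_size`.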
def batch_iter(data, batch_size, batch_size_fn=None, shuffle=False):
"""Yield elements from data in chunks of batch_size, where each chunk size
is a multiple of batch_size_multiple.
This is an extended version of torchtext.data.batch.
"""
if batch_size_fn is None:
def batch_size_fn(new, count):
return count
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch))
if size_so_far >= batch_size:
overflowed = 0
if size_so_far > batch_size:
overflowed += 1
if overflowed == 0:
yield minibatch
minibatch, size_so_far = [], 0
else:
yield minibatch[:-overflowed]
minibatch = minibatch[-overflowed:]
size_so_far = 0
for i, ex in enumerate(minibatch):
size_so_far = batch_size_fn(ex, i + 1)
if minibatch:
yield minibatch
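# Behaviour sketch: with the default count-based batch_size_fn and batch_size=3, a stream of
# seven examples is yielded as chunks of 3, 3 and 1. With a token-based batch_size_fn (e.g.
# max_tok_len above), an example that pushes the estimate past batch_size is held back and
# carried over into the next minibatch rather than dropped.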
def bucket_batch_iter(data, batch_size, batch_size_fn=None, shuffle=False):
"""Yield elements from data in chunks of batch_size, where each chunk size
is a multiple of batch_size_multiple.
This is an extended version of torchtext.data.batch.
"""
if batch_size_fn is None:
def batch_size_fn(new, count):
return count
buckets = [20, 40, 60, 80]
bucket_data = [[] for _ in buckets]
outliers = []
for ex in data:
tgt_len = len(ex[1])
if tgt_len > buckets[-1]:
outliers.append(ex)
continue
for bid, bl in enumerate(buckets):
if tgt_len <= bl:
bucket_data[bid].append(ex)
break
if len(outliers) > 0:
bucket_data.append(outliers)
batches, minibatch, size_so_far = [], [], 0
for bucket in bucket_data:
if shuffle:
random.shuffle(bucket)
for ex in bucket:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch))
if size_so_far >= batch_size:
overflowed = 0
if size_so_far > batch_size:
overflowed += 1
if overflowed == 0:
batches.append(minibatch)
minibatch, size_so_far = [], 0
else:
batches.append(minibatch[:-overflowed])
minibatch = minibatch[-overflowed:]
size_so_far = 0
for i, ex in enumerate(minibatch):
size_so_far = batch_size_fn(ex, i + 1)
if minibatch:
batches.append(minibatch)
if shuffle:
random.shuffle(batches)
for minibatch in batches:
yield minibatch
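# Bucketing sketch: examples are first grouped by target length into the buckets
# (<=20, <=40, <=60, <=80) plus an "outliers" bucket for longer sentences, each bucket is
# batched on its own (so padding within a batch stays small), and the resulting batches are
# shuffled before being yielded.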
class DataIterator():
def __init__(self, dataset, batch_size, batch_by_tokens, max_src_length, max_tgt_length, buffer_multiple_size,
device, model_path, len_diff=-1, len_ratio=-1, multi_scale=1, corpus="train",
bucket_data=True, rank=-1, num_replicas=0):
self.train = False # need shuffle and sort
self.device = device
if corpus == "train":
self.src_path = dataset.train_src_path
self.tgt_path = dataset.train_tgt_path
self.tgt_path_ori = None
self.train = True
elif corpus == "dev":
self.src_path = dataset.dev_src_path
self.tgt_path = dataset.dev_tgt_path
self.tgt_path_ori = dataset.dev_tgt_path_ori
elif corpus == "test":
self.src_path = dataset.test_src_path
self.tgt_path = dataset.test_tgt_path
self.tgt_path_ori = dataset.test_tgt_path_ori
else:
raise ValueError
self.corpus = corpus
self.batch_size = batch_size
self.batch_size_fn = max_tok_len if batch_by_tokens else None
self.max_src_length = max_src_length
self.max_tgt_length = max_tgt_length
self.len_diff = len_diff
self.len_ratio = len_ratio
self.multi_scale = multi_scale
self.src_word2id = dataset.src_word2id
self.tgt_word2id = dataset.tgt_word2id
if rank < 0:
assert num_replicas == 0
else:
assert corpus == 'train'
assert rank < num_replicas
assert self.tgt_path_ori is None
self.rank = rank
self.num_replicas = num_replicas
self.data_size, self.data = self.get_dataset()
self.batches = None
if self.train:
self.buffer_size = buffer_multiple_size * self.batch_size
assert buffer_multiple_size > 0
else:
self.buffer_size = -1
self.src_pad_idx = self.src_word2id[SRC_PAD]
self.tgt_pad_idx = self.tgt_word2id[TGT_PAD]
self.bucket = bucket_data
self.sents_num = 0
self.tgt_sort_origin_path = os.path.join(model_path, os.path.basename(self.tgt_path) + ".sort")
def filter_sents(self, s_tokens, t_tokens):
if self.max_tgt_length > 0 and self.max_src_length > 0:
if len(s_tokens) + 1 > self.max_src_length or len(t_tokens) + 1 > self.max_tgt_length:
return True
if self.len_diff > 0:
if abs(len(s_tokens) - len(t_tokens)) > self.len_diff:
return True
if self.len_ratio > 0:
ratio = len(t_tokens) / len(s_tokens)
if ratio > self.len_ratio or ratio < (1. / self.len_ratio):
return True
return False
def pad_tgt(self, tgt):
scale = self.multi_scale
tgt_len = len(tgt)
res = tgt_len % scale if tgt_len % scale > 0 else scale
tgt_len = (scale - res) + tgt_len
tgt = tgt + [self.tgt_word2id[EOS]] * (tgt_len - len(tgt))
return tgt
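# Worked example (multi_scale=4, hypothetical lengths): a 9-token target gets 3 extra <eos>
# tokens appended, giving length 12; a 12-token target is returned unchanged. This keeps every
# target length a multiple of the model's length unit.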
def get_dataset(self):
count = 0
data = []
outliers = 0
src_path, tgt_path = self.src_path, self.tgt_path
tgt_ori_path = self.tgt_path_ori
ftgt_ori = None if tgt_ori_path is None else codecs.open(tgt_ori_path, "r", encoding="utf-8")
with codecs.open(src_path, "r", encoding="utf-8") as fsrc, codecs.open(tgt_path, "r", encoding="utf-8") as ftgt:
for id, (s, t) in enumerate(zip(fsrc, ftgt)):
if self.num_replicas > 0 and id % self.num_replicas != self.rank:
continue
s_tokens = s.strip().split()
t_tokens = t.strip().split()
t_ori = ftgt_ori.readline().strip() if ftgt_ori is not None else None
src = [self.src_word2id[word] if word in self.src_word2id else self.src_word2id[UNK] for word in s_tokens] + [self.src_word2id[EOS]]
tgt = [self.tgt_word2id[word] if word in self.tgt_word2id else self.tgt_word2id[UNK] for word in t_tokens] #+ [self.tgt_word2id[EOS]]
tgt = self.pad_tgt(tgt)
if self.train and self.filter_sents(src, tgt):
outliers += 1
continue
else:
if not self.train:
data.append((src, tgt, t_ori))
if self.filter_sents(src, tgt):
outliers += 1
else:
data.append((src, tgt))
count += 1
print(f"Load total {count} sentences pairs, {outliers} are out of maximum sentence length!")
return count, data
def batch(self, batch_size):
"""Yield elements from data in chunks of batch_size."""
batch_size_fn = self.batch_size_fn
if batch_size_fn is None:
def batch_size_fn(new, count):
return count
minibatch, size_so_far = [], 0
for ex in self.data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def process_batch(self, minibatch):
# padding and make mask of minibatch
# return: batch_size x max_len
# minibatch = sorted(minibatch, key=lambda x: len(x[1]), reverse=True)
src_max_len = max([len(d[0]) for d in minibatch])
tgt_max_len = max([len(d[1]) for d in minibatch])
padded_src, padded_tgt = [], []
src_mask = []
tgt_mask = []
for d in minibatch:
s, t = d[0], d[1]
padded_src.append(s + [self.src_pad_idx] * (src_max_len - len(s)))
padded_tgt.append(t + [self.tgt_pad_idx] * (tgt_max_len - len(t)))
src_mask.append([1.] * len(s) + [0.] * (src_max_len - len(s)))
tgt_mask.append([1.] * len(t) + [0.] * (tgt_max_len - len(t)))
padded_src = torch.from_numpy(np.array(padded_src)).long().to(self.device)
padded_tgt = torch.from_numpy(np.array(padded_tgt)).long().to(self.device)
src_mask = torch.from_numpy(np.array(src_mask)).float().to(self.device)
tgt_mask = torch.from_numpy(np.array(tgt_mask)).float().to(self.device)
return padded_src, padded_tgt, src_mask, tgt_mask
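# Shapes returned above, for batch size B and padded lengths S (source) / T (target):
# padded_src is a LongTensor [B, S], padded_tgt a LongTensor [B, T], and src_mask / tgt_mask
# are FloatTensors of the same shapes with 1.0 over real tokens and 0.0 over padding.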
def init_epoch(self):
# For training, repeatedly read a large buffer ("pool") of examples and re-batch it,
# either with length bucketing or after shuffling, so that similarly sized sentences
# tend to share a minibatch. Sorting each batch by decreasing target length (as
# rnn.pack_padded_sequence would require) is disabled; see the commented-out sort below.
if self.train:
def _pool():
for p in self.batch(self.buffer_size):
if self.bucket:
p_batch = bucket_batch_iter(p,
self.batch_size,
batch_size_fn=self.batch_size_fn, shuffle=True)
else:
p_batch = batch_iter(random.sample(p, len(p)),
self.batch_size,
batch_size_fn=self.batch_size_fn)
p_batch = list(p_batch)
for b in p_batch:
yield b
self.batches = _pool()
else:
if self.batches is None:
self.batches = []
else:
self.batches.clear()
iter_func = bucket_batch_iter if self.bucket else batch_iter
for b in iter_func(
self.data,
self.batch_size,
batch_size_fn=self.batch_size_fn):
# self.batches.append(sorted(b, key=lambda x: len(x[1]), reverse=True))
self.batches.append(b)
def __iter__(self):
while True:
self.init_epoch()
tgt_ori_sents = []
for idx, minibatch in enumerate(self.batches):
self.sents_num += len(minibatch)
if not self.train:
tgt_ori_sents.append([d[2] for d in minibatch])
src_batch, tgt_batch, src_mask, tgt_mask = self.process_batch(minibatch)
yield src_batch, tgt_batch, src_mask, tgt_mask
if not self.train:
with codecs.open(self.tgt_sort_origin_path, "w", encoding="utf-8") as fout:
for b in tgt_ori_sents:
for sent in b:
fout.write(sent + "\n")
return
def get_batch(self, batch_size):
batch = random.sample(self.data, batch_size)
return self.process_batch(batch)
@property
def epoch(self):
return self.sents_num * 1. / self.data_size
def __len__(self):
if self.batch_size_fn is not None:
raise NotImplementedError
return math.ceil(self.data_size / self.batch_size)
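# Iteration contract: DataIterator yields (src_batch, tgt_batch, src_mask, tgt_mask) tuples
# already moved to `device`. For the training corpus it loops over the data indefinitely (use
# the `epoch` property to track progress); for dev/test it makes a single pass and also writes
# the original target sentences, in iteration order, to `tgt_sort_origin_path` for BLEU scoring.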
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--train_src_path", type=str, default=None)
parser.add_argument("--train_tgt_path", type=str, default=None)
parser.add_argument("--dev_src_path", type=str, default=None)
parser.add_argument("--dev_tgt_path", type=str, default=None)
parser.add_argument("--test_src_path", type=str, default=None)
parser.add_argument("--test_tgt_path", type=str, default=None)
parser.add_argument("--src_vocab_path", type=str, default="src.vocab")
parser.add_argument("--tgt_vocab_path", type=str, default="tgt.vocab")
parser.add_argument("--batch_size", type=int, default=50)
parser.add_argument("--batch_by_tokens", type=int, default=1, help="0 is False")
parser.add_argument("--max_src_length", type=int, default=80)
parser.add_argument("--max_tgt_length", type=int, default=80)
parser.add_argument("--buffer_multiple_size", type=int, default=3)
parser.add_argument("--src_max_vocab", type=int, default=50000)
parser.add_argument("--tgt_max_vocab", type=int, default=50000)
parser.add_argument("--create_vocab", type=int, default=0)
args = parser.parse_args()
model_path = "debug"
dataset = NMTDataSet(args.data_path, args.src_lang, args.tgt_lang,
args.src_vocab_path, args.tgt_vocab_path,
args.src_max_vocab, args.tgt_max_vocab,
subword=args.subword, create_vocab=bool(args.create_vocab))
train_iterator = DataIterator(dataset, args.batch_size, args.batch_by_tokens, args.max_src_length, args.max_tgt_length,
args.buffer_multiple_size, device="cpu", model_path=model_path, corpus="train")
dev_iterator = DataIterator(dataset, args.batch_size, args.batch_by_tokens, args.max_src_length, args.max_tgt_length,
args.buffer_multiple_size, device="cpu", model_path=model_path, corpus="dev")
# test_iterator = DataIterator(dataset, args, device="cpu", corpus="test")
def _print(batch, id2word):
for sent in batch:
if id2word is None:
print(" ".join([str(i) for i in sent]) + "\n")
else:
print(" ".join([id2word[w] for w in sent]) + "\n")
step = 0
for src_batch, tgt_batch, src_mask, tgt_mask in train_iterator:
print("Epoch = %f\n" % train_iterator.epoch)
print("---src batch %d ----" % step)
_print(src_batch.numpy(), dataset.src_id2word)
print("---tgt batch %d ----" % step)
_print(tgt_batch.numpy(), dataset.tgt_id2word)
print("---src mask %d ----" % step)
_print(src_mask.numpy(), None)
step += 1
if step % 10 == 0:
break
print("############### Dev ###############")
step = 0
for src_batch, tgt_batch, src_mask, tgt_mask in dev_iterator:
print("Epoch = %f\n" % dev_iterator.epoch)
print("---src batch %d ----" % step)
_print(src_batch.numpy(), dataset.src_id2word)
print("---tgt batch %d ----" % step)
_print(tgt_batch.numpy(), dataset.tgt_id2word)
print("---src mask %d ----" % step)
_print(src_mask.numpy(), None)
step += 1
| 21,852 | 39.097248 | 149 | py |
flowseq | flowseq-master/flownmt/data/__init__.py | __author__ = 'violet-zct'
from flownmt.data.dataloader import NMTDataSet, DataIterator
| 88 | 21.25 | 60 | py |
flowseq | flowseq-master/experiments/nmt.py | import os
import sys
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import time
import json
import random
import math
import numpy as np
import torch
from torch.nn.utils import clip_grad_norm_
import torch.distributed as dist
from flownmt.data import NMTDataSet, DataIterator
from flownmt import FlowNMT
from flownmt.utils import total_grad_norm
from flownmt.optim import AdamW, InverseSquareRootScheduler, ExponentialScheduler
from experiments.options import parse_args
def logging(info, logfile):
print(info)
print(info, file=logfile)
logfile.flush()
def get_optimizer(learning_rate, parameters, betas, eps, amsgrad, weight_decay, lr_decay, warmup_steps, init_lr):
optimizer = AdamW(parameters, lr=learning_rate, betas=betas, eps=eps, amsgrad=amsgrad, weight_decay=weight_decay)
if lr_decay == 'inv_sqrt':
scheduler = InverseSquareRootScheduler(optimizer, warmup_steps, init_lr)
elif lr_decay == 'expo':
step_decay = 0.999995
scheduler = ExponentialScheduler(optimizer, step_decay, warmup_steps, init_lr)
else:
raise ValueError('unknown lr decay method: %s' % lr_decay)
return optimizer, scheduler
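# Rough behaviour of the two schedules (see flownmt.optim for the exact definitions): both are
# assumed to warm the learning rate up from init_lr over `warmup_steps` updates; 'inv_sqrt'
# then decays it proportionally to 1/sqrt(step) as in the Transformer recipe, while 'expo'
# multiplies it by a fixed factor (0.999995 here) after every step.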
def calc_bleu(fref, fmt, result_path):
script = os.path.join(current_path, 'scripts/multi-bleu.perl')
temp = os.path.join(result_path, 'tmp')
os.system("perl %s %s < %s > %s" % (script, fref, fmt, temp))
bleu = open(temp, 'r').read().strip()
bleu = bleu.split(",")[0].split("=")
if len(bleu) < 2:
return 0.0
bleu = float(bleu[1].strip())
return bleu
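# calc_bleu shells out to Moses' multi-bleu.perl, whose first output line looks like
# "BLEU = 27.31, 60.1/33.8/21.1/13.6 (BP=..., ratio=...)"; only the corpus-level score before
# the first comma is parsed, and 0.0 is returned when the script produced no usable output.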
def translate(epoch, dataset, dataloader, flownmt, result_path, log):
flownmt.eval()
taus = [0.0,]
bleu = 0
logging('argmax translating...', log)
for tau in taus:
n_tr = 8 if tau > 1e-4 else 1
translations = []
lengths = []
length_err = 0
num_insts = 0
start_time = time.time()
for src, tgt, src_masks, tgt_masks in dataloader:
trans, lens = flownmt.translate_argmax(src, src_masks, n_tr=n_tr, tau=tau)
translations.append(trans)
lengths.append(lens)
length_err += (lens.float() - tgt_masks.sum(dim=1)).abs().sum().item()
num_insts += src.size(0)
time_cost = time.time() - start_time
outfile = os.path.join(result_path, 'trans{}.t{:.1f}.mt'.format(epoch, tau))
dataset.dump_to_file(translations, lengths, outfile)
b = calc_bleu(dataloader.tgt_sort_origin_path, outfile, result_path)
logging('#SENT: {}, Tau: {:.1f}, Length Err: {:.1f}, BLEU: {:.2f}, time: {:.1f}s'.format(
num_insts, tau, length_err / num_insts, b, time_cost), log)
if bleu < b:
bleu = b
taus = []
if len(taus) > 0:
logging('importance weighted translating...', log)
n_len = 3
iwk = 4
for tau in taus:
n_tr = 8 if tau > 1e-4 else 1
translations = []
lengths = []
length_err = 0
num_insts = 0
start_time = time.time()
for src, tgt, src_masks, tgt_masks in dataloader:
trans, lens = flownmt.translate_iw(src, src_masks, n_len=n_len, n_tr=n_tr, tau=tau, k=iwk)
translations.append(trans)
lengths.append(lens)
length_err += (lens.float() - tgt_masks.sum(dim=1)).abs().sum().item()
num_insts += src.size(0)
time_cost = time.time() - start_time
outfile = os.path.join(result_path, 'trans{}.t{:.1f}.mt'.format(epoch, tau))
dataset.dump_to_file(translations, lengths, outfile)
b = calc_bleu(dataloader.tgt_sort_origin_path, outfile, result_path)
logging('Temperature: {:.1f}, Length Err: {:.1f}, BLEU: {:.2f}, time: {:.1f}s'.format(tau, length_err / num_insts, b, time_cost), log)
if bleu < b:
bleu = b
return bleu
def reconstruct(epoch, dataset, dataloader, flownmt, result_path, log):
flownmt.eval()
recons = []
lengths = []
recon_loss = 0.
length_loss = 0.
length_loss_pred = 0.
length_err = 0.
num_insts = 0
num_words = 0
start_time = time.time()
for src, tgt, src_masks, tgt_masks in dataloader:
recon, recon_err, llen, lens, llen_pred = flownmt.reconstruct(src, tgt, src_masks, tgt_masks)
recon_loss += recon_err.sum().item()
length_loss += llen.sum().item()
length_loss_pred += llen_pred.sum().item()
length_err += (lens.float() - tgt_masks.sum(dim=1)).abs().sum().item()
num_insts += src.size(0)
num_words += tgt_masks.sum().item()
recons.append(recon)
lengths.append(tgt_masks.sum(dim=1).long())
logging('reconstruct time: {:.1f}s'.format(time.time() - start_time), log)
outfile = os.path.join(result_path, 'reconstruct{}.mt'.format(epoch))
dataset.dump_to_file(recons, lengths, outfile)
bleu = calc_bleu(dataloader.tgt_sort_origin_path, outfile, result_path)
recon_loss_per_word = recon_loss / num_words
recon_loss = recon_loss / num_insts
length_loss = length_loss / num_insts
length_loss_pred = length_loss_pred / num_insts
length_err = length_err / num_insts
logging('Reconstruct BLEU: {:.2f}, NLL: {:.2f} ({:.2f}), Length NLL: {:.2f} ({:.2f}), Err: {:.1f}'.format(
bleu, recon_loss, recon_loss_per_word, length_loss, length_loss_pred, length_err), log)
def eval(args, epoch, dataset, dataloader, flownmt):
flownmt.eval()
flownmt.sync()
# reconstruct
reconstruct(epoch, dataset, dataloader, flownmt, args.result_path, args.log)
# translate
bleu = translate(epoch, dataset, dataloader, flownmt, args.result_path, args.log)
recon_loss = 0.
kl_loss = 0.
length_loss = 0.
num_insts = 0
num_words = 0
test_k = 3
for src, tgt, src_masks, tgt_masks in dataloader:
recon, kl, llen = flownmt.loss(src, tgt, src_masks, tgt_masks, nsamples=test_k, eval=True)
recon_loss += recon.sum().item()
kl_loss += kl.sum().item()
length_loss += llen.sum().item()
num_insts += src.size(0)
num_words += tgt_masks.sum().item()
kl_loss = kl_loss / num_insts
recon_loss = recon_loss / num_insts
length_loss = length_loss / num_insts
nll = kl_loss + recon_loss
ppl = np.exp(nll * num_insts / num_words)
logging('Ave NLL: {:.2f} (recon: {:.2f}, kl: {:.2f}), len: {:.2f}, PPL: {:.2f}, BLEU: {:.2f}'.format(
nll, recon_loss, kl_loss, length_loss, ppl, bleu), args.log)
logging('-' * 100, args.log)
return bleu, nll, recon_loss, kl_loss, length_loss, ppl
def setup(args):
args.cuda = torch.cuda.is_available()
random_seed = args.seed + args.rank if args.rank >= 0 else args.seed
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
device = torch.device('cuda', args.local_rank) if args.cuda else torch.device('cpu')
if args.cuda:
torch.cuda.set_device(device)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.benchmark = False
model_path = args.model_path
args.checkpoint_name = os.path.join(model_path, 'checkpoint')
result_path = os.path.join(model_path, 'translations')
args.result_path = result_path
vocab_path = os.path.join(model_path, 'vocab')
data_path = args.data_path
args.world_size = int(os.environ["WORLD_SIZE"]) if args.rank >=0 else 0
print("Rank {}".format(args.rank), args)
if args.rank <= 0:
if not os.path.exists(model_path):
os.makedirs(model_path)
if not os.path.exists(result_path):
os.makedirs(result_path)
if not os.path.exists(vocab_path):
os.makedirs(vocab_path)
args.log = open(os.path.join(model_path, 'log.txt'), 'w')
if args.recover > 0:
params = json.load(open(os.path.join(model_path, 'config.json'), 'r'))
src_lang = params['src']
tgt_lang = params['tgt']
src_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(src_lang))
tgt_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(tgt_lang))
src_vocab_size = params['src_vocab_size']
tgt_vocab_size = params['tgt_vocab_size']
args.max_src_length = params['max_src_length']
args.max_tgt_length = params['max_tgt_length']
dataset = NMTDataSet(data_path, src_lang, tgt_lang,
src_vocab_path, tgt_vocab_path,
src_vocab_size, tgt_vocab_size,
subword=args.subword, create_vocab=False)
assert src_vocab_size == dataset.src_vocab_size
assert tgt_vocab_size == dataset.tgt_vocab_size
else:
params = json.load(open(args.config, 'r'))
src_lang = args.src
tgt_lang = args.tgt
src_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(src_lang))
tgt_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(tgt_lang))
create_vocab = args.create_vocab
src_max_vocab = params.pop('{}_vocab_size'.format(src_lang))
tgt_max_vocab = params.pop('{}_vocab_size'.format(tgt_lang))
args.max_src_length = params.pop('max_{}_length'.format(src_lang))
args.max_tgt_length = params.pop('max_{}_length'.format(tgt_lang))
dataset = NMTDataSet(data_path, src_lang, tgt_lang,
src_vocab_path, tgt_vocab_path,
src_max_vocab, tgt_max_vocab,
subword=args.subword, create_vocab=create_vocab)
params['src'] = src_lang
params['tgt'] = tgt_lang
params['src_vocab_size'] = dataset.src_vocab_size
params['tgt_vocab_size'] = dataset.tgt_vocab_size
params['max_src_length'] = args.max_src_length
params['max_tgt_length'] = args.max_tgt_length
params['src_pad_idx'] = dataset.src_pad_idx
params['tgt_pad_idx'] = dataset.tgt_pad_idx
if args.share_all_embeddings:
assert 'share_embed' not in params or params['share_embed'], 'share embedding args conflicts'
assert 'tie_weights' not in params or params['tie_weights'], 'tie weights args conflicts'
params['share_embed'] = True
params['tie_weights'] = True
else:
params.setdefault('share_embed', False)
params.setdefault('tie_weights', False)
json.dump(params, open(os.path.join(model_path, 'config.json'), 'w'), indent=2)
flownmt = FlowNMT.from_params(params)
flownmt.to(device)
args.length_unit = flownmt.length_unit
args.device = device
args.steps_per_epoch = 1000
return args, dataset, flownmt
def init_dataloader(args, dataset):
batch_by_tokens = args.loss_type == 'token'
train_iter = DataIterator(dataset, args.batch_size, batch_by_tokens, args.max_src_length, args.max_tgt_length,
5000, args.device, args.result_path, multi_scale=args.length_unit,
corpus="train", bucket_data=args.bucket_batch, rank=args.rank,
num_replicas=args.world_size)
if args.rank <= 0:
eval_batch = args.eval_batch_size
val_iter = DataIterator(dataset, eval_batch, batch_by_tokens, args.max_src_length, args.max_tgt_length,
1000, args.device, args.result_path, corpus="dev",
bucket_data=args.bucket_batch, multi_scale=args.length_unit)
test_iter = DataIterator(dataset, eval_batch, batch_by_tokens, args.max_src_length, args.max_tgt_length,
1000, args.device, args.result_path, corpus="test",
bucket_data=args.bucket_batch, multi_scale=args.length_unit)
else:
val_iter, test_iter = None, None
return train_iter, val_iter, test_iter
def init_model(args, train_iter, flownmt):
flownmt.eval()
init_batch_size = args.init_batch_size
if args.rank <= 0:
logging('Rank {}, init model: {} instances'.format(args.rank, init_batch_size), args.log)
else:
print('Rank {}, init model: {} instances'.format(args.rank, init_batch_size))
src_sents, tgt_sents, src_masks, tgt_masks = train_iter.get_batch(init_batch_size)
if args.rank <= 0:
logging("maximum sentence length (src, tgt): {}, {}".format(src_sents.size(1), tgt_sents.size(1)), args.log)
else:
print("maximum sentence length (src, tgt): {}, {}".format(src_sents.size(1), tgt_sents.size(1)))
flownmt.init(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0)
def init_posterior(args, train_iter, flownmt):
flownmt.eval()
init_batch_size = args.init_batch_size
if args.rank <= 0:
logging('Rank {}, init posterior: {} instances'.format(args.rank, init_batch_size), args.log)
else:
print('Rank {}, init posterior: {} instances'.format(args.rank, init_batch_size))
src_sents, tgt_sents, src_masks, tgt_masks = train_iter.get_batch(init_batch_size)
if args.rank <= 0:
logging("maximum sentence length (src, tgt): {}, {}".format(src_sents.size(1), tgt_sents.size(1)), args.log)
else:
print("maximum sentence length (src, tgt): {}, {}".format(src_sents.size(1), tgt_sents.size(1)))
flownmt.init_posterior(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0)
def init_prior(args, train_iter, flownmt):
flownmt.eval()
init_batch_size = args.init_batch_size
if args.rank <= 0:
logging('Rank {}, init prior: {} instances'.format(args.rank, init_batch_size), args.log)
else:
print('Rank {}, init prior: {} instances'.format(args.rank, init_batch_size))
src_sents, tgt_sents, src_masks, tgt_masks = train_iter.get_batch(init_batch_size)
if args.rank <= 0:
logging("maximum sentence length (src, tgt): {}, {}".format(src_sents.size(1), tgt_sents.size(1)), args.log)
else:
print("maximum sentence length (src, tgt): {}, {}".format(src_sents.size(1), tgt_sents.size(1)))
flownmt.init_prior(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0)
def pretrain_model(args, dataset, train_iter, val_iter, flownmt, zero_steps):
device = args.device
steps_per_epoch = args.steps_per_epoch
loss_ty_token = args.loss_type == 'token'
lr_decay = args.lr_decay
betas = (args.beta1, args.beta2)
eps = args.eps
amsgrad = args.amsgrad
weight_decay = args.weight_decay
grad_clip = args.grad_clip
batch_steps = max(1, args.batch_steps // 2)
log = args.log if args.rank <=0 else None
warmup_steps = min(4000, zero_steps)
optimizer, scheduler = get_optimizer(args.lr, flownmt.parameters(), betas, eps, amsgrad, weight_decay, lr_decay,
warmup_steps, init_lr=1e-7)
lr = scheduler.get_lr()[0]
recon_loss = torch.Tensor([0.]).to(device)
length_loss = torch.Tensor([0.]).to(device)
num_insts = torch.Tensor([0.]).to(device)
num_words = torch.Tensor([0.]).to(device)
num_nans = 0
num_back = 0
flownmt.train()
start_time = time.time()
if args.rank <= 0:
logging('Init Epoch: %d, lr=%.6f (%s), betas=(%.1f, %.3f), eps=%.1e, amsgrad=%s, l2=%.1e' % (
1, lr, lr_decay, betas[0], betas[1], eps, amsgrad, weight_decay), log)
for step, (src_sents, tgt_sents, src_masks, tgt_masks) in enumerate(train_iter):
batch_size = src_sents.size(0)
words = tgt_masks.sum().item()
recon_batch = 0.
llen_batch = 0.
optimizer.zero_grad()
src_sents = [src_sents, ] if batch_steps == 1 else src_sents.chunk(batch_steps, dim=0)
tgt_sents = [tgt_sents, ] if batch_steps == 1 else tgt_sents.chunk(batch_steps, dim=0)
src_masks = [src_masks, ] if batch_steps == 1 else src_masks.chunk(batch_steps, dim=0)
tgt_masks = [tgt_masks, ] if batch_steps == 1 else tgt_masks.chunk(batch_steps, dim=0)
# disable allreduce for accumulated gradient.
if args.rank >= 0:
flownmt.disable_allreduce()
for src, tgt, src_mask, tgt_mask in zip(src_sents[:-1], tgt_sents[:-1], src_masks[:-1], tgt_masks[:-1]):
recon, llen = flownmt.reconstruct_error(src, tgt, src_mask, tgt_mask)
recon = recon.sum()
llen = llen.sum()
if loss_ty_token:
loss = (recon + llen).div(words)
else:
loss = (recon + llen).div(batch_size)
loss.backward()
with torch.no_grad():
recon_batch += recon.item()
llen_batch += llen.item()
# enable allreduce for the last step.
if args.rank >= 0:
flownmt.enable_allreduce()
src, tgt, src_mask, tgt_mask = src_sents[-1], tgt_sents[-1], src_masks[-1], tgt_masks[-1]
recon, llen = flownmt.reconstruct_error(src, tgt, src_mask, tgt_mask)
recon = recon.sum()
llen = llen.sum()
if loss_ty_token:
loss = (recon + llen).div(words)
else:
loss = (recon + llen).div(batch_size)
loss.backward()
with torch.no_grad():
recon_batch += recon.item()
llen_batch += llen.item()
if grad_clip > 0:
grad_norm = clip_grad_norm_(flownmt.parameters(), grad_clip)
else:
grad_norm = total_grad_norm(flownmt.parameters())
if math.isnan(grad_norm):
num_nans += 1
else:
optimizer.step()
scheduler.step()
with torch.no_grad():
num_insts += batch_size
num_words += words
recon_loss += recon_batch
length_loss += llen_batch
if step % 10 == 0:
torch.cuda.empty_cache()
if step % args.log_interval == 0 and args.rank <= 0:
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
nums = num_insts.item()
train_recon = recon_loss.item() / nums if nums > 0 else 0
recon_per_word = recon_loss.item() / num_words.item() if nums > 0 else 0
train_llen = length_loss.item() / nums if nums > 0 else 0
curr_step = step % steps_per_epoch
curr_lr = scheduler.get_lr()[0]
log_info = '[{}/{} ({:.0f}%) lr={:.6f} {}] recon: {:.2f} ({:.2f}), len: {:.2f}'.format(
curr_step, steps_per_epoch, 100. * curr_step / steps_per_epoch, curr_lr, num_nans,
train_recon, recon_per_word,
train_llen)
sys.stdout.write(log_info)
sys.stdout.flush()
num_back = len(log_info)
if step % steps_per_epoch == 0 and step > 0 or step == zero_steps:
# new epoch
epoch = step // steps_per_epoch
lr = scheduler.get_lr()[0]
if args.rank >= 0:
dist.reduce(recon_loss, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(length_loss, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(num_insts, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(num_words, dst=0, op=dist.ReduceOp.SUM)
if args.rank <= 0:
nums = num_insts.item()
train_recon = recon_loss.item() / nums if nums > 0 else 0
recon_per_word = recon_loss.item() / num_words.item() if nums > 0 else 0
train_llen = length_loss.item() / nums if nums > 0 else 0
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
logging('Average recon: {:.2f}, ({:.2f}), len: {:.2f}, time: {:.1f}s'.format(
train_recon, recon_per_word, train_llen, time.time() - start_time), log)
logging('-' * 100, log)
with torch.no_grad():
reconstruct(epoch, dataset, val_iter, flownmt, args.result_path, log)
logging('-' * 100, log)
if step == zero_steps:
optimizer.zero_grad()
break
if args.rank <= 0:
logging('Init Epoch: %d, lr=%.6f (%s), betas=(%.1f, %.3f), eps=%.1e amsgrad=%s, l2=%.1e' % (
epoch + 1, lr, lr_decay, betas[0], betas[1], eps, amsgrad, weight_decay), log)
recon_loss = torch.Tensor([0.]).to(device)
length_loss = torch.Tensor([0.]).to(device)
num_insts = torch.Tensor([0.]).to(device)
num_words = torch.Tensor([0.]).to(device)
num_nans = 0
num_back = 0
flownmt.train()
start_time = time.time()
def train(args, dataset, train_iter, val_iter, test_iter, flownmt):
epochs = args.epochs
loss_ty_token = args.loss_type == 'token'
steps_per_epoch = args.steps_per_epoch
train_k = args.train_k
grad_clip = args.grad_clip
batch_steps = args.batch_steps
device = args.device
log = args.log if args.rank <=0 else None
kl_warmups = args.kl_warmup_steps
kl_annealing = lambda step: min(1.0, (step + 1) / float(kl_warmups)) if kl_warmups > 0 else 1.0
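# KL annealing sketch: with kl_warmup_steps=10000 the KL weight grows linearly with the step,
# e.g. 1e-4 at step 0, 0.5 at step 4999 and 1.0 from step 9999 onwards; with kl_warmup_steps=0
# the weight is held at 1.0 from the very first update.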
lr_decay = args.lr_decay
init_lr = args.lr
if lr_decay == 'expo':
lr_warmups = 0
elif lr_decay == 'inv_sqrt':
lr_warmups = 10000
else:
raise ValueError('unknown lr decay method: %s' % lr_decay)
betas = (args.beta1, args.beta2)
eps = args.eps
amsgrad = args.amsgrad
weight_decay = args.weight_decay
if args.recover > 0:
checkpoint_name = args.checkpoint_name + '{}.tar'.format(args.recover)
print(f"Rank = {args.rank}, loading from checkpoint {checkpoint_name}")
optimizer, scheduler = get_optimizer(args.lr, flownmt.parameters(), betas, eps, amsgrad=amsgrad,
weight_decay=weight_decay, lr_decay=lr_decay,
warmup_steps=lr_warmups, init_lr=init_lr)
checkpoint = torch.load(checkpoint_name, map_location=args.device)
epoch = checkpoint['epoch']
last_step = checkpoint['step']
flownmt.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
best_epoch = checkpoint['best_epoch']
best_bleu, test_bleu = checkpoint['best_bleu']
best_nll, test_nll = checkpoint['best_nll']
best_recon, test_recon = checkpoint['best_recon']
best_kl, test_kl = checkpoint['best_kl']
best_llen, test_llen = checkpoint['best_llen']
best_ppl, test_ppl = checkpoint['best_ppl']
del checkpoint
if args.rank <= 0:
with torch.no_grad():
logging('Evaluating after resuming model...', log)
eval(args, epoch, dataset, val_iter, flownmt)
else:
optimizer, scheduler = get_optimizer(args.lr, flownmt.parameters(), betas, eps, amsgrad=amsgrad,
weight_decay=weight_decay, lr_decay=lr_decay,
warmup_steps=lr_warmups, init_lr=init_lr)
epoch = 0
best_epoch = 0
best_bleu = 0.0
best_nll = 0.0
best_recon = 0.0
best_kl = 0.0
best_llen = 0.0
best_ppl = 0.0
last_step = -1
lr = scheduler.get_lr()[0]
recon_loss = torch.Tensor([0.]).to(device)
kl_loss = torch.Tensor([0.]).to(device)
length_loss = torch.Tensor([0.]).to(device)
num_insts = torch.Tensor([0.]).to(device)
num_words = torch.Tensor([0.]).to(device)
num_nans = 0
num_back = 0
flownmt.train()
start_time = time.time()
if args.rank <= 0:
logging('Epoch: %d (lr=%.6f (%s), betas=(%.1f, %.3f), eps=%.1e, amsgrad=%s, l2=%.1e, train_k=%d)' % (
epoch + 1, lr, lr_decay, betas[0], betas[1], eps, amsgrad, weight_decay, train_k), log)
for step, (src_sents, tgt_sents, src_masks, tgt_masks) in enumerate(train_iter):
if step <= last_step:
continue
optimizer.zero_grad()
batch_size = src_sents.size(0)
words = tgt_masks.sum().item()
recon_batch = 0
kl_batch = 0
llen_batch = 0
kl_weight = kl_annealing(step)
src_sents = [src_sents, ] if batch_steps == 1 else src_sents.chunk(batch_steps, dim=0)
tgt_sents = [tgt_sents, ] if batch_steps == 1 else tgt_sents.chunk(batch_steps, dim=0)
src_masks = [src_masks, ] if batch_steps == 1 else src_masks.chunk(batch_steps, dim=0)
tgt_masks = [tgt_masks, ] if batch_steps == 1 else tgt_masks.chunk(batch_steps, dim=0)
# disable allreduce for accumulated gradient.
if args.rank >= 0:
flownmt.disable_allreduce()
for src, tgt, src_mask, tgt_mask in zip(src_sents[:-1], tgt_sents[:-1], src_masks[:-1], tgt_masks[:-1]):
recon, kl, llen = flownmt.loss(src, tgt, src_masks=src_mask, tgt_masks=tgt_mask,
nsamples=train_k)
recon = recon.sum()
kl = kl.sum()
llen = llen.sum()
if loss_ty_token:
loss = (recon + kl * kl_weight + llen).div(words)
else:
loss = (recon + kl * kl_weight + llen).div(batch_size)
loss.backward()
with torch.no_grad():
recon_batch += recon.item()
kl_batch += kl.item()
llen_batch += llen.item()
# enable allreduce for the last step.
if args.rank >= 0:
flownmt.enable_allreduce()
src, tgt, src_mask, tgt_mask = src_sents[-1], tgt_sents[-1], src_masks[-1], tgt_masks[-1]
recon, kl, llen = flownmt.loss(src, tgt, src_masks=src_mask, tgt_masks=tgt_mask,
nsamples=train_k)
recon = recon.sum()
kl = kl.sum()
llen = llen.sum()
if loss_ty_token:
loss = (recon + kl * kl_weight + llen).div(words)
else:
loss = (recon + kl * kl_weight + llen).div(batch_size)
loss.backward()
with torch.no_grad():
recon_batch += recon.item()
kl_batch += kl.item()
llen_batch += llen.item()
if grad_clip > 0:
grad_norm = clip_grad_norm_(flownmt.parameters(), grad_clip)
else:
grad_norm = total_grad_norm(flownmt.parameters())
if math.isnan(grad_norm):
num_nans += 1
else:
optimizer.step()
scheduler.step()
num_insts += batch_size
num_words += words
kl_loss += kl_batch
recon_loss += recon_batch
length_loss += llen_batch
if step % 10 == 0:
torch.cuda.empty_cache()
if step % args.log_interval == 0 and args.rank <= 0:
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
nums = num_insts.item()
train_recon = recon_loss.item() / nums if nums > 0 else 0
train_kl = kl_loss.item() / nums if nums > 0 else 0
train_llen = length_loss.item() / nums if nums > 0 else 0
train_nll = train_recon + train_kl
train_ppl = np.exp(train_nll * nums / num_words.item()) if nums > 0 else 0
train_ppl = float('inf') if train_ppl > 10000 else train_ppl
curr_lr = scheduler.get_lr()[0]
curr_step = step if step == steps_per_epoch else step % steps_per_epoch
log_info = '[{}/{} ({:.0f}%) lr={:.6f}, klw={:.2f} {}] NLL: {:.2f} (recon: {:.2f}, kl: {:.2f}), len: {:.2f}, PPL: {:.2f}'.format(
curr_step, steps_per_epoch, 100. * curr_step / steps_per_epoch, curr_lr, kl_weight, num_nans,
train_nll, train_recon, train_kl, train_llen, train_ppl)
sys.stdout.write(log_info)
sys.stdout.flush()
num_back = len(log_info)
if step % steps_per_epoch == 0 and step > 0:
# new epoch
epoch = step // steps_per_epoch
lr = scheduler.get_lr()[0]
if args.rank >= 0:
dist.reduce(recon_loss, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(kl_loss, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(length_loss, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(num_insts, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(num_words, dst=0, op=dist.ReduceOp.SUM)
if args.rank <= 0:
nums = num_insts.item()
train_recon = recon_loss.item() / nums if nums > 0 else 0
train_kl = kl_loss.item() / nums if nums > 0 else 0
train_llen = length_loss.item() / nums if nums > 0 else 0
train_nll = train_recon + train_kl
train_ppl = np.exp(train_nll * nums / num_words.item()) if nums > 0 else 0
train_ppl = float('inf') if train_ppl > 10000 else train_ppl
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
logging('Average NLL: {:.2f} (recon: {:.2f}, kl: {:.2f}), len: {:.2f}, PPL: {:.2f}, time: {:.1f}s'.format(
train_nll, train_recon, train_kl, train_llen, train_ppl, time.time() - start_time), log)
logging('-' * 100, log)
with torch.no_grad():
logging('Evaluating validation data...', log)
bleu, nll, recon, kl, llen, ppl = eval(args, epoch, dataset, val_iter, flownmt)
if bleu > best_bleu or best_epoch == 0 or ppl < best_ppl:
flownmt.save(args.model_path)
best_bleu = bleu
best_epoch = epoch
best_nll = nll
best_recon = recon
best_kl = kl
best_llen = llen
best_ppl = ppl
logging('Evaluating test data...', log)
test_bleu, test_nll, test_recon, test_kl, test_llen, test_ppl = eval(args, epoch, dataset, test_iter, flownmt)
logging('Best Dev NLL: {:.2f} (recon: {:.2f}, kl: {:.2f}), len: {:.2f}, PPL: {:.2f}, BLEU: {:.2f}, epoch: {}'.format(
best_nll, best_recon, best_kl, best_llen, best_ppl, best_bleu, best_epoch), log)
logging('Best Test NLL: {:.2f} (recon: {:.2f}, kl: {:.2f}), len: {:.2f}, PPL: {:.2f}, BLEU: {:.2f}, epoch: {}'.format(
test_nll, test_recon, test_kl, test_llen, test_ppl, test_bleu, best_epoch), log)
logging('=' * 100, log)
# save checkpoint
checkpoint_name = args.checkpoint_name + '{}.tar'.format(epoch)
torch.save({'epoch': epoch,
'step': step,
'model': flownmt.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
'best_bleu': [best_bleu, test_bleu],
'best_epoch': best_epoch,
'best_nll': [best_nll, test_nll],
'best_recon': [best_recon, test_recon],
'best_kl': [best_kl, test_kl],
'best_llen': [best_llen, test_llen],
'best_ppl': [best_ppl, test_ppl]}, checkpoint_name)
if epoch == epochs:
break
if args.rank <= 0:
logging('Epoch: %d (lr=%.6f (%s), betas=(%.1f, %.3f), eps=%.1e, amsgrad=%s, l2=%.1e, train_k=%d)' % (
epoch + 1, lr, lr_decay, betas[0], betas[1], eps, amsgrad, weight_decay, train_k), log)
recon_loss = torch.Tensor([0.]).to(device)
kl_loss = torch.Tensor([0.]).to(device)
length_loss = torch.Tensor([0.]).to(device)
num_insts = torch.Tensor([0.]).to(device)
num_words = torch.Tensor([0.]).to(device)
num_nans = 0
num_back = 0
flownmt.train()
start_time = time.time()
def main(args):
args, dataset, flownmt = setup(args)
train_iter, val_iter, test_iter = init_dataloader(args, dataset)
pretrain = args.recover < 0 and args.init_steps > 0
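# Staged training: `pretrain` is True only when starting from scratch (recover < 0) with
# init_steps > 0. In that case the posterior and decoder are initialized and trained alone for
# init_steps updates (pretrain_model), the prior flow is then initialized from them
# (init_prior), the core is checkpointed, and full joint training (train) begins.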
checkpoint_name = args.checkpoint_name + '{}.tar'.format(0)
if args.rank <= 0:
# initialize model (rank 0 or -1)
# number of parameters
logging('Rank %d # of Parameters: %d' % (args.rank, sum([param.numel() for param in flownmt.parameters()])), args.log)
if args.recover == 0:
flownmt.load_core(checkpoint_name, args.device, load_prior=True)
with torch.no_grad():
reconstruct(0, dataset, val_iter, flownmt, args.result_path, args.log)
logging('-' * 100, args.log)
if args.rank >= 0:
flownmt.init_distributed(args.rank, args.local_rank)
if pretrain:
init_posterior(args, train_iter, flownmt)
elif args.recover < 0:
init_model(args, train_iter, flownmt)
if args.rank >= 0:
flownmt.sync_params()
if pretrain:
zero_steps = args.init_steps
pretrain_model(args, dataset, train_iter, val_iter, flownmt, zero_steps)
init_prior(args, train_iter, flownmt)
if args.rank >= 0:
flownmt.sync_params()
if args.rank <= 0:
flownmt.save_core(checkpoint_name)
train(args, dataset, train_iter, val_iter, test_iter, flownmt)
if __name__ == "__main__":
args = parse_args()
assert args.rank == -1 and args.local_rank == 0, 'single process should have rank -1 and local rank 0, but got rank {} and local rank {}'.format(args.rank, args.local_rank)
main(args)
| 34,259 | 41.559006 | 151 | py |
flowseq | flowseq-master/experiments/slurm.py | import sys
import os
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import torch.multiprocessing as mp
import experiments.options as options
from experiments.nmt import main as single_process_main
def main():
args = options.parse_distributed_args()
args_dict = vars(args)
args_dict.pop('master_addr')
args_dict.pop('master_port')
args_dict.pop('nnodes')
args_dict.pop('nproc_per_node')
args_dict.pop('node_rank')
current_env = os.environ
nnodes = int(current_env['SLURM_NNODES'])
dist_world_size = int(current_env['SLURM_NTASKS'])
args.rank = int(current_env['SLURM_PROCID'])
args.local_rank = int(current_env['SLURM_LOCALID'])
print('start process: rank={}({}), master addr={}, port={}, nnodes={}, world size={}'.format(
args.rank, args.local_rank, current_env["MASTER_ADDR"], current_env["MASTER_PORT"], nnodes, dist_world_size))
current_env["WORLD_SIZE"] = str(dist_world_size)
create_vocab = args_dict.pop('create_vocab')
assert not create_vocab
args.create_vocab = False
batch_size = args.batch_size // dist_world_size
args.batch_size = batch_size
single_process_main(args)
if __name__ == "__main__":
mp.set_start_method('forkserver')
main()
| 1,374 | 27.645833 | 117 | py |
flowseq | flowseq-master/experiments/translate.py | import os
import sys
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import time
import json
import random
import numpy as np
import torch
from flownmt.data import NMTDataSet, DataIterator
from flownmt import FlowNMT
from experiments.options import parse_translate_args
def calc_bleu(fref, fmt, result_path):
script = os.path.join(current_path, 'scripts/multi-bleu.perl')
temp = os.path.join(result_path, 'tmp')
os.system("perl %s %s < %s > %s" % (script, fref, fmt, temp))
bleu = open(temp, 'r').read().strip()
bleu = bleu.split(",")[0].split("=")
if len(bleu) < 2:
return 0.0
bleu = float(bleu[1].strip())
return bleu
def translate_argmax(dataset, dataloader, flownmt, result_path, outfile, tau, n_tr):
flownmt.eval()
translations = []
lengths = []
length_err = 0
num_insts = 0
start_time = time.time()
num_back = 0
for step, (src, tgt, src_masks, tgt_masks) in enumerate(dataloader):
trans, lens = flownmt.translate_argmax(src, src_masks, n_tr=n_tr, tau=tau)
translations.append(trans)
lengths.append(lens)
length_err += (lens.float() - tgt_masks.sum(dim=1)).abs().sum().item()
num_insts += src.size(0)
if step % 10 == 0:
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
log_info = 'argmax translating (tau={:.1f}, n_tr={})...{}'.format(tau, n_tr, num_insts)
sys.stdout.write(log_info)
sys.stdout.flush()
num_back = len(log_info)
print('time: {:.1f}s'.format(time.time() - start_time))
outfile = os.path.join(result_path, outfile)
dataset.dump_to_file(translations, lengths, outfile)
bleu = calc_bleu(dataloader.tgt_sort_origin_path, outfile, result_path)
print('#SENT: {}, Length Err: {:.1f}, BLEU: {:.2f}'.format(num_insts, length_err / num_insts, bleu))
def translate_iw(dataset, dataloader, flownmt, result_path, outfile, tau, n_len, n_tr):
flownmt.eval()
iwk = 4
translations = []
lengths = []
length_err = 0
num_insts = 0
start_time = time.time()
num_back = 0
for step, (src, tgt, src_masks, tgt_masks) in enumerate(dataloader):
trans, lens = flownmt.translate_iw(src, src_masks, n_len=n_len, n_tr=n_tr, tau=tau, k=iwk)
translations.append(trans)
lengths.append(lens)
length_err += (lens.float() - tgt_masks.sum(dim=1)).abs().sum().item()
num_insts += src.size(0)
if step % 10 == 0:
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
log_info = 'importance weighted translating (tau={:.1f}, n_len={}, n_tr={})...{}'.format(tau, n_len, n_tr, num_insts)
sys.stdout.write(log_info)
sys.stdout.flush()
num_back = len(log_info)
print('time: {:.1f}s'.format(time.time() - start_time))
outfile = os.path.join(result_path, outfile)
dataset.dump_to_file(translations, lengths, outfile)
bleu = calc_bleu(dataloader.tgt_sort_origin_path, outfile, result_path)
print('#SENT: {}, Length Err: {:.1f}, BLEU: {:.2f}'.format(num_insts, length_err / num_insts, bleu))
def sample(dataset, dataloader, flownmt, result_path, outfile, tau, n_len, n_tr):
flownmt.eval()
lengths = []
translations = []
num_insts = 0
start_time = time.time()
num_back = 0
for step, (src, tgt, src_masks, tgt_masks) in enumerate(dataloader):
trans, lens = flownmt.translate_sample(src, src_masks, n_len=n_len, n_tr=n_tr, tau=tau)
translations.append(trans)
lengths.append(lens)
num_insts += src.size(0)
if step % 10 == 0:
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
log_info = 'sampling (tau={:.1f}, n_len={}, n_tr={})...{}'.format(tau, n_len, n_tr, num_insts)
sys.stdout.write(log_info)
sys.stdout.flush()
num_back = len(log_info)
print('time: {:.1f}s'.format(time.time() - start_time))
outfile = os.path.join(result_path, outfile)
dataset.dump_to_file(translations, lengths, outfile, post_edit=False)
def setup(args):
args.cuda = torch.cuda.is_available()
random_seed = args.seed
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
device = torch.device('cuda', 0) if args.cuda else torch.device('cpu')
if args.cuda:
torch.cuda.set_device(device)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.benchmark = False
model_path = args.model_path
result_path = os.path.join(model_path, 'translations')
args.result_path = result_path
params = json.load(open(os.path.join(model_path, 'config.json'), 'r'))
src_lang = params['src']
tgt_lang = params['tgt']
data_path = args.data_path
vocab_path = os.path.join(model_path, 'vocab')
src_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(src_lang))
tgt_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(tgt_lang))
src_vocab_size = params['src_vocab_size']
tgt_vocab_size = params['tgt_vocab_size']
args.max_src_length = params.pop('max_src_length')
args.max_tgt_length = params.pop('max_tgt_length')
dataset = NMTDataSet(data_path, src_lang, tgt_lang,
src_vocab_path, tgt_vocab_path,
src_vocab_size, tgt_vocab_size,
subword=args.subword, create_vocab=False)
assert src_vocab_size == dataset.src_vocab_size
assert tgt_vocab_size == dataset.tgt_vocab_size
flownmt = FlowNMT.load(model_path, device=device)
args.length_unit = flownmt.length_unit
args.device = device
return args, dataset, flownmt
def init_dataloader(args, dataset):
eval_batch = args.batch_size
val_iter = DataIterator(dataset, eval_batch, 0, args.max_src_length, args.max_tgt_length, 1000, args.device, args.result_path,
bucket_data=args.bucket_batch, multi_scale=args.length_unit, corpus="dev")
test_iter = DataIterator(dataset, eval_batch, 0, args.max_src_length, args.max_tgt_length, 1000, args.device, args.result_path,
bucket_data=args.bucket_batch, multi_scale=args.length_unit, corpus="test")
return val_iter, test_iter
def main(args):
args, dataset, flownmt = setup(args)
print(args)
val_iter, test_iter = init_dataloader(args, dataset)
result_path = args.result_path
if args.decode == 'argmax':
tau = args.tau
n_tr = args.ntr
outfile = 'argmax.t{:.1f}.ntr{}.dev.mt'.format(tau, n_tr)
translate_argmax(dataset, val_iter, flownmt, result_path, outfile, tau, n_tr)
outfile = 'argmax.t{:.1f}.ntr{}.test.mt'.format(tau, n_tr)
translate_argmax(dataset, test_iter, flownmt, result_path, outfile, tau, n_tr)
elif args.decode == 'iw':
tau = args.tau
n_len = args.nlen
n_tr = args.ntr
outfile = 'iw.t{:.1f}.nlen{}.ntr{}.dev.mt'.format(tau, n_len, n_tr)
translate_iw(dataset, val_iter, flownmt, result_path, outfile, tau, n_len, n_tr)
outfile = 'iw.t{:.1f}.nlen{}.ntr{}.test.mt'.format(tau, n_len, n_tr)
translate_iw(dataset, test_iter, flownmt, result_path, outfile, tau, n_len, n_tr)
else:
assert not args.bucket_batch
tau = args.tau
n_len = args.nlen
n_tr = args.ntr
outfile = 'sample.t{:.1f}.nlen{}.ntr{}.dev.mt'.format(tau, n_len, n_tr)
sample(dataset, val_iter, flownmt, result_path, outfile, tau, n_len, n_tr)
outfile = 'sample.t{:.1f}.nlen{}.ntr{}.test.mt'.format(tau, n_len, n_tr)
sample(dataset, test_iter, flownmt, result_path, outfile, tau, n_len, n_tr)
if __name__ == "__main__":
args = parse_translate_args()
with torch.no_grad():
main(args) | 8,142 | 39.311881 | 131 | py |
flowseq | flowseq-master/experiments/options.py | import os, sys
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser(description='FlowNMT')
parser.add_argument('--rank', type=int, default=-1, metavar='N', help='rank of the process in all distributed processes')
parser.add_argument("--local_rank", type=int, default=0, metavar='N', help='rank of the process in the machine')
parser.add_argument('--config', type=str, help='config file', required=True)
parser.add_argument('--batch_size', type=int, default=512, metavar='N',
help='input batch size for training (default: 512)')
parser.add_argument('--eval_batch_size', type=int, default=4, metavar='N',
help='input batch size for eval (default: 4)')
parser.add_argument('--batch_steps', type=int, default=1, metavar='N',
help='number of steps per batch; the batch size of each step is batch_size / batch_steps (default: 1)')
parser.add_argument('--init_batch_size', type=int, default=1024, metavar='N',
help='number of instances for model initialization (default: 1024)')
parser.add_argument('--epochs', type=int, default=500, metavar='N', help='number of epochs to train')
parser.add_argument('--kl_warmup_steps', type=int, default=10000, metavar='N', help='number of steps to warm up KL weight(default: 10000)')
parser.add_argument('--init_steps', type=int, default=5000, metavar='N', help='number of steps to train decoder (default: 5000)')
parser.add_argument('--seed', type=int, default=65537, metavar='S', help='random seed (default: 65537)')
parser.add_argument('--loss_type', choices=['sentence', 'token'], default='sentence',
help='loss type (default: sentence)')
parser.add_argument('--train_k', type=int, default=1, metavar='N', help='training K (default: 1)')
parser.add_argument('--log_interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status')
parser.add_argument('--lr_decay', choices=['inv_sqrt', 'expo'], help='lr decay method', default='inv_sqrt')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 of Adam')
parser.add_argument('--beta2', type=float, default=0.999, help='beta2 of Adam')
parser.add_argument('--eps', type=float, default=1e-6, help='eps of Adam')
parser.add_argument('--weight_decay', type=float, default=0.0, help='weight for l2 norm decay')
parser.add_argument('--amsgrad', action='store_true', help='AMS Grad')
parser.add_argument('--grad_clip', type=float, default=0, help='max norm for gradient clip (default 0: no clip)')
parser.add_argument('--model_path', help='path for saving model file.', required=True)
parser.add_argument('--data_path', help='path for data file.', default=None)
parser.add_argument('--src', type=str, help='source language code', required=True)
parser.add_argument('--tgt', type=str, help='target language code', required=True)
parser.add_argument('--create_vocab', action='store_true', help='create vocabulary.')
parser.add_argument('--share_all_embeddings', action='store_true', help='share source, target and output embeddings')
parser.add_argument("--subword", type=str, default="joint-bpe", choices=['joint-bpe', 'sep-bpe', 'word', 'bert-bpe', 'joint-spm'])
parser.add_argument('--recover', type=int, default=-1, help='recover the model from disk.')
parser.add_argument("--bucket_batch", type=int, default=0, help="whether bucket data based on tgt length in batching")
return parser.parse_args()
def parse_translate_args():
parser = ArgumentParser(description='FlowNMT')
parser.add_argument('--batch_size', type=int, default=512, metavar='N', help='input batch size for training (default: 512)')
parser.add_argument('--seed', type=int, default=524287, metavar='S', help='random seed (default: 524287)')
parser.add_argument('--model_path', help='path for saving model file.', required=True)
parser.add_argument('--data_path', help='path for data file.', default=None)
parser.add_argument("--subword", type=str, default="joint-bpe", choices=['joint-bpe', 'sep-bpe', 'word', 'bert-bpe', 'joint-spm'])
parser.add_argument("--bucket_batch", type=int, default=0, help="whether bucket data based on tgt length in batching")
parser.add_argument('--decode', choices=['argmax', 'iw', 'sample'], help='decoding algorithm', default='argmax')
parser.add_argument('--tau', type=float, default=0.0, metavar='S', help='temperature for iw decoding (default: 0.)')
parser.add_argument('--nlen', type=int, default=3, help='number of length candidates.')
parser.add_argument('--ntr', type=int, default=1, help='number of samples per length candidate.')
return parser.parse_args()
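# Example invocation of experiments/translate.py with these options (hypothetical paths/values):
# python -u translate.py --model_path models/wmt14_ende --data_path data/wmt14_ende \
#     --decode iw --tau 0.1 --nlen 3 --ntr 4 --bucket_batch 1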
def parse_distributed_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="Dist FlowNMT")
# Optional arguments for the launch helper
parser.add_argument("--nnodes", type=int, default=1,
help="The number of nodes to use for distributed "
"training")
parser.add_argument("--node_rank", type=int, default=0,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--nproc_per_node", type=int, default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.")
parser.add_argument("--master_addr", default="127.0.0.1", type=str,
help="Master node (rank 0)'s address, should be either "
"the IP address or the hostname of node 0, for "
"single node multi-proc training, the "
"--master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port", default=29500, type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communciation during distributed "
"training")
# arguments for flownmt model
parser.add_argument('--config', type=str, help='config file', required=True)
parser.add_argument('--batch_size', type=int, default=512, metavar='N',
help='input batch size for training (default: 512)')
parser.add_argument('--eval_batch_size', type=int, default=4, metavar='N',
help='input batch size for eval (default: 4)')
parser.add_argument('--init_batch_size', type=int, default=1024, metavar='N',
help='number of instances for model initialization (default: 1024)')
parser.add_argument('--batch_steps', type=int, default=1, metavar='N',
help='number of steps per batch; the batch size of each step is batch_size / batch_steps (default: 1)')
parser.add_argument('--epochs', type=int, default=500, metavar='N', help='number of epochs to train')
parser.add_argument('--kl_warmup_steps', type=int, default=10000, metavar='N',
help='number of steps to warm up KL weight (default: 10000)')
parser.add_argument('--init_steps', type=int, default=5000, metavar='N',
help='number of steps to train decoder (default: 5000)')
parser.add_argument('--seed', type=int, default=65537, metavar='S', help='random seed (default: 65537)')
parser.add_argument('--loss_type', choices=['sentence', 'token'], default='sentence',
help='loss type (default: sentence)')
parser.add_argument('--train_k', type=int, default=1, metavar='N', help='training K (default: 1)')
parser.add_argument('--log_interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--lr_decay', choices=['inv_sqrt', 'expo'], help='lr decay method', default='inv_sqrt')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 of Adam')
parser.add_argument('--beta2', type=float, default=0.999, help='beta2 of Adam')
parser.add_argument('--eps', type=float, default=1e-6, help='eps of Adam')
parser.add_argument('--weight_decay', type=float, default=0.0, help='weight for l2 norm decay')
parser.add_argument('--amsgrad', action='store_true', help='AMS Grad')
    parser.add_argument('--grad_clip', type=float, default=0, help='max norm for gradient clip (default 0: no clip)')
parser.add_argument('--model_path', help='path for saving model file.', required=True)
parser.add_argument('--data_path', help='path for data file.', default=None)
parser.add_argument('--src', type=str, help='source language code', required=True)
parser.add_argument('--tgt', type=str, help='target language code', required=True)
parser.add_argument('--create_vocab', action='store_true', help='create vocabulary.')
parser.add_argument('--share_all_embeddings', action='store_true', help='share source, target and output embeddings')
parser.add_argument("--subword", type=str, default="joint-bpe",
choices=['joint-bpe', 'sep-bpe', 'word', 'bert-bpe'])
parser.add_argument("--bucket_batch", type=int, default=0,
help="whether bucket data based on tgt length in batching")
parser.add_argument('--recover', type=int, default=-1, help='recover the model from disk.')
return parser.parse_args()
| 9,967 | 72.837037 | 143 | py |
flowseq | flowseq-master/experiments/distributed.py | import sys
import os
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import json
import signal
import threading
import torch
from flownmt.data import NMTDataSet
import experiments.options as options
from experiments.nmt import main as single_process_main
def create_dataset(args):
model_path = args.model_path
if not os.path.exists(model_path):
os.makedirs(model_path)
result_path = os.path.join(model_path, 'translations')
if not os.path.exists(result_path):
os.makedirs(result_path)
vocab_path = os.path.join(model_path, 'vocab')
if not os.path.exists(vocab_path):
os.makedirs(vocab_path)
data_path = args.data_path
src_lang = args.src
tgt_lang = args.tgt
src_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(src_lang))
tgt_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(tgt_lang))
params = json.load(open(args.config, 'r'))
src_max_vocab = params['{}_vocab_size'.format(src_lang)]
tgt_max_vocab = params['{}_vocab_size'.format(tgt_lang)]
NMTDataSet(data_path, src_lang, tgt_lang, src_vocab_path, tgt_vocab_path, src_max_vocab, tgt_max_vocab,
subword=args.subword, create_vocab=True)
def main():
args = options.parse_distributed_args()
args_dict = vars(args)
nproc_per_node = args_dict.pop('nproc_per_node')
nnodes = args_dict.pop('nnodes')
node_rank = args_dict.pop('node_rank')
# world size in terms of number of processes
dist_world_size = nproc_per_node * nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ
current_env["MASTER_ADDR"] = args_dict.pop('master_addr')
current_env["MASTER_PORT"] = str(args_dict.pop('master_port'))
current_env["WORLD_SIZE"] = str(dist_world_size)
create_vocab = args_dict.pop('create_vocab')
if create_vocab:
create_dataset(args)
args.create_vocab = False
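    # split the global batch size evenly across the distributed processes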
batch_size = args.batch_size // dist_world_size
args.batch_size = batch_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
processes = []
for local_rank in range(0, nproc_per_node):
# each process's rank
dist_rank = nproc_per_node * node_rank + local_rank
args.rank = dist_rank
args.local_rank = local_rank
process = mp.Process(target=run, args=(args, error_queue, ), daemon=True)
process.start()
error_handler.add_child(process.pid)
processes.append(process)
for process in processes:
process.join()
def run(args, error_queue):
try:
single_process_main(args)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.rank, traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
self.children_pids.append(pid)
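    # the listener thread blocks on the error queue; the first reported exception wakes the parent via SIGUSR1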
def error_listener(self):
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = "\n\n-- Tracebacks above this line can probably be ignored --\n\n"
msg += original_trace
raise Exception(msg)
if __name__ == "__main__":
main()
| 4,220 | 30.736842 | 107 | py |
DataCovVac | DataCovVac-main/covvac-code/run_make_adj.py | #!/usr/bin/env python
"""Load and save and adjacency matrix.
Load adjacency matrix and define partitions.
Compute:
-------
- adjacency matrix
- community structure
Todo:
----
- country discovery
"""
import csv
import json
import pathlib
import networkx as nx
import numpy as np
from scipy import sparse
import cov
def main():
"""Do the MAIN."""
data = cov.load_range("2020-01-01", "2021-10-01")
out_folder = pathlib.Path("data")
for tau in [-1, 0, 1]:
print(r": ", tau)
# check vaccine critic adjacency
# adjacency, nodes, discarted = cov.adjacency(data, tau=tau, fix_sources=True)
transition, sstate, nodes, discarted = cov.adjacency(
data, tau=tau, fix_sources="symmetrize", return_factors=True
)
txt = ""
adjacency = transition @ sparse.diags(sstate)
sparse.save_npz(out_folder / f"adjacency{txt}-tau_{tau}_matrix.npz", adjacency)
sparse.save_npz(
out_folder / f"adjacency{txt}-tau_{tau}_transition.npz", transition
)
np.savez_compressed(
out_folder / f"adjacency-tau_{tau}_steadystate.npz", steadystate=sstate
)
# check full graph
graph = nx.from_scipy_sparse_matrix(adjacency, create_using=nx.DiGraph)
community = cov.directed_partition(graph)
nodes = [
{"id": nid, "part": f"C{community[nindx]}", "indx": nindx}
for nindx, nid in nodes.items()
] + [
{"id": nid, "part": f"X{i}", "indx": None}
for i, comp in enumerate(discarted)
for nid in comp
]
with open(out_folder / f"adjacency{txt}-tau_{tau}_nodes.json", "wt") as fout:
json.dump({"nodes": nodes, "discarted": discarted}, fout)
inv_nodes = {
node["indx"]: node["id"] for node in nodes if node["indx"] is not None
}
edgelist = [
(inv_nodes[e1], inv_nodes[e2], w if w is not None else 1.0)
for e1, e2, w in graph.edges(data="weight")
]
with open(out_folder / f"adjacency{txt}-tau_{tau}_edgelist.tsv", "wt") as fout:
writer = csv.writer(fout, dialect=csv.excel_tab)
writer.writerows(edgelist)
if __name__ == "__main__":
main()
| 2,280 | 27.873418 | 87 | py |
DataCovVac | DataCovVac-main/covvac-code/run_stats_url_media.py | #!/usr/bin/env python3
"""
File: run_stats_url_media.py
Author: Mauro Faccin
Email: [email protected]
Description: Fraction of tweets and retweets with urls from the critics and media sets.
"""
import json
from dataclasses import dataclass, field
import numpy as np
import tqdm
from scipy import sparse
import cov
import cov_utils
@dataclass
class CommStats:
"""Class for community stats."""
ndays: int = 0
data_comm: list = field(default_factory=list)
data_feat: list = field(default_factory=list)
comm2indx: dict = field(default_factory=dict)
feat2indx: dict = field(default_factory=dict)
timedata: dict = field(default_factory=dict)
def update(self, part, feat, index):
"""Update inner state.
Parameters
----------
part : str
partition class
feat : str
feature
index : int
temporal index
"""
if feat == "":
return
self.comm2indx.setdefault(part, len(self.comm2indx))
self.feat2indx.setdefault(feat, len(self.feat2indx))
# temp counts of features
self.timedata.setdefault(feat, np.zeros(self.ndays))
self.timedata[feat][index] += 1
# partition to feture link
self.data_comm.append(part)
self.data_feat.append(feat)
def sparse(self):
"""Return a sparse matrix.
Returns
-------
matrix : sparse.csr_matrix
matrix of links from communities to features.
"""
return sparse.csr_matrix(
(
np.ones_like(self.data_comm),
(
[self.comm2indx[c] for c in self.data_comm],
[self.feat2indx[f] for f in self.data_feat],
),
),
shape=(len(self.comm2indx), len(self.feat2indx)),
dtype=int,
)
def sort(self, communities, features):
self.comm2indx = {c: i for i, c in enumerate(communities)}
self.feat2indx = {f: i for i, f in enumerate(features)}
def size(self):
return len(self.data_comm)
def temporal(self):
inv_map = {v: k for k, v in self.feat2indx.items()}
return np.stack([self.timedata[inv_map[i]] for i in sorted(inv_map)])
def iter_daily(day1, day2):
"""Iterate over tweets and retweets sorting by `created_at`.
Parameters
----------
day1 : str or date
day
day2 : str or date
day
Returns
-------
output : list
tweets and retweets.
"""
urls = cov.Urls()
dates = {d.isoformat(): id for id, d in enumerate(cov_utils.daterange(day1, day2))}
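    # daily counts split by content: row 0 = no coded URL, row 1 = vaccine-critical (PRE/POST), row 2 = media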
tweets = np.zeros((3, len(dates)))
retweets = np.zeros((3, len(dates)))
comm_stats = [
{
k: CommStats(len(dates))
for k in ["urls_pre", "urls_post", "urls_media", "hashtags"]
}
for _ in range(3)
]
partition = []
for tau in [-1, 0, 1]:
with open(f"data/adjacency-tau_{tau}_nodes.json", "rt") as fin:
partition.append({u["id"]: u["part"] for u in json.load(fin)["nodes"]})
urls_set = set()
hash_set = set()
for day in tqdm.tqdm(dates):
daily = cov.load_day(day)
for tid, tweet, rtws in daily.tweets.iter():
for i, (indx, isoday) in enumerate(check_urls(tweet, rtws, urls)):
if i == 0:
tweets[indx, dates[day]] += 1
else:
try:
retweets[indx, dates[isoday]] += 1
except KeyError:
pass
for kind, url, isoday, uid in check_url_stats(tweet, rtws, urls):
urls_set.add(url)
for ipart, part in enumerate(partition):
if uid in part and isoday in dates:
comm_stats[ipart][kind].update(part[uid], url, dates[isoday])
for hashtag, isoday, uid in check_hash_stats(tweet, rtws):
hash_set.add(hashtag)
for ipart, part in enumerate(partition):
if uid in part and isoday in dates:
comm_stats[ipart]["hashtags"].update(
part[uid], hashtag, dates[isoday]
)
urls_set = sorted(urls_set)
hash_set = sorted(hash_set)
for stats, part in zip(comm_stats, partition):
part_set = sorted(set(part.values()))
for kind, stat in stats.items():
if kind[:3] == "url":
stat.sort(part_set, urls_set)
else:
stat.sort(part_set, hash_set)
return (dates, tweets, retweets), comm_stats
def check_urls(tweet, rtws, urls):
url = set(urls.is_coded(tweet))
if len(url) == 0:
indx = 0
elif "PRE" in url or "POST" in url:
indx = 1
elif "MEDIA" in url:
indx = 2
yield indx, tweet["created_at"][:10]
for rtw in rtws:
yield indx, rtw["created_at"][:10]
def check_url_stats(tweet, rtws, urls):
for url in tweet.get("links", "").split("|"):
url = urls.netloc(url.lower())
if url in urls.pre:
kind = "urls_pre"
elif url in urls.post:
kind = "urls_post"
elif url in urls.media:
kind = "urls_media"
else:
kind = None
        if kind is not None:
            yield kind, url, tweet["created_at"][:10], tweet["from_user_id"]
            for rtw in rtws:
                yield kind, url, rtw["created_at"][:10], rtw["from_user_id"]
def check_hash_stats(tweet, rtws):
for hash in tweet.get("hashtags", "").split("|"):
yield hash, tweet["created_at"][:10], tweet["from_user_id"]
for rtw in rtws:
yield hash, rtw["created_at"][:10], rtw["from_user_id"]
def main():
"""Do the main."""
(days, tweets, retweets), comm_stats = iter_daily("2020-01-01", "2021-10-01")
cov_utils.dump_csv(
"data/daily_stats.tsv",
[
{
"day": day,
"twt_nourls": t[0],
"twt_crit": t[1],
"twt_media": t[2],
"rtwt_nourls": rt[0],
"rtwt_crit": rt[1],
"rtwt_media": rt[2],
}
for day, t, rt in zip(days, tweets.T, retweets.T)
],
)
for tau, data in zip([-1, 0, 1], comm_stats):
for key, dt in data.items():
print(tau, key)
sparse.save_npz(f"data/stats_comm_tau_{tau}_{key}.npz", dt.sparse())
with open(f"data/stats_comm_tau_{tau}_{key}_map.json", "wt") as fout:
json.dump({"comm2indx": dt.comm2indx, "feat2indx": dt.feat2indx}, fout)
keys = list(dt.timedata.keys())
cov_utils.dump_csv(
f"data/stats_comm_tau_{tau}_{key}_temp.tsv.gz",
[
dict(zip(keys, vals))
for vals in np.column_stack(list(dt.timedata.values()))
],
)
if __name__ == "__main__":
main()
| 7,113 | 27.918699 | 87 | py |
DataCovVac | DataCovVac-main/covvac-code/run_community_outreach.py | #!/usr/bin/env python3
"""Compute the probability of finding a critic or media tweet in communities.
And their outreach probabilities.
"""
import json
from collections import Counter
import numpy as np
from scipy import sparse
import cov
import cov_utils
def read_communities(tau):
"""Read community structure."""
with open(f"data/adjacency-tau_{tau}_nodes.json", "rt") as fin:
udata = {user["id"]: user for user in json.load(fin)["nodes"]}
comms = {}
for user in udata.values():
comms.setdefault(user["part"], set()).add(user["id"])
return comms, udata
def projector(data: cov.Data, ucomms: dict, tau: int = -1):
"""Project the adjacency matrix to the community structure.
Parameters
----------
data : cov.Data
data to project, may be a tuple (transition, steadystate, nmap)
ucomms : dict
dict of users, IDs as keys, dicts as values.
In the latter the partition is in the 'part' value
Returns
-------
    output : dict
        community index map (`part`) plus, for each community, the
        steady-state probability and the within/in/out flow probabilities.
"""
if isinstance(data, tuple):
transition, steadystate, nmap = data
else:
transition, steadystate, nmap, _ = cov.adjacency(
data, tau=tau, fix_sources="symmetrize", return_factors=True
)
adjacency = transition @ sparse.diags(steadystate)
# map user indx to comm
umap = {indx: ucomms[uid]["part"] for indx, uid in nmap.items()}
# communities
comms = set([u["part"] for u in ucomms.values()])
csize = Counter([u["part"] for u in ucomms.values()])
# comm to comm indes
comm_indx = {
comm: icomm
for icomm, comm in enumerate(
# sorted(comms, key=lambda x: (x[0], int(x.split("_")[0][1:])))
sorted(comms, key=lambda x: csize[x])
)
}
prj = sparse.csr_matrix(
(
np.ones(len(umap)),
(list(umap.keys()), [comm_indx[c] for c in umap.values()]),
)
)
p_ss = prj.T @ steadystate
p_adj = prj.T @ adjacency @ prj
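    # split the projected flow into within-community (diagonal) and between-community (off-diagonal) probabilities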
# entry wise multiplication with identity
p_in_in = p_adj.multiply(sparse.eye(p_adj.shape[0]))
p_out = p_adj - p_in_in
p_in_out = p_out.sum(0).A1
p_out_in = p_out.sum(1).A1
return {
"part": comm_indx,
"prob_steady": p_ss,
"prob_in_in": p_in_in @ np.ones(p_adj.shape[0]),
"prob_in_out": p_in_out,
"prob_out_in": p_out_in,
}
def main(tau=-1):
"""Do the main."""
print(f"============\nTAU {tau}\n============")
communities, ucomms = read_communities(tau)
# extract data that only contains given urls
urls = cov.Urls()
data = {}
data["full"] = cov.load_range("2020-01-01", "2021-10-01")
# data["full"] = cov.load_range("2020-01-01", "2020-04-01")
data["crit"] = data["full"].extract(
tweet_filter=lambda x: {"PRE", "POST"} & set(urls.is_coded(x))
)
data["media"] = data["full"].extract(
tweet_filter=lambda x: "MEDIA" in set(urls.is_coded(x))
)
out = {}
for kind, dt in data.items():
print(kind)
output = projector(dt, ucomms, tau)
for prob, vector in output.items():
if prob == "part":
key = "part"
else:
key = "_".join([kind, prob])
out[key] = vector
keys = list(out.keys())
cov_utils.dump_csv(
f"data/community_outreach-tau_{tau}.tsv",
[dict(zip(keys, vals)) for vals in zip(*out.values())],
)
if __name__ == "__main__":
for tau in [-1, 0, 1]:
main(tau)
| 3,656 | 26.496241 | 77 | py |
DataCovVac | DataCovVac-main/covvac-code/run_engagement.py | #!/usr/bin/env pyhton
"""Compute users engagement as an SIS model."""
from datetime import date, timedelta
import cov
import cov_utils as utils
from tqdm import tqdm
# classes
class Engaged:
"""Collect user engagement."""
def __init__(self, window=3):
self.__data__ = []
self.__cache__ = {
"uids": {"eng": set(), "tot": set(), "media": set()},
"days": {},
}
self.__window__ = window
self.__urls__ = cov.Urls()
def add(self, data: cov.Data, day: date):
"""Import data from day.
Parameters
----------
data : cov.Data
daily dataset
day : date
day
"""
isoday = day.isoformat()
self.__cache__["days"][isoday] = {
"engaged": set(),
"engaged_media": set(),
"tot_users": set(),
}
for tid, tweet in data.tweets.items():
# OP ID
uid = tweet["from_user_id"]
# Retweeters IDs
rtw_uids = {rt["from_user_id"] for rt in data.tweets.retweets(tweet["id"])}
self.__cache__["days"][isoday]["tot_users"].add(uid)
self.__cache__["days"][isoday]["tot_users"] |= rtw_uids
if "links" not in tweet:
continue
codes = self.__urls__.is_coded(tweet)
if "PRE" in codes or "POST" in codes:
self.__cache__["days"][isoday]["engaged"].add(uid)
self.__cache__["days"][isoday]["engaged"] |= rtw_uids
if "MEDIA" in codes:
self.__cache__["days"][isoday]["engaged_media"].add(uid)
self.__cache__["days"][isoday]["engaged_media"] |= rtw_uids
self.__update__(day)
def __update__(self, day: date):
"""Update and compute paramenters."""
isoday = day.isoformat()
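        # collect users seen in the trailing window of self.__window__ days ending on this day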
days = utils.daterange(day - timedelta(days=self.__window__ - 1), day + timedelta(days=1))
eng = self.__get_engaged__(days)
tot = self.__get_engaged__(days, kind="tot_users")
media = self.__get_engaged__(days, kind="engaged_media")
# people becoming engaged
new_eng = len(eng - self.__cache__["uids"]["eng"])
new_media = len(media - self.__cache__["uids"]["media"])
# people becoming disengaged
dis_eng = len(self.__cache__["uids"]["eng"] - eng)
dis_media = len(self.__cache__["uids"]["eng"] - media)
alpha, beta = sis_params(
[new_eng, dis_eng], len(eng), len(tot | self.__cache__["uids"]["tot"])
)
data = {
"eng": len(eng),
"tot": len(tot),
"new_eng": new_eng,
"dis_eng": dis_eng,
"alpha": alpha,
"beta": beta,
"rt": alpha / beta if beta > 0 else 0.0,
"day": isoday,
}
alpha, beta = sis_params(
[new_media, dis_media], len(media), len(tot | self.__cache__["uids"]["tot"])
)
data |= {
"media": len(media),
"new_media": new_media,
"dis_media": dis_media,
"alpha_media": alpha,
"beta_media": beta,
"rt_media": alpha / beta if beta > 0 else 0.0,
}
self.__data__.append(data)
self.__cache__["uids"]["eng"] = eng
self.__cache__["uids"]["media"] = media
self.__cache__["uids"]["tot"] = tot
def __get_engaged__(self, days, kind="engaged"):
eng = set()
for day in days:
if day.isoformat() in self.__cache__["days"]:
eng.update(self.__cache__["days"][day.isoformat()][kind])
return eng
def data(self):
"""Return all data.
Yields
------
data : dicts
all data as dicts
"""
yield from self.__data__
def sis_params(di, i, n):
"""Compute alpha and beta for SIS.
Returns
-------
alpha : float
infection rate
beta : float
recovery rate
"""
# compute SI/N
sin = (n - i) * i / n
if i == 0 or i == n:
alpha = 0
else:
alpha = di[0] / sin
if i == 0:
beta = 0
else:
beta = max(di[1], 1) / i
return alpha, beta
def main(window):
"""Do the main."""
print("Window", window)
engaged = Engaged(window=window)
for day in tqdm(utils.daterange("2020-01-01", "2021-10-01")):
daily = cov.load_day(day)
engaged.add(daily, day)
data = list(engaged.data())
utils.dump_csv(f"data/engagement-tau_{window}.tsv", data)
if __name__ == "__main__":
for win in [3, 5, 7]:
main(win)
| 4,681 | 25.754286 | 98 | py |
DataCovVac | DataCovVac-main/covvac-code/cov_utils.py | #!/usr/bin/env python3
"""Utilities.
File: cov_utils.py
Author: Mauro
Github: https://github.com/maurofaccin
Description: Utility functions for analysis
"""
import csv
import gzip
import pathlib
from datetime import date, timedelta
import networkx as nx
import numpy as np
from community import best_partition
BASENAME = pathlib.Path(__file__)
if BASENAME.is_symlink():
BASENAME = BASENAME.readlink()
BASENAME = BASENAME.parent
CORPUS = BASENAME / "corpus_merged"
TWEET_FIELDS = [
("id",),
("time", "timestamp_utc"),
("created_at", "local_time"),
("text",),
("from_user_name", "user_name"),
("from_user_id", "user_id"), # OP of the tweet
("to_user_id", "to_userid"),
("quoted_id",),
("quoted_user_id",),
("mentioned_user_ids", "mentioned_ids"),
("location", "user_location"),
("links",),
("hashtags",),
]
RETWEET_FIELDS = [
("id",),
("retweeted_id",),
("time", "timestamp_utc"),
("from_user_id", "user_id"), # OP of the retweet
("created_at", "local_time"),
]
USER_FIELDS = [
("from_user_name", "user_name"),
("from_user_id", "user_id"),
("from_user_description", "user_description"),
("from_user_url", "user_url"),
("from_user_realname", "user_screen_name"),
("location", "user_location"),
("from_user_followercount", "user_followers"),
]
def translate_row(row) -> tuple:
"""Use the same fields to uniform output.
Parameters
----------
row : dict
a raw row from csv
Returns
-------
tweet: dict or None
the tweet data
retweet: dict or None
the retweet data
user: dict
the user data
"""
if "time" in row:
# old keys
index = 0
else:
# new keys
index = -1
if row.get("retweeted_id", "") == "":
# this is a tweet
tweet = {k[index]: row[k[index]] for k in TWEET_FIELDS}
retweet = None
else:
# this is a retweet
tweet = None
retweet = {k[index]: row[k[index]] for k in RETWEET_FIELDS}
assert (tweet is None) ^ (retweet is None)
user = {k[index]: row[k[index]] for k in USER_FIELDS}
return tweet, retweet, user
def daterange(first, last, step=1):
"""Return a list of dates between first and last (excluded).
Parameters
----------
first : str or datetime.date
        first date
last : str or datetime.date
last date (excluded)
step : int
step in days
(Default value = 1)
Returns
-------
days : list[datetime.date]
        an ordered list of days
"""
if isinstance(first, str):
first = date.fromisoformat(first)
if isinstance(last, str):
last = date.fromisoformat(last)
return [first + timedelta(days=x) for x in range(0, (last - first).days, step)]
def filename(day, corpus="coronavirus"):
"""Return a filename.
Parameters
----------
day : date
day
corpus :
(Default value = 'coronavirus')
Returns
-------
filename : string
a filename
"""
return f"tweets-{corpus}_{day.isoformat()}.csv.gz"
def tonumber(string):
"""Convert to number if possible.
Parameters
----------
string :
a string
Returns
-------
number :
a number
"""
if string is None:
return None
if isinstance(string, list):
print(string)
try:
val = int(string)
except ValueError:
try:
val = float(string)
except ValueError:
val = str(string)
except OverflowError:
val = str(string)
return val
def values2number(dictionary):
"""Convert values to number if possible.
This is a shallow map (it won't act recursively)
Parameters
----------
dictionary : a dict
a dictionary
Returns
-------
intdict : dict
same dictionary with numbers as numbers
"""
return {k: tonumber(v) for k, v in dictionary.items() if v != ""}
def is_float(var):
if isinstance(var, (list, np.ndarray)):
return [is_float(x) for x in var]
try:
float(var)
except ValueError:
return False
except TypeError:
return False
return True
def removesuffix(text, suffix):
"""
Parameters
----------
text :
suffix :
Returns
-------
"""
if text.endswith(suffix):
return text[len(suffix) :]
return text
def removeprefix(text, prefix):
"""
Parameters
----------
text :
prefix :
Returns
-------
"""
if text.startswith(prefix):
return text[len(prefix) :]
return text
def entropy(iterable):
"""Return the entropy.
Parameters
----------
iterable : iterable
a list of numbers
Returns
-------
entropy : float
the entropy of the normalized distribution
"""
p = np.array([v for v in iterable if v > 0.0], dtype=np.float32)
p /= p.sum()
return -(p * np.log2(p)).sum()
def smooth_convolve(line, window=3):
"""Return a smoothed line computed with the hann kernel.
Parameters
----------
line : iterable
values to be smoothed (1d array)
window : int, default=3
        length of the sliding window
Returns
-------
line : np.ndarray
the smoothed line
"""
assert window % 2 == 1, "Give me an odd window"
half_window = window // 2
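    # pad both ends with the edge values so the smoothed series keeps the original length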
extended_line = np.concatenate(
(np.full((half_window,), line[0]), line, np.full((half_window,), line[-1]))
)
kernel = np.hanning(window)
return np.convolve(extended_line, kernel, mode="valid") / np.sum(kernel)
def load_csv(filepath, transpose=False, headers=True):
"""Load from csv.
Parameters
----------
filepath : str or pathlib.Path
the file path
transpose : bool (default=False)
if True the output will be dict of lists
headers : bool (default=True)
if the file has a header row
Returns
-------
data : dict or list
return the data in the csv file as list of dicts.
If transpose is `True` the return data will be a dict of lists.
"""
if isinstance(filepath, str):
filepath = pathlib.Path(filepath)
suffixes = filepath.suffixes
# use gzip if needed
if suffixes[-1] == ".gz":
open_with = gzip.open
suffixes = suffixes[:-1]
else:
open_with = open
# use tsv if suffix
if suffixes[-1] == ".tsv":
dialect = csv.excel_tab
else:
dialect = csv.excel
with open_with(filepath, "rt") as fin:
if headers:
reader = csv.DictReader(fin, dialect=dialect)
fields = reader.fieldnames
data = [values2number(row) for row in reader]
else:
reader = csv.reader(fin, dialect=dialect)
data = [values2number({k: v for k, v in enumerate(row)}) for row in reader]
fields = list(data[0].keys())
if transpose:
return {k: [d.get(k, None) for d in data] for k in fields}
return data
def dump_csv(filepath, data):
"""Write to csv.
Parameters
----------
filepath : str or pathlib.Path
the file path
data : list
an iterable over dictionaries
"""
if isinstance(filepath, str):
filepath = pathlib.Path(filepath)
suffixes = filepath.suffixes
# use gzip if needed
if suffixes[-1] == ".gz":
open_with = gzip.open
suffixes = suffixes[:-1]
else:
open_with = open
# use tsv if suffix
if suffixes[-1] == ".tsv":
dialect = csv.excel_tab
else:
dialect = csv.excel
with open_with(filepath, "wt") as fout:
writer = csv.DictWriter(fout, fieldnames=list(data[0].keys()), dialect=dialect)
writer.writeheader()
writer.writerows(data)
def cluster_tseries(matrix, array=None, temp_sort=False):
"""Cluster temporal series.
Parameters
----------
matrix :
        A matrix of NxT nodes by time steps
array :
Array to sort with the matrix
(Default value = None)
temp_sort :
If True, sort communities based on time.
(Default value = False)
Returns
-------
"""
nn, nt = matrix.shape
# compute covariance
cov = np.cov(matrix)
cov[cov < 0] = 0
# build a graph from positive values
graph = nx.from_numpy_array(cov)
# partition with modularity
part = best_partition(graph)
parts = {}
for n, p in part.items():
parts.setdefault(p, []).append(n)
# sort partitions depending on maximum values happening earlier or later
maxes = matrix.argmax(axis=1)
parts = {
x[0]: x[1][1] for x in enumerate(sorted(parts.items(), key=lambda x: maxes[x[1]].mean()))
}
part = {node: p for p, nodes in parts.items() for node in nodes}
# label nodes
labels = [part[n] for n in range(nn)]
# indx = np.argsort(labels, kind='stable')
indx = [node for nodes in parts.values() for node in nodes]
# sort labels by indx
labels = [labels[i] for i in indx]
# pick the first indexes of each class
sep = [
0,
] + [i + 1 for i, (l1, l2) in enumerate(zip(labels, labels[1:])) if l1 != l2]
if array is None:
return matrix[indx, :], sep
return matrix[indx, :], np.asarray(array)[indx], sep
def cluster_first_axis(matrix, temp_sort=False):
"""Cluster temporal series.
Parameters
----------
matrix :
        A matrix of NxT nodes by time steps
temp_sort :
If True, sort communities based on time.
(Default value = False)
Returns
-------
partition : list
list of lists of indices
"""
nn, nt = matrix.shape
# compute covariance
cov = np.cov(matrix)
cov[cov < 0] = 0
# build a graph from positive values
graph = nx.from_numpy_array(cov)
# partition with modularity
part = best_partition(graph)
parts = {}
for n, p in part.items():
parts.setdefault(p, []).append(n)
return [sorted(nodes) for nodes in parts.values()]
def get_keywords(topic: str, get_minimal: bool = False, min_len: int = 0):
"""Load filtering words.
Get all possible keywords plus the minimum set of keywords contained in the full set.
Parameters
----------
topic : str
the topic (one of `covid` or `vaccine`)
get_minimal : bool
(Default value = False)
return long and short sets
        (long has all keywords, all keys in long include at least one key in short)
min_len : int
(Default value = 0)
        Minimum keyword length to consider.
Returns
-------
topics : dict
a dictionary with a list of self excluding tags (`short`) and all tags (`long`)
"""
filepath = BASENAME / f"data/keys-{topic}.txt"
with open(filepath, "rt") as fin:
long = [line.lower().strip() for line in fin]
# remove shorter keys
long = set([k for k in long if len(k) >= min_len])
if not get_minimal:
return long
short = set()
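    # skip a keyword if an already-kept keyword is contained in it; otherwise add it and drop any kept keywords that contain it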
for flt_long in long:
to_remove = []
for flt in short:
if flt in flt_long:
break
if flt_long in flt:
to_remove.append(flt)
else:
for flt2rm in to_remove:
short.remove(flt2rm)
short.add(flt_long)
return {"long": long, "short": short}
def daily_tweets(day: date):
"""List all tweets from a given day.
Warning: it has to be run in one go.
Parameters
----------
day : date
the day of interest
Yields
------
row : dict
a row of data (one tweet or retweet)
"""
for fname in BASENAME.parent.parent.glob(day.strftime("corpus_*/**/*%Y-%m-%d.csv.gz")):
with gzip.open(fname, "rt") as fin:
# yield fin.readline()
for row in csv.DictReader(
[
line.replace("\0", " ").replace("\n", " ").replace("\r", " ").strip()
for line in fin
]
):
# sanitize null chars
yield dict(row)
def filter_row(row: list, keywords: dict, fmt="all", check_lang=True):
"""Return True if any of the keywords is in the row.
Warning: only French tweets are used.
Parameters
----------
row : list
list of rows
keywords : dict
dict of keywords
fmt : str, (default = 'all')
any of 'all' or 'any'
check_lang : bool
If to check lang.
Returns
-------
contains : bool
True is contains the keywords either in row['text'] or in row['hashtags']
"""
if check_lang and row["lang"] != "fr":
return False
for topic, kwds in keywords.items():
if fmt == "all" and not __keyword_in_row__(kwds["long"], kwds["short"], row):
return False
elif fmt == "any" and __keyword_in_row__(kwds["long"], kwds["short"], row):
return True
if fmt == "all":
return True
return False
def __keyword_in_row__(key_long, key_short, row):
hts = set(row.get("hashtags", "").split("|"))
if hts & key_long or __intext__(row["text"].lower(), key_short):
return True
return False
def __intext__(text, kwds):
for word in kwds:
if word in text:
return True
return False
def load_cases(country: str, day1="2020-01-01", day2="2021-10-01", transpose=False):
with open(BASENAME / "data/WHO-COVID-19-global-data.csv", "rt") as fin:
reader = csv.DictReader(fin, dialect=csv.excel)
headers = list(reader.fieldnames)
data = {
x["Date_reported"]: values2number(x) | dict(day=date.fromisoformat(x["Date_reported"]))
for x in reader
if x["Country_code"] == country.upper()
}
headers.append("day")
with open(BASENAME / "data/table-indicateurs-open-data-france.csv", "rt") as fin:
reader = csv.DictReader(fin)
for row in reader:
day = date.fromisoformat(row["extract_date"])
if day.isoformat() in data:
data[day.isoformat()].update(values2number(row))
headers += list(reader.fieldnames)
data = [data[dt.isoformat()] for dt in daterange(day1, day2) if dt.isoformat() in data]
if transpose:
return {k: [d.get(k, None) for d in data] for k in headers}
return data
def get_communities(tau=-1):
"""Return a dictionary of uids and communities."""
    cache = BASENAME / f"data/partitions_tau_{tau}.tsv"
if cache.is_file():
print("loading_cache")
part = load_csv(cache, transpose=False)
return dict([u.values() for u in part])
else:
users = load_csv(f"../data/users-tau_{tau}.tsv.gz", transpose=False)
part = [{"id": u["id"], "part": u["part"]} for u in users]
dump_csv(cache, part)
return dict([u.values() for u in part])
| 15,188 | 22.957413 | 99 | py |
DataCovVac | DataCovVac-main/covvac-code/cov.py | #!/usr/bin/env python
"""Utility functions."""
import csv
import gzip
import pathlib
import subprocess as sp
import tempfile
from collections import Counter
from dataclasses import dataclass, field
from datetime import date
import networkx as nx
import numpy as np
import pycountry
import pygenstability as stability
from scipy import sparse
from tqdm import tqdm
import cov_utils as utils
COUNTRIES = {
"CM",
"FR",
"CD",
"CA",
"MG",
"CI",
"NE",
"BF",
"ML",
"SN",
"TD",
"GN",
"RW",
"BE",
}
COUNTRIES = {c: pycountry.countries.get(alpha_2=c) for c in COUNTRIES}
class Tweets(dict):
"""A dictionary of tweets."""
def __init__(self):
"""Initialize."""
super().__init__(self)
self.__keys__ = []
self.__retweets__ = {}
self.__retweet_keys__ = {}
def update_from(self, fields, tweet_data):
"""Import tweets.
Parameters
----------
fields : list
list of keys
tweet_data : iterable
iterable of dicts (each representing a tweet)
Returns
-------
users : dict
users IDs involved with corresponding tweet ids
"""
self.__keys__ = list(fields)
users = {}
for tweet in tweet_data:
tweet = utils.values2number(tweet)
tid = tweet["id"]
self.setdefault(tid, tweet)
uid = tweet["from_user_id"]
users.setdefault(uid, []).append(tid)
return users
def update_retweets_from(self, fields, retweet_data):
"""Import tweets.
Parameters
----------
fields : list
list of keys
retweet_data : iterable
iterable of dicts (each representing a retweet)
Returns
-------
users : dict
users IDs involved with corresponding retweet IDs
"""
self.__retweet_keys__ = list(fields)
users = {}
for retweet in retweet_data:
retweet = utils.values2number(retweet)
# get the user that tweeted
tid = retweet["retweeted_id"]
if tid not in self:
# ignore retweets which original tweet is absent
continue
self.__retweets__.setdefault(tid, []).append(tuple(retweet.values()))
uid = retweet["from_user_id"]
users.setdefault(uid, []).append((tid, retweet["id"]))
return users
def iter(self):
"""Iterate over tweets.
Yields
------
tweets : tuple
tuple of (Tweet ID, Tweet data, retweet list)
"""
yield from [(tid, t, self.retweets(tid)) for tid, t in self.items()]
def retweets(self, tid=None):
"""Return a list of retweets as dicts."""
if tid is None:
return [
dict(zip(self.__retweet_keys__, rts))
for rtss in self.__retweets__.values()
for rts in rtss
]
return [dict(zip(self.__retweet_keys__, rts)) for rts in self.__retweets__.get(tid, [])]
def n_retweets(self, tid=None):
"""Return the number of retweets."""
if tid is None:
return {tid: len(self.__retweets__.get(tid, [])) for tid in self}
return len(self.__retweets__.get(tid, []))
def urls(self, retweets=False):
"""Return the number of times each urls has been used.
Parameters
----------
retweets : bool
(Default value = False)
If set to True, add the retweets in the count
Returns
-------
urls : collections.Counter
the counts.
"""
urls = Counter()
for tid, tweet in self.items():
url = Urls.netloc(tweet.get("links", ""))
if url != "":
if retweets:
urls[url] += self.n_retweets(tid=tid) + 1
else:
urls[url] += 1
return urls
@property
def nt(self):
"""Return the number of tweets.
Returns
-------
ntweets : int
number of tweets
"""
return len(self)
@property
def nrt(self):
"""Return the number of retweets.
Returns
-------
nretweets : int
number of retweets
"""
return sum([len(rts) for _, _, rts in self.iter()])
class Users(dict):
"""A dictionary of users."""
def update_from(self, users: dict, fields: list, user_data):
"""Import tweets.
Parameters
----------
users : dict
set of user IDs to restrict to.
fields : list
list of keys
user_data : iterable
iterable of dicts (each representing a tweet)
"""
self.__keys__ = list(fields)
for user in user_data:
user = utils.values2number(user)
uid = user["from_user_id"]
if uid in users["tweets"] or uid in users["retweets"]:
# store tweets and retweets done by user
self.setdefault(uid, user)
self[uid].setdefault("data_tweets", []).extend(users["tweets"].get(uid, []))
self[uid].setdefault("data_retweets", []).extend(users["retweets"].get(uid, []))
for uid in users["tweets"]:
assert uid in self, f"Some users are not present. {uid}"
for uid in users["retweets"]:
assert uid in self, f"Some users are not present. {uid}"
def h_index(self, tweets: Tweets):
"""Return the H-index of users.
Number of tweets with at least the same number of retweets.
Parameters
----------
tweets : Tweets
Returns
-------
rank : Counter
the rank of the users
"""
rank = Counter(
{
uid: hindex([tweets.n_retweets(tid=tid) for tid in u["data_tweets"]])
for uid, u in self.items()
}
)
return +rank
@dataclass
class Data:
"""Database of all tweets, retweets and users."""
users: Users = field(default_factory=Users)
tweets: Tweets = field(default_factory=Tweets)
def add_day(self, day):
"""Load a day from the database.
Parameters
----------
day : str or date
the day
"""
if isinstance(day, (date,)):
day = day.isoformat()
user_ids = {}
# add tweets
fname = utils.CORPUS / f"{day}-tweets.csv.gz"
user_ids["tweets"] = self.__tweets_from_file__(fname)
# add retweets
fname = utils.CORPUS / f"{day}-retweets.csv.gz"
user_ids["retweets"] = self.__retweets_from_file__(fname)
# add users
fname = utils.CORPUS / f"{day}-users.csv.gz"
self.__users_from_file__(fname, user_ids)
def hashtags(self):
"""Return hashtags with corresponding tweets."""
hts = [
(ht, tid)
for tid, tweet in self.tweets.items()
for ht in tweet.get("hashtags", "").split("|")
]
hts_dict = {}
for ht, tid in hts:
hts_dict.setdefault(ht, []).append(tid)
return hts_dict
def hashtags_rank(self):
"""Return the h-index of hashtags."""
t_len = self.tweets.n_retweets()
hts = {ht: [t_len[tid] for tid in tids] for ht, tids in self.hashtags().items()}
rank = Counter({ht: hindex(tlen) for ht, tlen in hts.items()})
del rank[""]
return rank
def extract(self, tweet_filter=None, user_filter=None):
"""Extract a subset of tweets.
Parameters
----------
tweet_filter :
(Default value = None)
user_filter :
(Default value = None)
Returns
-------
data : Data
a database with extracted tweets and users.
"""
if tweet_filter is None:
def tweet_filter(x):
return True
if user_filter is None:
def user_filter(x):
return True
data = Data()
users = {}
users["tweets"] = data.tweets.update_from(
self.tweets.__keys__,
[
tweet
for tweet in self.tweets.values()
if tweet_filter(tweet) and user_filter(tweet["from_user_id"])
],
)
tids = list(data.tweets.keys())
users["retweets"] = data.tweets.update_retweets_from(
self.tweets.__retweet_keys__,
[
rtw
for tid in tids
for rtw in self.tweets.retweets(tid)
if user_filter(rtw["from_user_id"])
],
)
data.users.update_from(
users,
self.users.__keys__,
[{ku: vu for ku, vu in u.items() if ku[:4] != "data"} for u in self.users.values()],
)
return data
def __tweets_from_file__(self, fname: pathlib.Path):
users = {}
if fname.is_file():
with gzip.open(fname, "rt") as fin:
reader = csv.DictReader(fin)
users = self.tweets.update_from(reader.fieldnames, reader)
return users
def __retweets_from_file__(self, fname: pathlib.Path):
users = {}
if fname.is_file():
with gzip.open(fname, "rt") as fin:
reader = csv.DictReader(fin)
users = self.tweets.update_retweets_from(reader.fieldnames, reader)
return users
def __users_from_file__(self, fname: pathlib.Path, user_ids: dict):
if fname.is_file():
with gzip.open(fname, "rt") as fin:
reader = csv.DictReader(fin)
self.users.update_from(user_ids, reader.fieldnames, reader)
def __str__(self):
return f"""Tweet data
tweets: {len(self.tweets)} / {len(self.tweets.__retweets__)}
retweets: {self.tweets.nrt}
users: {len(self.users)}
"""
@dataclass
class Urls:
"""Load coded URLs."""
pre: set = field(default_factory=set)
post: set = field(default_factory=set)
media: set = field(default_factory=set)
codes: list = field(default_factory=list)
def __post_init__(self):
"""Initialize Urls bag."""
# load from file
urls = utils.load_csv(utils.BASENAME / "data/coding_urls.csv")
self.pre |= {url["Label"].lower() for url in urls}
urls = utils.load_csv(utils.BASENAME / "data/coding_last.tsv")
for row in urls:
if row["code"] == "COVID":
self.post.add(row["url"].lower())
else:
self.pre.add(row["url"].lower())
urls = utils.load_csv(utils.BASENAME / "data/media-nosocial.txt", headers=False)
self.media |= {url[0].lower() for url in urls}
self.codes = ["PRE", "POST", "MEDIA"]
@staticmethod
def netloc(url: str):
"""Return the base url.
Stripped of:
- 'http[s]://'
- 'www.'
- anything after the first '/'
- anything after the first '?'
basically a netloc without 'www.'
Parameters
----------
url : str
the URL to check
Returns
-------
netloc : str
the netloc of the given URL
"""
if url[:4] == "http":
netloc = url.split("/", maxsplit=3)[2]
else:
netloc = url.split("/", maxsplit=1)[0]
netloc = utils.removeprefix(netloc, "www.")
return netloc.split("?")[0]
def is_coded(self, tweet: dict):
"""Return if the tweet contins a coded link.
Parameters
----------
tweet : dict
the tweet data (should contain `links` as key)
Returns
-------
codes : list
list of codes (strings with values `PRE`, `POST` or `MEDIA`)
"""
if tweet.get("links", "") == "":
return []
coded = set()
for url in tweet["links"].split("|"):
netloc = self.netloc(url.lower())
if netloc in self.pre:
coded.add("PRE")
elif netloc in self.post:
coded.add("POST")
elif netloc in self.media:
coded.add("MEDIA")
return coded
def load_day(day, data=None, strict=False):
"""Load a day from the database.
Parameters
----------
day : str or date
the day
data : Data
if provided, append to this data (Default value = None)
strict : bool
whether to load also retweets from a different day. (Default value = False)
Returns
-------
data : Data
"""
if data is None:
data = Data()
data.add_day(day)
return data
def load_range(day1, day2=date.today(), strict=False):
"""Load a range of days.
Parameters
----------
day1 : str or date
First day
day2 : str or date, default: today
Last day (excluded) (Default value = date.today())
strict : bool
NotImplemented (Default value = False)
Returns
-------
data : Data
data
"""
data = Data()
ranger = tqdm(utils.daterange(day1, day2), leave=False)
for day in ranger:
ranger.set_description(desc=str(day))
data.add_day(day)
return data
def directed_partition(graph):
"""Wrap Stability at t=1 for pygenstability.
Parameters
----------
graph : nx.DiGraph
The graph to partition.
Returns
-------
partition : dict
Dictionary of nodes -> partition.
Partition tags are sorted by partition size.
"""
i2n = dict(enumerate(graph.nodes()))
adj = nx.to_scipy_sparse_matrix(graph, weight="weight", format="csr")
parts = stability.run(adj, n_workers=8, times=[1.0])
# returns a list of community tags per node.
return {i2n[node]: partition for node, partition in enumerate(parts["community_id"][0])}
def directed_partition_louvain(graph):
"""Wrap DirectedLouvain.
directed Louvain algorithm from: https://github.com/nicolasdugue/DirectedLouvain
Parameters
----------
graph : nx.DiGraph
The graph to partition.
Returns
-------
partition : dict
Dictionary of nodes -> partition.
Partition tags are sorted by partition size.
"""
n2i = {n: i for i, n in enumerate(graph.nodes())}
i2n = dict(enumerate(graph.nodes()))
louvainbin = pathlib.Path("~/codes/DirectedLouvain/bin").expanduser()
tmp = tempfile.NamedTemporaryFile(mode="wt", delete=True)
for e in graph.edges(data="weight"):
print(n2i[e[0]], n2i[e[1]], e[2] if e[2] is not None else 1.0, file=tmp)
tmp.flush()
tmpfile = pathlib.Path(tmp.name)
binfile = tmpfile.with_suffix(".bin")
weightfile = tmpfile.with_suffix(".weight")
cmd = [louvainbin / "convert", "-i", tmp.name, "-o", binfile, "-w", weightfile]
sp.run(cmd, capture_output=True)
tmp.close() # close and remove the temporary file
cmd = [louvainbin / "community", binfile, "-l", "-1", "-w", weightfile]
job = sp.run(cmd, capture_output=True)
binfile.unlink() # remove binary file
weightfile.unlink() # remove weight file
# resolve last level of the dendrogram.
new_part = {
n: {
n,
}
for n in i2n
}
part = {}
for row in job.stdout.decode().splitlines():
node, partition = map(int, row.split())
if node == 0:
assert len(part) == 0
part = new_part.copy()
new_part = dict()
new_part.setdefault(partition, set())
new_part[partition] |= part.pop(node)
new_part = sorted(new_part.values(), key=len, reverse=True)
part = {i2n[node]: partition for partition, nodes in enumerate(new_part) for node in nodes}
# sometimes I loose one node
if len(part) < len(graph):
for node in set(graph.nodes()) - set(part.keys()):
part[node] = len(part)
return part
def hyper_edges(data: Data):
"""Compute the hyper_edges of the system.
Parameters
----------
data : Data
system database
Returns
-------
usermap : dict
users that should go to the adjacency. May be more or less than those in data
tails : sparse
entering nodes of each hyper-edge
heads : sparse
exit nodes of each hyper-edge
"""
# user id -> index
    # keep only users who got retweeted at least once or who retweeted someone else
uids = set(t["from_user_id"] for _, t, rts in data.tweets.iter() if len(rts) > 0) | set(
rt["from_user_id"] for _, _, rts in data.tweets.iter() for rt in rts
)
# nodemap = {uid: uindx for uindx, uid in enumerate(data.users)}
nodemap = {uid: uindx for uindx, uid in enumerate(uids)}
# each node is a user
nnodes = len(nodemap)
print("BUILDING HYPER-GRAPH: node number ->", nnodes)
# each tweet is an hyper edge
tweets = [
(tid, t["from_user_id"], [rt["from_user_id"] for rt in rts])
for tid, t, rts in data.tweets.iter()
if len(rts) > 0 # if there are retweets
]
nedges = len(tweets)
print("BUILDING HYPER-GRAPH: hyper-edge number ->", nedges)
# entry point of the hyper edges (tail of the arrow)
_tails = sparse.coo_matrix(
(np.ones(nedges), ([nodemap[uid] for _, uid, _ in tweets], np.arange(nedges))),
shape=(nnodes, nedges),
)
retweets = [(tindx, rt_uid) for tindx, (_, _, rts) in enumerate(tweets) for rt_uid in rts]
print("BUILDING HYPER-GRAPH: hyper-edge total size ->", len(retweets))
# exit point of the hyper edges (head of the arrow)
_heads = sparse.coo_matrix(
(
np.ones(len(retweets)),
(
[nodemap[rt_uid] for _, rt_uid in retweets],
[tindx for tindx, _ in retweets],
),
),
shape=(nnodes, nedges),
)
return (nodemap, _tails.tocsc(), _heads.tocsc())
def __interaction_adjacency__(tails, heads, tau=-1):
"""Compute the weights of the interaction adjacency matrix.
Only the corresponding transition matrix should be considered.
Parameters
----------
tails : sparse matrix
heads : sparse matrix
tau : {-1, 0, 1}
parameter
tau = 0 -> project each hyper_edge to a clique
tau = -1 -> each hyper edge is selected with the same prob (independently from cascade size)
tau = 1 -> hyper edges with larger cascades are more probable.
(Default value = -1)
Returns
-------
adjacency matrix : sparse
"""
# B_{\alpha, \alpha}
# get the exit size of each hyper edge (number of vertices involved)
hyper_weight = heads.sum(0).A1
# here we may have zeros entries for tweets that have never been retweeted
hyper_weight[hyper_weight > 0] = hyper_weight[hyper_weight > 0] ** tau
# put that on a diagonal matrix
hyper_weight = sparse.diags(hyper_weight, offsets=0)
print(tails.shape)
print(hyper_weight.shape)
print(heads.shape)
# compute the tails -> heads weighted entries (propto probability if symmetric)
return tails @ hyper_weight @ heads.T
def __fix_sources__(matrix: sparse.csr_matrix):
"""Add a `Twitter basin` node to keep the dynamics going.
This add a node (last one).
Parameters
----------
matrix : sparse.csr_matrix
matrix to fix
Returns
-------
fixed_matrix : sparse.csr_matrix
fixed matrix
newnode : int
index of the new node (usually last one).
"""
assert matrix.shape[0] == matrix.shape[1]
nnodes = matrix.shape[0]
out_degree = np.asarray(matrix.sum(1)).flatten()
# find sinks: out_degree == 0
sinks = np.argwhere(out_degree == 0).flatten()
# find sources: out_degree != 0
sources = np.argwhere(out_degree != 0).flatten()
# add a node at the end
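    # (grow the CSR matrix in place: bump the shape and repeat the last indptr entry so the new row starts empty)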
matrix._shape = (nnodes + 1, nnodes + 1)
matrix.indptr = np.hstack((matrix.indptr, matrix.indptr[-1]))
# add link from sinks to basin
sink2basin = sparse.csr_matrix(
(
            # the value doesn't matter, we will compute the transition matrix from this
np.ones_like(sinks),
(sinks, np.full_like(sinks, nnodes)),
),
shape=matrix.shape,
)
basin2sources = sparse.csr_matrix(
(
            # the value doesn't matter, we will compute the transition matrix from this
out_degree[sources],
(np.full_like(sources, nnodes), sources),
),
shape=matrix.shape,
)
return matrix + sink2basin + basin2sources, nnodes
def __strip_basin__(matrix: sparse.csr_matrix, vector: np.array, basin=None):
"""Strip last node (column and row) of the matrix."""
assert matrix.shape[0] == matrix.shape[1]
if basin is None:
# assume the last node
basin = matrix.shape[0] - 1
keep = np.arange(matrix.shape[0])
keep = keep[keep != basin]
return extract_components(matrix, keep), vector[keep]
def adjacency(
data: Data,
tau: int = -1,
fix_sources: str = "basin",
return_factors=False,
symmetrize_weight=0.1,
return_intraction=False,
):
r"""Return the adjacency matrix of the data.
It picks the strongly connected component and return
.. math::
\Pi T
where :math:`\Pi` is the diagonal matrix of the steadystate and
:math:`T` is the transition matrix.
In this way all edge weights are the probability of being traversed.
Parameters
----------
data : DataBase or tuple
database of tweets or tuple(user2id_map, tails heads)
tau : int
parameter.
(Default value = -1)
    fix_sources : str, default="basin"
        how to fix source and sink nodes: with "basin" a fake `basin` node is
        added as a bridge between sinks and sources and removed before
        returning; with "symmetrize" a fraction of the transposed matrix is
        added instead.
return_factors :
(Default value = False)
Returns
-------
adjacency : sparse
the adjacency matrix
umap : dict
map of index to user IDs
other_components : list of lists
list of components other that the largest.
"""
# compute hyper_edges (tails and heads)
if isinstance(data, tuple):
ui_map, tails, heads = data
else:
ui_map, tails, heads = hyper_edges(data)
iu_map = {i: u for u, i in ui_map.items()}
# put everything in a matrix (interaction matrix)
weighted_adj = __interaction_adjacency__(tails, heads, tau=tau)
if fix_sources == "basin":
# add a fake node to ensure the ergodicity
weighted_adj, basin = __fix_sources__(weighted_adj)
elif fix_sources == "symmetrize":
weighted_adj += symmetrize_weight * weighted_adj.T
# extract the largest connected component
comps = find_components(weighted_adj, kind="strong")
assert sum([len(c) for c in comps]) == weighted_adj.shape[0]
if fix_sources == "basin":
assert basin in comps[0]
weighted_adj = extract_components(weighted_adj, comps[0])
if return_intraction:
return (
weighted_adj, # the adjacency matrix
{i: iu_map[cind] for i, cind in enumerate(comps[0])}, # the node IDs
[[iu_map[i] for i in comp] for comp in comps[1:]], # nodes discarted
)
# compute the transition matrix and the steady state.
transition, steadystate = compute_transition_matrix(
weighted_adj,
return_steadystate=True,
niter=10000,
)
del weighted_adj
if fix_sources == "basin":
# remove the fake `basin` node
transition, steadystate = __strip_basin__(
transition, steadystate.A1, basin=comps[0].index(basin)
)
# renormalize rows
marginal = transition.sum(0).A1
# add a self loop to the nodes without outlinks.
transition += sparse.diags((marginal == 0).astype(int))
marginal[marginal == 0] = 1
transition = transition @ sparse.diags(1 / marginal)
# renormalize steadystate
steadystate /= steadystate.sum()
comps = [[c for c in comp if c != basin] for comp in comps]
print(f"STRONG COMPONENT: removing {basin} as basin node.")
else:
steadystate = steadystate.A1
assert len(comps[0]) == transition.shape[0]
transition.eliminate_zeros()
print(
f"STRONG COMPONENT: we discarted {len(ui_map) - transition.shape[0]}"
f" nodes {100 * (len(ui_map) - transition.shape[0]) / len(ui_map):4.2f}%."
)
print(f"STRONG COMPONENT: adjacency matrix of shape {transition.shape}")
if return_factors:
return (
transition,
steadystate,
{i: iu_map[cind] for i, cind in enumerate(comps[0])}, # the node IDs
[[iu_map[i] for i in comp] for comp in comps[1:]], # nodes discarted
)
# new adjacency matrix as probability A_{ij} \propto p(i, j)
new_adj = transition @ sparse.diags(steadystate)
return (
new_adj, # the adjacency matrix
{i: iu_map[cind] for i, cind in enumerate(comps[0])}, # the node IDs
[[iu_map[i] for i in comp] for comp in comps[1:]], # nodes discarted
)
def compute_transition_matrix(matrix, return_steadystate=False, niter=10000):
r"""Return the transition matrix.
Parameters
----------
matrix : sparse.spmatrix
the adjacency matrix (square shape)
return_steadystate : bool (default=False)
return steady state. (Default value = False)
niter : int (default=10000)
number of iteration to converge to the steadystate. (Default value = 10000)
Returns
-------
trans : np.spmatrix
The transition matrix.
v0 : np.matrix
the steadystate
"""
# marginal
tot = matrix.sum(0).A1
# fix zero division
tot_zero = tot == 0
tot[tot_zero] = 1
# transition matrix
trans = matrix @ sparse.diags(1 / tot)
# fix transition matrix with zero-sum rows
trans += sparse.spdiags(tot_zero.astype(int), 0, *trans.shape)
if return_steadystate:
v0 = matrix.sum(0)
v0 = v0.reshape(1, matrix.shape[0]) / v0.sum()
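        # power iteration: propagate the distribution through the transition matrix until the L1 change drops below 1e-7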
for i in range(niter):
# evolve v0
v1 = v0.copy()
v0 = v0 @ trans.T
if np.sum(np.abs(v1 - v0)) < 1e-7:
break
print(f"TRANS: performed {i} itertions.")
return trans, v0
return trans
def find_components(matrix, kind="strong"):
"""Return the components of the graph.
Parameters
----------
matrix : sparse.spmatrix
the adjacency square matrix
kind : str, default=`strong`
either `strong` or `weak` (Default value = 'strong')
Returns
-------
components : list
sorted list of components (list of node indexes)
"""
# check strongly connected component
ncomp, labels = sparse.csgraph.connected_components(
csgraph=matrix, directed=True, connection=kind
)
components = [[] for _ in range(ncomp)]
for node, label in enumerate(labels):
components[label].append(node)
return sorted(components, key=len, reverse=True)
def extract_components(matrix: sparse.spmatrix, indexes: list):
r"""Extract the sub matrix.
Parameters
----------
matrix : sparse.spmatrix
the matrix (square)
indexes : list
list of indeces to retain
Returns
-------
matrix : sparse.csc_matrix
matrix with rows and columns removed.
"""
return matrix.tocsr()[indexes, :].tocsc()[:, indexes]
def hindex(citations):
"""Compute the H-index.
Parameters
----------
citations : list
the list of number of citations per paper.
Returns
-------
hindex : int
the H-index
"""
srt = np.sort(citations)[::-1]
return (srt >= np.arange(1, len(srt) + 1)).sum()
| 28,239 | 27.296593 | 100 | py |
DataCovVac | DataCovVac-main/covvac-code/run_community_temporal.py | #!/usr/bin/env python3
"""Compute the probability of finding a critic or media tweet in communities.
And their outreach probabilities.
"""
import json
from collections import Counter
from datetime import date, timedelta
import numpy as np
from scipy import sparse
import cov
import cov_utils
def read_communities(tau):
"""Read community structure."""
with open(f"data/adjacency-tau_{tau}_nodes.json", "rt") as fin:
udata = {user["id"]: user for user in json.load(fin)["nodes"]}
comms = {}
for user in udata.values():
comms.setdefault(user["part"], set()).add(user["id"])
return comms, udata
def month_cycle(month1="2020-01", month2="2021-10"):
"""Month by month date ranges."""
year, month = map(int, month1.split("-"))
def formt(year, month):
return f"{year}-{month:02d}"
def increment(year, month):
if month == 12:
year += 1
month = 1
else:
month += 1
return year, month
while formt(year, month) != month2:
yield formt(year, month) + "-01", formt(*increment(year, month)) + "-01"
year, month = increment(year, month)
def win_cycle(win=30, step=7):
"""Sliding window."""
for day in cov_utils.daterange(
"2020-01-01", date.fromisoformat("2021-10-01") - timedelta(days=win), step=step
):
yield day.isoformat(), (day + timedelta(days=win)).isoformat()
def period_cycle():
"""Return the three main periods."""
dates = ['2020-01-01', '2020-11-08', '2021-06-01', '2021-10-01']
return zip(dates, dates[1:])
def main(tau=-1):
"""Do the main."""
print(f"============\nTAU {tau}\n============")
communities, ucomms = read_communities(tau)
best_comms = Counter({k: len(v) for k, v in communities.items()})
best_comms = {k: i for i, (k, _) in enumerate(best_comms.most_common(50))}
periods = list(month_cycle())
periods = list(win_cycle())
periods = list(period_cycle())
# extract data that only contains given urls
urls = cov.Urls()
output = {
"probs": {
kind: np.zeros((len(best_comms) + 1, len(best_comms) + 1, len(periods)))
# ^^^^^ fake node to collect all other communities
for kind in ["full", "crit", "media"]
},
"steady": {
kind: np.zeros((len(best_comms) + 1, len(periods)))
# ^^^^^ fake node to collect all other communities
for kind in ["full", "crit", "media"]
},
"active": {
kind: np.zeros((len(best_comms) + 1, len(periods)))
for kind in ["full", "crit", "media"]
},
}
for it, (day1, day2) in enumerate(periods):
print(day1, day2)
# load data
data = {}
data["full"] = cov.load_range(day1, day2)
data["crit"] = data["full"].extract(
tweet_filter=lambda x: {"PRE", "POST"} & set(urls.is_coded(x))
)
data["media"] = data["full"].extract(
tweet_filter=lambda x: "MEDIA" in set(urls.is_coded(x))
)
# compute probs
for kind, dt in data.items():
print(kind)
transition, steadystate, umap, _ = cov.adjacency(
dt, tau=tau, fix_sources="symmetrize", return_factors=True
)
adj = transition @ sparse.diags(steadystate)
# map user_index to comm_index
umap = {
i: best_comms[ucomms[uid]["part"]]
if ucomms[uid]["part"] in best_comms
else len(best_comms)
for i, uid in umap.items()
}
best_comm_prj = sparse.csr_matrix(
(
np.ones(len(umap)),
(list(umap.keys()), list(umap.values())),
),
shape=(adj.shape[0], len(best_comms) + 1),
)
output["probs"][kind][:, :, it] = (
best_comm_prj.T @ adj @ best_comm_prj
).toarray()
output["steady"][kind][:, it] = best_comm_prj.T @ steadystate
output["active"][kind][:, it] = best_comm_prj.sum(0)
for kind, tensor in output["probs"].items():
np.savez_compressed(
f"data/community_temporal_periods_{kind}_{tau}.npz",
probs=tensor,
steady=output["steady"][kind],
            active=output["active"][kind],
comms=list(best_comms.keys()),
periods=periods,
)
if __name__ == "__main__":
for tau in [-1, 0, 1]:
main(tau)
exit()
| 4,643 | 30.167785 | 88 | py |
DataCovVac | DataCovVac-main/covvac-plots/plot_engagement.py | #!/usr/bin/env python
"""Plots engagement for a given window."""
import csv
from datetime import date, timedelta
import numpy as np
from matplotlib import dates as mdates
from matplotlib import pyplot, ticker
import cov
import cov_utils
from conf import WIN
pyplot.style.use("mplrc")
grey = "#999999"
keywords = []
for topic in ["covid", "vaccine"]:
with open(f"../data/keys-{topic}.txt", "rt") as fin:
keywords += [line.rstrip() for line in fin]
keywords += ["covid__19", "covid19france", "covid_19fr", "covidー19"]
def check_event(day: str, win: int, ignore=None):
"""Check tweet content around an event.
Parameters
----------
day : date or str
the day of interest
win : int
the window duration in days: [day, day + win)
ignore : list
the hashtags to ignore
"""
if isinstance(day, str):
day = date.fromisoformat(day)
# data = cov.load_range(day - timedelta(days=win - 1), day + timedelta(days=1))
# we check the window following the day at which we measures a peak.
data = cov.load_range(day, day + timedelta(days=win))
rank = data.hashtags_rank()
if ignore is not None:
for d in ignore:
del rank[d]
print(", ".join([r[0] for r in rank.most_common(10)]))
print()
def label_fmt(x, pos):
if x == 0:
return "0"
if x % 1000000 == 0:
        return f"{x//1000000}M"
if x % 1000 == 0:
return f"{x//1000:.0f}k"
return f"{x}"
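# Illustrative tick labels produced by the formatter above:
# label_fmt(0, None) -> "0", label_fmt(250000, None) -> "250k",
# label_fmt(3000000, None) -> "3M".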
fig, axes = pyplot.subplots(
nrows=3,
figsize=(7, 3.8),
sharex=True,
gridspec_kw={
"hspace": 0,
"top": 0.82,
"right": 0.94,
"left": 0.08,
"bottom": 0.10,
"height_ratios": [0.2, 0.4, 0.4],
},
)
data = cov_utils.load_csv(f"../data/engagement-tau_{WIN}.tsv", transpose=True)
data["day"] = [date.fromisoformat(day) for day in data["day"]]
high = [dt for dt, val in zip(data["day"], data["rt"]) if val >= 20]
cases = cov_utils.load_cases("FR", transpose=True)
ax = axes[2]
ax.plot(data["day"], data["rt"])
ax.plot(data["day"], data["rt_media"], zorder=-1, alpha=0.8)
ax.set(ylabel=r"$R_t$")
ax.set_ylim(-5, 110)
# ax.tick_params("x", rotation=30)
ax = axes[1]
ax2 = ax.twinx()
next(ax2._get_lines.prop_cycler)
(full,) = ax2.plot(
data["day"],
cov_utils.smooth_convolve(data["media"], window=7),
linewidth=0.7,
label="News media",
)
(eng,) = ax.plot(
data["day"],
cov_utils.smooth_convolve(data["eng"], window=7),
linewidth=1,
label="Vaccine-critical",
)
ax2.tick_params("y", color=full.get_color(), labelcolor=full.get_color())
ax.tick_params("y", color=eng.get_color(), labelcolor=eng.get_color())
ax.legend(handles=[eng, full], fontsize="small", labelcolor="#555555", frameon=False)
ax.set_ylabel("Engaged", color=eng.get_color())
ax.yaxis.set_major_formatter(ticker.FuncFormatter(label_fmt))
ax2.yaxis.set_major_formatter(ticker.FuncFormatter(label_fmt))
ax = axes[0]
yes = np.array(cases["New_cases"])
xes = np.array(cases["day"])
yes = np.array([x if x is not None and not isinstance(x, str) else 0 for x in yes])
ax.fill_between(
xes,
cov_utils.smooth_convolve(yes, window=7),
color=grey,
alpha=0.5,
linewidth=0,
)
ax.set_ylabel("New cases")
ax.yaxis.set_major_formatter(ticker.FuncFormatter(label_fmt))
secax = ax.secondary_xaxis("top")
secax.set_xticks(list(high) + [date.fromisoformat("2020-12-30")])
secax.set_xticklabels(
[d.isoformat() for d in high] + ["first vac."],
rotation=90,
ha="center",
va="bottom",
fontdict={"fontsize": 7},
)
for locks in cov_utils.load_csv("../data/lockdowns.tsv"):
ax.axvspan(
date.fromisoformat(locks["start"]),
date.fromisoformat(locks["end"]),
alpha=0.2,
color="grey",
)
for dt in high:
print(dt)
check_event(dt, WIN, ignore=keywords)
for ax in axes:
ax.axvline(dt, color=grey, alpha=0.5, zorder=-1)
for ax in axes:
ax.axvline(date.fromisoformat("2020-12-30"), color=grey, alpha=0.5, zorder=-1)
for ax in axes[1:]:
ax.axvspan(date(2020, 1, 1), date(2020, 11, 11), alpha=0.2, color=grey)
ax.axvspan(date(2021, 6, 1), date(2021, 10, 1), alpha=0.2, color=grey)
fig.align_ylabels(axes)
locator = mdates.AutoDateLocator(minticks=6, maxticks=12)
formatter = mdates.ConciseDateFormatter(locator, formats=["%b\n%Y", "%b", "", "", "", ""])
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
with open(f"../data/engagement-events-tau_{WIN}.tsv", "wt") as fout:
writer = csv.writer(fout, dialect=csv.excel_tab)
writer.writerow(["event", "day"])
writer.writerows([(f"event-{i}", day) for i, day in enumerate(high)])
pyplot.savefig("plot_engagement.pdf")
| 4,737 | 26.229885 | 90 | py |
DataCovVac | DataCovVac-main/covvac-plots/plot_community_periods.py | #!/usr/bin/env python3
"""Plot the temporal behaviour of the communities on the three periods."""
import json
from collections import Counter
from datetime import date, timedelta
import numpy as np
from matplotlib import pyplot
import cov_utils
pyplot.style.use("mplrc")
grey = "#999999"
class NumpyEncoder(json.JSONEncoder):
"""A class to encode ndarray as list."""
def default(self, obj):
"""Encode as list."""
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
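# Usage sketch (illustrative): json.dumps({"probs": np.arange(3)}, cls=NumpyEncoder)
# serializes the ndarray as a plain list, giving '{"probs": [0, 1, 2]}'.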
def load_data():
"""Just load data."""
d = {}
for kind in ["crit", "media", "full"]:
print(kind)
np_data = np.load(f"../data/community_temporal_periods_{kind}_-1.npz")
d[kind] = {"in": dict(zip(np_data["comms"], np_data["steady"]))}
probs = np_data["probs"]
for i in range(probs.shape[-1]):
np.fill_diagonal(probs[:, :, i], 0)
d[kind]["out"] = dict(zip(np_data["comms"], probs.sum(0) / np_data["steady"]))
d[kind]["active"] = dict(zip(np_data["comms"], np_data["active"]))
d["periods"] = np_data["periods"]
d["days"] = [date.fromisoformat(p[0]) + timedelta(days=15) for p in d["periods"]]
d["partsize"] = Counter(cov_utils.get_communities(-1).values())
return d
def draw_balls(ax: pyplot.Axes, data, psize):
"""Draw balls."""
for i in range(7):
community = f"C{i}"
xes = data["out"][community]
yes = np.array(data["in"][community]) / psize[community]
for x1, y1, x2, y2 in zip(xes, yes, xes[1:], yes[1:]):
arrow_length = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
if arrow_length < 0.05:
continue
ax.annotate(
"",
(x2, y2),
xytext=(x1, y1),
arrowprops=dict(
facecolor=community,
linewidth=0,
shrink=0.0,
width=5,
headwidth=5,
headlength=arrow_length * 100,
alpha=0.4,
connectionstyle="arc3,rad=0.2",
),
)
ax.scatter(xes, yes, label=community, s=psize[f"C{i}"] / 1000, alpha=[0.8, 0.5, 0.3])
ax.semilogy()
ax.set_xlim(-0.1, 1.1)
ax.set_ylim(1e-9, 1e-4)
def main():
"""Do the main."""
dataset = load_data()
fig, axes = pyplot.subplots(
ncols=3,
nrows=1,
sharey=True,
sharex=True,
gridspec_kw={
"wspace": 0,
"top": 0.92,
"right": 0.99,
"left": 0.1,
"bottom": 0.18,
},
figsize=(8, 3.3),
)
draw_balls(axes[0], dataset["crit"], dataset["partsize"])
axes[0].set_title("Vaccine critical URLs")
axes[0].set_ylabel("Average visiting probability")
draw_balls(axes[1], dataset["media"], dataset["partsize"])
axes[1].set_title("Media")
axes[1].set_xlabel("Escape probability")
axes[1].tick_params(left=False, which="both")
draw_balls(axes[2], dataset["full"], dataset["partsize"])
axes[2].set_title("Full hyper-graph")
axes[2].legend(fancybox=False, frameon=False)
axes[2].tick_params(left=False, which="both")
# fig.suptitle("Evolution of community reach in the three periods")
pyplot.tight_layout(h_pad=0, w_pad=0)
pyplot.savefig("plot_community_periods.pdf")
if __name__ == "__main__":
main()
| 3,513 | 28.283333 | 93 | py |
DataCovVac | DataCovVac-main/covvac-plots/plot_hashtag_periods.py | #!/usr/bin/env python
"""Most important hashtags per period."""
import json
from collections import Counter
from datetime import date
import numpy as np
import squarify
from matplotlib import colors, pyplot
from matplotlib.transforms import Bbox
import cov
import cov_text
import cov_utils
pyplot.style.use("mplrc")
grey = "#999999"
class Hashtags:
"""Collecting data.
Collect count of hashtags per day in __data__[hashtag][day]
Collect best 3 hashtags of each day in __best_hashtags__
Collect all Hashtags in __hashtags__
Tot number of hashtag per day in __day_tot__
"""
def __init__(self):
self.__days__ = []
self.__hashtags__ = set()
self.__best_hashtags__ = set()
self.__data__ = {}
self.__day_tot__ = {}
def update(self, day, hashtags):
"""Update, adding new hashtags."""
try:
iday = self.__days__.index(day)
except ValueError:
iday = None
self.__hashtags__ |= set(hashtags.keys())
if iday is None:
self.__days__.append(day)
for hashtag, count in hashtags.items():
self.__data__.setdefault(day, Counter())
self.__data__[day][hashtag] += count
self.__day_tot__.setdefault(day, 0)
self.__day_tot__[day] += sum(hashtags.values())
def timeseries(self, use=None):
"""Return the timeseries of each hashtag.
Parameters
----------
use: list or None
list of hashtags to use (if None use the best 3 hashtags per day).
"""
if use is None:
use = self.__best_hashtags__
use = {
k for dhash in self.__data__.values() for k, v in dhash.most_common(3)
}
data = np.zeros((len(use), len(self.__days__)))
bht = {ht: i for i, ht in enumerate(use)}
for iday, day in enumerate(self.__days__):
for ht, iht in bht.items():
data[iht, iday] += self.__data__[day].get(ht, 0)
# tots = [val for day, val in sorted(self.__day_tot__.items())]
tots = [self.__day_tot__[day] for day in self.__days__]
return list(bht.keys()), self.__days__, data, np.array(tots)
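# Usage sketch for the Hashtags collector (hypothetical counts):
#   hts = Hashtags()
#   hts.update("2021-01-01", Counter({"vaccine": 3, "pfizer": 1}))
#   hts.update("2021-01-02", Counter({"vaccine": 2}))
#   names, days, matrix, totals = hts.timeseries()
#   # matrix has one row per selected hashtag and one column per updated day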
def get_daily_hashtags(day: date, keywords=set()):
"""Extract all daily hashtags with count.
Parameters
----------
day : date
day of interest
    keywords : set, optional
        keywords (hashtags) to remove (default: empty set)
Returns
-------
Hashtags : dict
map of day: Counter of hashtags
"""
daily = cov.load_day(day)
hashtags = {day.isoformat(): Counter()}
for tid, tweet, rtws in daily.tweets.iter():
for hashtag in tweet.get("hashtags", "").split("|"):
if hashtag == "":
continue
hashtags[day.isoformat()][hashtag] += 1
for rtw in rtws:
rtw_day = rtw["created_at"][:10]
hashtags.setdefault(rtw_day, Counter())
hashtags[rtw_day][hashtag] += 1
if len(keywords) > 0:
for kwd in keywords | {"covidー19", "covid19fr", "covid19france", ""}:
for hash in hashtags.values():
hash.pop(kwd, None)
return hashtags
def filter_entropy(data, hashtags, min_count_rate=None):
    """Keep hashtags whose counts are concentrated in time (temporal entropy below 80%
    of the maximum) and, optionally, averaging more than ``min_count_rate`` counts per time step."""
nht, ntime = data.shape
thresh = np.log2(ntime) * 0.8
filtered = [i for i in range(nht) if cov_utils.entropy(data[i, :]) < thresh]
if min_count_rate is not None:
filtered = [i for i in filtered if data[i, :].sum() > min_count_rate * ntime]
return data[filtered, :], [hashtags[ht] for ht in filtered]
def sort_part(matrix: np.array, hashtags: list):
_matrix = matrix.copy()
for i in range(_matrix.shape[0]):
nsum = _matrix[i, :].sum()
_matrix[i, :] = cov_utils.smooth_convolve(_matrix[i, :], window=7)
_matrix[i, :] *= nsum / _matrix[i, :].sum()
parts = cov_utils.cluster_first_axis(_matrix)
maxes = _matrix.argmax(axis=1)
parts = [p for p in sorted(parts, key=lambda x: np.mean([maxes[i] for i in x]))]
sep = []
for p in parts:
sep.append(len(p))
indices = [i for nodes in parts for i in nodes]
return indices, np.cumsum(sep)
def auto_fit_fontsize(text, width, height, fig=None, ax=None):
"""Auto-decrease the fontsize of a text object.
Args:
text (matplotlib.text.Text)
width (float): allowed width in data coordinates
height (float): allowed height in data coordinates
"""
fig = fig or pyplot.gcf()
ax = ax or pyplot.gca()
# get text bounding box in figure coordinates
renderer = fig.canvas.get_renderer()
bbox_text = text.get_window_extent(renderer=renderer)
# transform bounding box to data coordinates
bbox_text = Bbox(ax.transData.inverted().transform(bbox_text))
# evaluate fit and recursively decrease fontsize until text fits
fits_width = bbox_text.width < width if width else True
fits_height = bbox_text.height < height if height else True
if not all((fits_width, fits_height)) and text.get_fontsize() >= 5:
text.set_fontsize(text.get_fontsize() - 1)
auto_fit_fontsize(text, width, height, fig, ax)
def add_text(labels: list, squares: list, ax: pyplot.Axes, fig: pyplot.Figure):
for text, rect in zip(labels, squares):
ratio = rect["dx"] / rect["dy"]
if ratio > 1.5:
angle = 0
elif ratio < 0.75:
angle = 90
else:
angle = 45
axtext = ax.text(
rect["x"] + 0.5 * rect["dx"],
rect["y"] + 0.5 * rect["dy"],
text,
fontsize=24,
ha="center",
va="center",
rotation=angle,
)
auto_fit_fontsize(axtext, rect["dx"], rect["dy"], fig=fig, ax=ax)
def load_data():
dt_ranges = [
list(cov_utils.daterange("2020-01-01", "2020-11-11")),
list(cov_utils.daterange("2020-11-11", "2021-06-01")),
list(cov_utils.daterange("2021-06-01", "2021-10-01")),
]
dt_map = {
dt.isoformat(): indx for indx, dtrange in enumerate(dt_ranges) for dt in dtrange
}
cache = cov_utils.BASENAME / "data/hashtags_period_count.json"
if cache.is_file():
with open(cache, "rt") as fin:
hts = json.load(fin)
return [Counter(x) for x in hts]
hts = [Counter() for _ in dt_ranges]
keywords = cov_utils.get_keywords("covid") | cov_utils.get_keywords("vaccine")
for dtrange in dt_ranges:
for day in dtrange:
print(day, end="\r")
hashtags = get_daily_hashtags(day, keywords)
for day, hashs in hashtags.items():
try:
indx = dt_map[day]
except KeyError:
continue
hts[indx] += hashs
with open(cov_utils.BASENAME / "data/hashtags_period_count.json", "wt") as fout:
json.dump([dict(x) for x in hts], fout)
return hts
def more_than_expected(corpus: list):
hts = set([ht for doc in corpus for ht in doc])
hts = {ht: indx for indx, ht in enumerate(hts)}
def csum(c):
return np.sum(list(c.values()))
cum = Counter()
for doc in corpus:
cum += doc
hts = [
sorted(
list(doc.keys()),
# key=lambda x: doc[x] * csum(cum) / cum[x] / csum(doc),
key=lambda x: doc[x] / cum[x],
reverse=True,
)[:20]
for doc in corpus
]
return hts
def main():
"""Do the main."""
hts = load_data()
for ht in hts:
print(ht.most_common(5))
# out = cov_text.tfidf(
# [" ".join(ht.elements()) for ht in hts],
# stop_words=["../data/stopwords-en.json", "../data/stopwords-fr.json"],
# )
# best_words = cov_text.tfidf_best_words(*out, nwords=20)
best_words = [[x[0] for x in ht.most_common(20)] for ht in hts]
# best_words = more_than_expected(hts)
print(best_words)
fig, axes = pyplot.subplots(
ncols=3,
figsize=(12, 4),
gridspec_kw={
"hspace": 0.1,
"wspace": 0.1,
"top": 0.9,
"right": 0.98,
"left": 0.02,
"bottom": 0.02,
},
)
for hashtags, words, ax in zip(hts, best_words, axes):
sizes = [hashtags[w] for w in words]
rects = squarify.squarify(
squarify.normalize_sizes(sizes, 100, 100), 0, 0, 100, 100
)
squarify.plot(sizes, ax=ax, norm_y=100, norm_x=100, pad=False, alpha=0.5)
add_text(words, rects, ax, fig)
ax.axis("off")
axes[0].set_title("First Period")
axes[1].set_title("Second Period")
axes[2].set_title("Third Period")
pyplot.savefig("plot_hashtag_periods.pdf")
if __name__ == "__main__":
main()
| 8,878 | 27.827922 | 88 | py |
DataCovVac | DataCovVac-main/covvac-plots/plot_community_reach_onlyprob.py | #!/usr/bin/env python3
"""Plot community reach.
depends on:
- ../run_10_make_adj.py
- ../
"""
import json
from collections import Counter
import numpy as np
from adjustText import adjust_text
from matplotlib import pyplot
import cov_utils
from conf import TAU
pyplot.style.use("mplrc")
grey = "#999999"
def load_comm_retweets(kinds="PRE"):
if isinstance(kinds, str):
kinds = [
kinds,
]
with open(f"../data/community_reach_edgelist-tau_{TAU}.json", "rt") as fin:
data = json.load(fin)
base = {"out": 0, "in": 0, "self": 0}
degrees = {}
for kind in kinds:
for edge in data[kind.lower()]:
degrees.setdefault(edge["from"], base.copy())
degrees.setdefault(edge["to"], base.copy())
if edge["from"] == edge["to"]:
degrees[edge["from"]]["self"] += edge["norm_weight"]
else:
degrees[edge["from"]]["out"] += edge["norm_weight"]
degrees[edge["to"]]["in"] += edge["norm_weight"]
return {
"part": list(degrees.keys()),
} | {k: np.array([d[k] for d in degrees.values()]) for k in base}
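# Shape note (illustrative): the returned dict has the form
#   {"part": [comm, ...], "out": array([...]), "in": array([...]), "self": array([...])}
# with one entry per community, aligned by position.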
def comm_size():
"""Return the size of each partition.
Returns
-------
count : dict
a dict of partiton -> number of nodes.
"""
with open(f"../data/adjacency-tau_{TAU}_nodes.json", "rt") as fin:
data = json.load(fin)["nodes"]
return Counter([d["part"] for d in data])
def draw_labels(ax, *args):
texts = []
for x, y, t in args:
texts.append(
ax.text(
x,
y,
t,
va="center",
ha="center",
transform=ax.transAxes,
color=t,
)
)
adjust_text(
texts,
ax=ax,
autoalign="y",
arrowprops=dict(arrowstyle="-", lw=0.5, alpha=0.5),
transform=ax.transAxes,
)
def plot_balls(ax, xes, yes, sizes, colors, parts):
ax.scatter(xes, yes, sizes / 100, color=[colors[p] for p in parts], alpha=0.8)
text = [x for x in zip(xes, yes, parts) if x[2] in set([f"C{i}" for i in range(7)])]
ax.set_yscale("log")
ax.set_ylim(1e-9, 1e-4)
draw_labels(ax, *text)
def main():
"""Do the main."""
fig, axes = pyplot.subplots(
ncols=3,
nrows=1,
figsize=(8, 3.3),
sharey="row",
sharex=True,
squeeze=False,
gridspec_kw={
"wspace": 0,
"top": 0.92,
"right": 0.99,
"left": 0.1,
"bottom": 0.18,
},
)
csize = comm_size()
data = cov_utils.load_csv(f"../data/community_outreach-tau_{TAU}.tsv", transpose=True)
print(data.keys())
for k, v in data.items():
if k != "part":
data[k] = np.array(v)
rng = np.random.default_rng()
# colors = {p: cmap(r) for p, r in zip(csize, rng.random(len(csize)))}
colors = {p: "#666666" for p, r in zip(csize, rng.random(len(csize)))}
for i in range(7):
colors[f"C{i}"] = f"C{i}"
# Critic probs
ax = axes[0, 0]
size = np.array([csize[p] for p in data["part"]])
xes = data["crit_prob_in_out"] / data["crit_prob_steady"]
yes = data["crit_prob_steady"] / size
plot_balls(ax, xes, yes, size, colors, data["part"])
ax.set_title("Vaccine critical URLs")
ax.set_ylabel("Average visit probability")
# Media probs
ax = axes[0, 1]
xes = data["media_prob_in_out"] / data["media_prob_steady"]
yes = data["media_prob_steady"] / size
plot_balls(ax, xes, yes, size, colors, data["part"])
ax.tick_params(left=False, which="both")
ax.set_title("Media")
ax.set_xlabel("Escape probability", labelpad=10)
# full probs
ax = axes[0, 2]
xes = data["full_prob_in_out"] / data["full_prob_steady"]
yes = data["full_prob_steady"] / size
plot_balls(ax, xes, yes, size, colors, data["part"])
ax.tick_params(left=False, which="both")
ax.set(title="Full hyper-graph")
pyplot.savefig("plot_community_reach_onlyprob.pdf")
if __name__ == "__main__":
main()
| 4,151 | 24.95 | 90 | py |
DataCovVac | DataCovVac-main/covvac-plots/plot_simple_stats.py | #!/usr/bin/env python3
"""Plot tweets and retweets fraction of critic and media content."""
from datetime import date
import numpy as np
from matplotlib import pyplot
import cov_utils
pyplot.style.use("mplrc")
grey = "#999999"
def main():
"""Do your stuff."""
data = cov_utils.load_csv("../data/daily_stats.tsv", transpose=True)
print(*data.keys())
data["day"] = [date.fromisoformat(day) for day in data["day"]]
for key in data:
if key[:3] == "twt" or key[:4] == "rtwt":
data[key] = np.array(data[key])
fig, axes = pyplot.subplots(
nrows=2,
figsize=(7, 3),
sharex=True,
gridspec_kw={
"hspace": 0,
"top": 0.9,
"right": 0.95,
"left": 0.15,
"height_ratios": [0.6, 0.4],
},
)
crit = data["twt_crit"] + data["rtwt_crit"]
media = data["twt_media"] + data["rtwt_media"]
tot = crit + media + data["twt_nourls"] + data["rtwt_nourls"]
tot[tot == 0] = 1
ax = axes[0]
ax.plot(data["day"], crit / tot, label="Vaccine critical URLs")
ax.plot(data["day"], media / tot, label="Media URLs", zorder=-1)
ax.set_ylabel("Fraction")
ax = axes[1]
ax.plot(data["day"], crit)
ax.plot(data["day"], media)
ax.plot(data["day"], tot, color=grey, label="Total flow")
ax.set_ylabel("Count")
for ax in axes:
ax.axvline(date(2020, 11, 11), color=grey, zorder=-1, alpha=0.5)
ax.axvline(date(2021, 6, 1), color=grey, zorder=-1, alpha=0.5)
fig.legend()
fig.align_labels()
pyplot.savefig("plot_simple_stats.pdf")
if __name__ == "__main__":
main()
| 1,658 | 25.758065 | 72 | py |
DataCovVac | DataCovVac-main/covvac-plots/plot_urls_comms_adj.py | #!/usr/bin/env python
"""Draw a plot of the community-urls usage."""
import json
import networkx as nx
import numpy as np
from matplotlib import colors, pyplot
from scipy import sparse
from sklearn import cluster
from conf import TAU
pyplot.style.use("mplrc")
def get_keywords(topic):
"""Load filtering words.
Get all possible keywords plus the minimum set of keywords contained in the full set.
Parameters
----------
topic : str
the topic (one of `covid` or `vaccine`)
Returns
-------
    keywords : set
        the set of lower-cased keywords for the topic
"""
filepath = f"../data/keys-{topic}.txt"
print(filepath)
with open(filepath, "rt") as fin:
long = set([line.lower().strip() for line in fin])
return long
def load_mats(kind):
basename = f"../data/stats_comm_tau_{TAU}_"
adj = sparse.load_npz(basename + kind + ".npz")
with open(basename + kind + "_map.json", "rt") as fin:
names = json.load(fin)
if kind == "hashtags":
keywords = get_keywords("covid") | get_keywords("vaccine")
keywords |= {"covid__19", "covid19france", "covid_19fr", "covidー19"}
indexes = [v for k, v in names["feat2indx"].items() if k not in keywords]
adj = adj[:, indexes]
names["feat2indx"] = {
k: i for i, k in enumerate([n for n in names["feat2indx"].keys() if n not in keywords])
}
inv_names = {}
for kname, name in names.items():
inv_names[kname] = {int(i): k for k, i in name.items()}
return adj, inv_names
def load_data(kinds):
"""Load data."""
if isinstance(kinds, str):
# load data
adj, names = load_mats(kinds)
else:
adj, names = load_mats(kinds[0])
print(adj.sum())
for kind in kinds:
_adj, _names = load_mats(kind)
adj += _adj
adj = adj.toarray()
# remove under represented communities and urls
threshold = np.sort(adj.sum(1))[-30]
c = np.argwhere(adj.sum(1) >= threshold).flatten()
threshold = np.sort(adj.sum(0))[-50]
u = np.argwhere(adj.sum(0) >= threshold).flatten()
_names = {}
adj = adj[c, :]
_names["communities"] = [names["comm2indx"][i] for i in c]
adj = adj[:, u]
_names["features"] = [names["feat2indx"][i] for i in u]
print(adj.shape)
return adj, _names
def projection(labels):
"""Return the projector to the label space."""
proj = np.zeros((len(np.unique(labels)), len(labels)))
proj[(labels, np.arange(len(labels)))] = 1
return proj
def arg_label_sort(labels, weights):
"""Return the indices that sort the labels.
Use the weights to sort the labels,
integrating over all entries with the same label.
"""
proj = projection(labels)
comm_weights = proj @ weights
indx = np.argsort(comm_weights[labels], kind="stable")[::-1]
return indx
def cluster_labels(matrix, use="agglomerative"):
"""Return labels."""
if use == "agglomerative":
kmeans = cluster.AgglomerativeClustering(n_clusters=7, affinity="cosine", linkage="average")
xlabels = kmeans.fit(matrix).labels_
kmeans = cluster.FeatureAgglomeration(n_clusters=7, affinity="cosine", linkage="average")
ylabels = kmeans.fit(matrix).labels_
    else:
        # NOTE: this branch relies on a `covid.directed_partition` helper that is not imported in this script
adj1 = matrix @ matrix.T
adj1[adj1 < 0] = 0
xlabels = covid.directed_partition(nx.from_numpy_array(adj1))
xlabels = np.array(list(xlabels.values()))
adj2 = matrix.T @ matrix
adj2[adj2 < 0] = 0
ylabels = covid.directed_partition(nx.from_numpy_array(adj2))
ylabels = np.array(list(ylabels.values()))
return xlabels, ylabels
def sort_adj(adj, freq, names):
"""Sort the matrix."""
def sort_all(indx, labels, adj, freq, names, axis="communities"):
if axis == "communities":
adj[:, :] = adj[indx, :]
freq[:, :] = freq[indx, :]
else:
adj[:, :] = adj[:, indx]
freq[:, :] = freq[:, indx]
names[axis] = [names[axis][i] for i in indx]
rlabels = labels[indx]
return [i for i, (l1, l2) in enumerate(zip(rlabels, rlabels[1:])) if l1 != l2]
ncomm, nurl = adj.shape
# sort communities based on retweets
xlabels = np.arange(ncomm)
indx = arg_label_sort(xlabels, freq.sum(1))
sort_all(indx, xlabels, adj, freq, names, axis="communities")
# sort urls based on retweets
ylabels = np.arange(nurl)
indx = arg_label_sort(ylabels, freq.sum(0))
sort_all(indx, ylabels, adj, freq, names, axis="features")
xlabels, ylabels = cluster_labels(adj, use="agglomerative")
# sort communities based on similarity
indx = arg_label_sort(xlabels, freq.sum(1))
sep_comms = sort_all(indx, xlabels, adj, freq, names, axis="communities")
# sort urls based on similarity
indx = arg_label_sort(ylabels, freq.sum(0))
sep_urls = sort_all(indx, ylabels, adj, freq, names, axis="features")
return sep_urls, sep_comms
def main():
"""Do main."""
adj_freq, names = load_data(["urls_pre", "urls_post"])
__plot_adj__(adj_freq, names, "plot_url_crit_heatmap.pdf")
adj_freq, names = load_data(["urls_media"])
__plot_adj__(adj_freq, names, "plot_url_media_heatmap.pdf")
adj_freq, names = load_data(["hashtags"])
__plot_adj__(adj_freq, names, "plot_url_hash_heatmap.pdf")
def __plot_adj__(matrix, names, filename):
marginal0 = matrix.sum(0) / matrix.sum()
marginal1 = matrix.sum(1) / matrix.sum()
adj_null = np.outer(marginal1, marginal0)
adj = matrix / matrix.sum() - adj_null
print(names.keys())
sep_urls, sep_comms = sort_adj(adj, matrix, names)
fig, axes = pyplot.subplots(
nrows=2,
ncols=2,
sharex="col",
sharey="row",
figsize=(8, 8),
gridspec_kw={
"top": 0.75,
"bottom": 0,
"right": 1,
"hspace": 0,
"wspace": 0,
"width_ratios": [3, 1],
"height_ratios": [4, 1],
},
)
# plot main adj
n_comm, n_urls = adj.shape
ax = axes[0, 0]
ax2 = ax.twiny()
vmax = max(np.abs(adj.min()), np.abs(adj.max()))
cadj = ax.matshow(
adj,
aspect="auto",
cmap="PiYG",
norm=colors.SymLogNorm(linthresh=0.00001, vmin=-vmax, vmax=vmax),
)
cb_ax = pyplot.axes([0.77, 0.05, 0.2, 0.2], frame_on=False, xticks=[], yticks=[])
fig.colorbar(
cadj,
ax=cb_ax,
orientation="horizontal",
aspect=12,
)
ax.set(
xticks=[],
yticks=range(n_comm),
yticklabels=names["communities"],
ylabel="Communities",
)
ax2.set(
xlim=(0, n_urls),
xticks=np.arange(n_urls) + 0.5,
)
    def truncate(string, length=30):
        if len(string) <= length:
            return string
        return string[:length - 1] + "…"
ax2.set_xticklabels([truncate(n) for n in names["features"]], rotation=90, fontsize="x-small")
for y in sep_comms:
ax.axhline(y + 0.5)
for x in sep_urls:
ax.axvline(x + 0.5)
# plot url density
ax = axes[1, 0]
ax.bar(np.arange(n_urls), -matrix.sum(0))
ax.set(
xticks=[],
yticks=[],
ylabel="Number of\nretweets",
frame_on=False,
)
# plot comm density
ax = axes[0, 1]
ax2 = ax.twiny()
ax.barh(np.arange(n_comm), matrix.sum(1), log=True)
ax.set(
frame_on=False,
)
ax2.set(
frame_on=False,
xlabel="Number of retweets\n(log scale)",
xticks=[],
)
ax.axis("off")
ax = axes[1, 1]
ax.axis("off")
fig.align_ylabels(axes)
pyplot.savefig(filename)
if __name__ == "__main__":
main()
| 7,814 | 26.421053 | 100 | py |
erics | erics-main/erics.py | import numpy as np
from scipy.stats import norm
from warnings import warn
import copy
import time
class ERICS:
def __init__(self, n_param, window_mvg_average=50, window_drift_detect=50, beta=0.0001, base_model='probit',
init_mu=0, init_sigma=1, epochs=10, lr_mu=0.01, lr_sigma=0.01):
"""
ERICS: Effective and Robust Identification of Concept Shift
please cite:
[1] ERICS Paper (Todo)
[2] Haug, Johannes, et al. "Leveraging Model Inherent Variable Importance for Stable Online Feature Selection."
Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 2020.
:param n_param: (int) Total no. of parameters (corresponds to no. of features for probit model)
:param window_mvg_average: (int) Window Size for computation of moving average
:param window_drift_detect: (int) Window Size for Drift Detection
:param beta: (float) Update rate for the alpha-threshold
:param base_model: (str) Name of the base predictive model (whose parameters we investigate)
:param init_mu: (int) Initialize mean of parameter distributions (according to [2])
:param init_sigma: (int) Initialize variance of parameter distributions (according to [2])
:param epochs: (int) Number of epochs for optimization of parameter distributions (according to [2])
:param lr_mu: (float) Learning rate for the gradient update of the mean (according to [2])
:param lr_sigma: (float) Learning rate for the gradient update of the variance (according to [2])
"""
# User-set ERICS-hyperparameters
self.n_param = n_param
self.M = window_mvg_average
self.W = window_drift_detect
self.beta = beta
self.base_model = base_model
# Default hyperparameters
self.time_step = 0 # Current Time Step
self.time_since_last_global_drift = 0 # Time steps since last global drift detection
self.time_since_last_partial_drift = np.zeros(n_param) # Time steps since last partial drift detection
self.global_drifts = [] # Time steps of all global drifts
self.partial_drifts = [] # (time step, param.idx)-tuples of all partial drifts
self.alpha = None # Adaptive threshold for global concept drift detection
self.partial_alpha = np.asarray([None] * self.n_param) # Adaptive threshold for partial concept drift detection
self.mu_w = np.ones((self.M, self.n_param)) * init_mu # Parameter Mean in window
self.sigma_w = np.ones((self.M, self.n_param)) * init_sigma # Parameter Variance in window
self.param_sum = np.zeros((self.M - 1, self.n_param)) # Sum-expression for computation of moving average (see Eq. (8) in [1])
self.global_info_ma = [] # Global moving average
self.partial_info_ma = [] # Partial moving average
# Parameters of FIRES (Probit) model according to [2]
if self.base_model == 'probit':
self.fires_mu = np.ones(self.n_param) * init_mu
self.fires_sigma = np.ones(self.n_param) * init_sigma
self.fires_epochs = epochs
self.fires_lr_mu = lr_mu
self.fires_lr_sigma = lr_sigma
self.fires_labels = [] # Unique labels (fires requires binary labels)
self.fires_encode_labels = True # Indicator for warning message (auto-encoded labels)
# ### ADD YOUR OWN MODEL PARAMETERS HERE ############################
# if self.base_model == 'your_model':
# # define parameters
#####################################################################
def check_drift(self, x, y):
"""
Process data batch and check for concept drift
:param x: (np.ndarray) Batch of observations
:param y: (np.ndarray) Batch of labels
:return: indicator global drift, indicator partial drift, computation time in sec.
:rtype bool, bool, float
"""
# Update alpha (Eq. 7 in [1])
if self.alpha is not None:
self.alpha -= (self.alpha * self.beta * self.time_since_last_global_drift)
for k in range(self.n_param): # partial alpha
if self.partial_alpha[k] is not None:
self.partial_alpha[k] -= (self.partial_alpha[k] * self.beta * self.time_since_last_partial_drift[k])
# Update time since drift
self.time_since_last_global_drift += 1
self.time_since_last_partial_drift += 1
# Update Parameter distribution
if self.base_model == 'probit':
self.__update_probit(x, y) # Probit model
# ### ADD YOUR OWN MODEL HERE #######################################
# elif(self.base_model == 'your_model':
# self.__update_your_model(x,y)
#####################################################################
else:
raise NotImplementedError('The base model {} has not been implemented.'.format(self.base_model))
start = time.time() # Start time drift detection
self.__update_param_sum() # Update the sum expression for observations in a shifting window
self.__compute_moving_average() # Compute moving average in specified window
g_drift, p_drift = self.__detect_drift() # Detect concept drift
# Update time step
self.time_step += 1
return g_drift, p_drift, time.time() - start
def __update_param_sum(self):
"""
Retrieve current parameter distribution and compute sum expression according to Eq. (8) in the ERICS paper [1]
"""
# Retrieve current distribution parameters
if self.base_model == 'probit':
new_mu = copy.copy(self.fires_mu).reshape(1, -1)
new_sigma = copy.copy(self.fires_sigma).reshape(1, -1)
# ### ADD YOUR OWN MODEL HERE #######################################
# elif(self.base_model == 'your_model':
# new_mu = your_model.mu
# new_sigma = your_model.sigma
#####################################################################
else:
raise NotImplementedError('The base model {} has not been implemented.'.format(self.base_model))
# Drop oldest entry from window
self.mu_w = self.mu_w[1:, :]
self.sigma_w = self.sigma_w[1:, :]
# Add new entry to window
self.mu_w = np.concatenate((self.mu_w, new_mu))
self.sigma_w = np.concatenate((self.sigma_w, new_sigma))
# Compute parameter sum expression
for t in range(self.M - 1):
self.param_sum[t, :] = (self.sigma_w[t + 1, :] ** 2 + (self.mu_w[t, :] - self.mu_w[t + 1, :]) ** 2) / self.sigma_w[t, :] ** 2
def __compute_moving_average(self):
"""
Compute the moving average (according to Eq. (8) in the ERICS paper [1])
"""
partial_ma = np.zeros(self.n_param)
global_score = np.zeros(self.M - 1)
for k in range(self.n_param):
partial_score = self.param_sum[:, k] - 1
global_score += partial_score
partial_ma[k] = np.sum(np.abs(partial_score)) / (2 * self.M) # Add partial mov. avg. for parameter k
global_ma = np.sum(np.abs(global_score)) / (2 * self.M)
self.global_info_ma.append(global_ma)
self.partial_info_ma.append(partial_ma)
def __detect_drift(self):
"""
Detect global and partial concept drift using the adaptive alpha-threshold
:return: global drift indicator, partial drift indicator
:rtype: bool, bool
"""
global_window_delta = None
partial_window_delta = None
# Compute delta in sliding window W (according to Eq. (5) in the ERICS paper [1])
if self.W < 2:
self.W = 2
warn('Sliding window for concept drift detection was automatically set to 2 observations.')
if len(self.global_info_ma) < self.W:
oldest_entry = len(self.global_info_ma)
else:
oldest_entry = self.W
if oldest_entry == 1: # In case of only one observation
global_window_delta = copy.copy(self.global_info_ma[-1])
partial_window_delta = copy.copy(self.partial_info_ma[-1])
else:
for t in range(oldest_entry, 1, -1):
if t == oldest_entry:
global_window_delta = self.global_info_ma[-t+1] - self.global_info_ma[-t] # newer - older
partial_window_delta = self.partial_info_ma[-t+1] - self.partial_info_ma[-t]
else:
global_window_delta += (self.global_info_ma[-t+1] - self.global_info_ma[-t])
partial_window_delta += (self.partial_info_ma[-t+1] - self.partial_info_ma[-t])
# (Re-) Initialize alpha if it is None (at time step 0 or if a drift was detected)
if self.alpha is None:
self.alpha = np.abs(global_window_delta) # according to Eq. (6) in [1] -> abs() is only required at t=0, to make sure that alpha > 0
if None in self.partial_alpha:
unspecified = np.isnan(self.partial_alpha.astype(float)).flatten()
self.partial_alpha[unspecified] = np.abs(partial_window_delta[unspecified])
# Global Drift Detection
g_drift = False
if global_window_delta > self.alpha:
g_drift = True
self.global_drifts.append(self.time_step)
self.time_since_last_global_drift = 0
self.alpha = None
# Partial Drift Detection
p_drift = False
partial_drift_bool = partial_window_delta > self.partial_alpha
for k in np.argwhere(partial_drift_bool):
p_drift = True
self.partial_drifts.append((self.time_step, k.item()))
self.time_since_last_partial_drift[k] = 0
self.partial_alpha[k] = None
return g_drift, p_drift
###########################################
# BASE MODELS
##########################################
def __update_probit(self, x, y):
"""
Update parameters of the Probit model
According to [2], as implemented here https://github.com/haugjo/fires
We have slightly adjusted the original code to fit our use case.
:param x: (np.ndarray) Batch of observations (numeric values only, consider normalizing data for better results)
:param y: (np.ndarray) Batch of labels: type binary, i.e. {-1,1} (bool, int or str will be encoded accordingly)
"""
# Encode labels
for y_val in np.unique(y): # Add newly observed unique labels
if y_val not in set(self.fires_labels):
self.fires_labels.append(y_val)
if tuple(self.fires_labels) != (-1, 1): # Check if labels are encoded correctly
if self.fires_encode_labels:
warn('FIRES WARNING: The target variable will automatically be encoded as {-1, 1}.')
self.fires_encode_labels = False # set indicator to false
if len(self.fires_labels) < 2:
y[y == self.fires_labels[0]] = -1
elif len(self.fires_labels) == 2:
y[y == self.fires_labels[0]] = -1
y[y == self.fires_labels[1]] = 1
else:
raise ValueError('The target variable y must be binary.')
for epoch in range(self.fires_epochs):
# Shuffle the observations
random_idx = np.random.permutation(len(y))
x = x[random_idx]
y = y[random_idx]
# Iterative update of mu and sigma
try:
# Helper functions
dot_mu_x = np.dot(x, self.fires_mu)
rho = np.sqrt(1 + np.dot(x ** 2, self.fires_sigma ** 2))
# Gradients
nabla_mu = norm.pdf(y / rho * dot_mu_x) * (y / rho * x.T)
nabla_sigma = norm.pdf(y / rho * dot_mu_x) * (
- y / (2 * rho ** 3) * 2 * (x ** 2 * self.fires_sigma).T * dot_mu_x)
# Marginal Likelihood
marginal = norm.cdf(y / rho * dot_mu_x)
# Update parameters
self.fires_mu += self.fires_lr_mu * np.mean(nabla_mu / marginal, axis=1)
self.fires_sigma += self.fires_lr_sigma * np.mean(nabla_sigma / marginal, axis=1)
except TypeError as e:
raise TypeError('All features must be a numeric data type.') from e
# ### ADD YOUR OWN MODEL HERE #######################################
# def __update_your_model(x,y):
# # update the parameters of your model
#####################################################################
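# Usage sketch with synthetic data (illustrative only; the batch sizes, number of
# parameters and window sizes below are arbitrary choices, not values from the paper).
if __name__ == "__main__":
    rng = np.random.default_rng(seed=0)
    detector = ERICS(n_param=5, window_mvg_average=20, window_drift_detect=10)
    for t in range(200):
        x_batch = rng.normal(size=(10, 5))
        y_batch = rng.choice([-1, 1], size=10)
        if t >= 100:  # crude synthetic shift in the second half of the stream
            x_batch[:, 0] += 5.0
        global_drift, partial_drift, _ = detector.check_drift(x_batch, y_batch)
        if global_drift:
            print('Global drift flagged at time step', t)
    print('All global drifts:', detector.global_drifts)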
| 13,134 | 47.468635 | 145 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/Satellite Map Generation/main.py | from create_map import create_map
def take_screenshot(lat: float, long: float, row: int, col: int, number: int, file_name: str):
"""
Args:
        lat: Latitude of the first (top-left) screenshot
        long: Longitude of the first (top-left) screenshot
        row: Row count
        col: Column count
        number: Numbering for the output file (currently unused)
        file_name: Output file base name (passed to create_map as outfile)
Returns:
"""
create_map(
lat_start=lat,
long_start=long,
zoom=20,
number_rows=row,
number_cols=col,
scale=1.0,
sleep_time=2,
offset_left=0.1666666,
offset_top=0.1666666,
offset_right=0.1666667,
offset_bottom=0.1666667,
outfile=file_name
)
if __name__=='__main__':
    # Example: 3x3 -> 9 images
    Lat, Long = 40.000138757873195, -83.01825366047777 # a smaller example map
    rows, cols = 3, 3
# Lat, Long = 40.01835966827935, -83.03297664244631 # 30*17 Larger Map, 2.3km^2
# rows, cols = 30, 17
take_screenshot(
lat=Lat, # First image center latitude
long=Long, # First image center longitude
row=rows,
col=cols,
file_name="image", # Map image: "image-map-{number}.png"
number=0
) | 1,202 | 25.152174 | 94 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/Satellite Map Generation/create_map.py | import os
import time
import tkinter
import numpy as np
from PIL import Image
import pyscreenshot
from selenium import webdriver
# Removing fields from Google Maps
remove_from_view = [
"var element = document.getElementById(\"omnibox-container\");element.remove();",
"var element = document.getElementById(\"watermark\");element.remove();",
"var element = document.getElementById(\"vasquette\");element.remove();",
"var element = document.getElementsByClassName(\"app-viewcard-strip\");element[0].remove();",
"var element = document.getElementsByClassName(\"scene-footer-container\");element[0].remove();",
]
# Removing labels from Google Maps Satellite View
remove_labels = [
"document.getElementsByClassName(\"t9hXV-cdLCv-checkbox\")[1].click();",
]
def js_code_execute(driver, js_string: str):
"""Execute the JS code"""
driver.execute_script(js_string)
def get_screen_resolution() -> tuple:
"""Return tuple of (width, height) of screen resolution in pixels."""
root = tkinter.Tk()
root.withdraw()
return root.winfo_screenwidth(), root.winfo_screenheight()
def calc_latitude_shift(screen_height: int, percent_hidden: float, zoom: int) -> float: # up&down
"""Return the amount to shift latitude per row of screenshots."""
return -0.000002968 * screen_height * (1 - percent_hidden) * (1 / 1.7 ** (zoom - 18)) # zoom=20
# return -0.0000025235 * screen_height * (1 - percent_hidden) * (1 / 1.7 ** (zoom - 18)) # zoom=21
def calc_longitude_shift(screen_width: int, percent_hidden: float, zoom: int) -> float:# left&right
"""Return the amount to shift longitude per column of screenshots."""
    return 0.000003876 * screen_width * (1 - percent_hidden) * (1 / 1.7 ** (zoom - 18)) # zoom=20
# return 0.0000032945 * screen_width * (1 - percent_hidden) * (1 / 1.7 ** (zoom - 18)) # zoom=21
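# Worked example (illustrative numbers): with a 1920x1080 screen, one third of each
# dimension cropped away in total (offsets of ~1/6 per side) and zoom 20,
#   calc_latitude_shift(1080, 1/3, 20)  ~= -0.000002968 * 1080 * (2/3) / 1.7**2 ~= -7.4e-4 degrees per row
#   calc_longitude_shift(1920, 1/3, 20) ~=  0.000003876 * 1920 * (2/3) / 1.7**2 ~=  1.7e-3 degrees per column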
def screenshot(screen_width: int, screen_height: int,
offset_left: float, offset_top: float,
offset_right: float, offset_bottom: float) -> Image:
"""Return a screenshot of only the pure maps area."""
x1 = int(offset_left * screen_width)
y1 = int(offset_top * screen_height)
x2 = int((offset_right * -screen_width) + screen_width)
y2 = int((offset_bottom * -screen_height) + screen_height)
# image = pyscreenshot.grab()
image = pyscreenshot.grab(bbox=(x1, y1, x2, y2))
return image
def scale_image(image: Image, scale: float) -> Image:
"""Scale an Image by a proportion, maintaining aspect ratio."""
width = round(image.width * scale)
height = round(image.height * scale)
image.thumbnail((width, height))
return image
def create_map(lat_start: float, long_start: float, zoom: int,
number_rows: int, number_cols: int,
scale: float = 1, sleep_time: float = 0,
offset_left: float = 0, offset_top: float = 0,
offset_right: float = 0, offset_bottom: float = 0,
outfile: str = None):
# Create a map or satellite image given a waypoint
"""
Args:
lat_start: Top-left coordinate to start taking screenshots.
long_start: Top-left coordinate to start taking screenshots.
number_rows: Number of rows to take screenshot.
number_cols: Number of columns to to create screenshot.
scale: Percent to scale each image to reduce final resolution
and filesize. Should be a float value between 0 and 1.
Recommend to leave at 1 for production, and between 0.05
and 0.2 for testing.
sleep_time: Seconds to sleep between screenshots.
Needed because Gmaps has some AJAX queries that will make
the image better a few seconds after confirming page load.
Recommend 0 for testing, and 3-5 seconds for production.
offset_*: Percent of each side to crop from screenshots.
Each should be a float value between 0 and 1. Offsets should
account for all unwanted screen elements, including:
taskbars, windows, multiple displays, and Gmaps UI (minimap,
search box, compass/zoom buttons). Defaults are set for an
Ubuntu laptop with left-side taskbar, and will need to be
tuned to the specific machine and setup where it will be run.
outfile: If provided, the program will save the final image to
this filepath. Otherwise, it will be saved in the current
working directory with name 'testing-<timestamp>.png'
offset_right: Right offset.
offset_top: Top offset.
offset_bottom: Bottom offset.
offset_left: Left offset.
"""
# DRIVER Selection
# Chromedriver should be in the current directory.
# Modify these commands to find proper driver Chrome or Firefox
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
driver = webdriver.Chrome(options=options, executable_path=r'./chromedriver.exe')
driver.maximize_window()
# Calculate amount to shift lat/long each screenshot
screen_width, screen_height = get_screen_resolution()
# Shifting values for lat and long
lat_shift = calc_latitude_shift(screen_height, (offset_top + offset_bottom), zoom)
long_shift = calc_longitude_shift(screen_width, (offset_left + offset_right), zoom)
# Writing coordinates to the file
f = open("./datasets/coordinates.txt", "w+")
"""
i = 0 -> Map View
i = 1 -> Satellite View
"""
i = 1
for row in range(number_rows):
for col in range(number_cols):
latitude = lat_start + (lat_shift * row)
longitude = long_start + (long_shift * col)
url = 'https://www.google.com/maps/'
# Map URL
if i == 0:
url += '@{lat},{long},{z}z'.format(lat=latitude, long=longitude, z=zoom)
# Satellite URL
elif i == 1:
url += '@{lat},{long},{z}z/data=!3m1!1e3'.format(lat=latitude, long=longitude, z=zoom)
driver.get(url)
time.sleep(5)
# Remove labels from Satellite view
if i == 1:
js_code_execute(driver, remove_labels[0])
time.sleep(3)
# js_code_execute(driver, remove_labels[1])
# Remove fields from Map view
for j in remove_from_view:
js_code_execute(driver, j)
# Let the map load all assets before taking a screenshot
time.sleep(sleep_time)
image = screenshot(screen_width, screen_height, offset_left, offset_top, offset_right, offset_bottom)
# Scale image up or down if desired, then save in memory
image = scale_image(image, scale)
map_path = './datasets/satellite_imgs/sat_%d_%d.png'%(row, col)
image.save(map_path)
f.write(f'{row}_{col}\tLat={latitude}\tLong={longitude}\n')
# Close the browser
driver.close()
driver.quit()
f.close() | 7,084 | 39.953757 | 113 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/test.py | from pathlib import Path
import os
import gc
import argparse
import cv2
from PIL import Image
Image.MAX_IMAGE_PIXELS = 933120000
import numpy as np
import matplotlib.cm as cm
from pyqtree import Index
import pickle
import torch
import time
from models.matching import Matching
from models.utils.utils import AverageTimer, VideoStreamer, frame2tensor, remove_kpts_on_building, segment_keypoints, update_last_data
from models.utils.utils_loc import generate_kml, retrieve_init_pixposition, update_current_GPS, UAV_loc_by_pix_PAffine
from models.utils.utils_plot import make_localization_plot
torch.set_grad_enabled(False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='SuperGlue demo',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--input', type=str, default='./assets/DJI_data/images/',
help='URL of an IP camera, '
'or path to an image directory or movie file')
parser.add_argument(
'--output_dir', type=str, default='./output/images/',
help='Directory where to write output frames (If None, no output)')
parser.add_argument(
'--image_glob', type=str, nargs='+', default=['*.png', '*.jpg', '*.jpeg'],
help='Glob if a directory of images is specified')
parser.add_argument(
'--skip', type=int, default=1,
help='Images to skip if input is a movie or directory')
parser.add_argument(
'--max_length', type=int, default=1000000,
help='Maximum length if input is a movie or directory')
parser.add_argument(
'--resize', type=int, nargs='+', default=[1280, 720],
help='Resize the input image before running inference. If two numbers, '
'resize to the exact dimensions, if one number, resize the max '
'dimension, if -1, do not resize')
parser.add_argument(
'--superglue', choices={'indoor', 'outdoor'}, default='outdoor',
help='SuperGlue weights')
parser.add_argument(
'--apply_GIS', action='store_true',
help='segment matches keypoints from building and non-building')
parser.add_argument(
'--max_keypoints', type=int, default=-1,
help='Maximum number of keypoints detected by Superpoint'
' (\'-1\' keeps all keypoints)')
parser.add_argument(
'--keypoint_threshold', type=float, default=0.005,
help='SuperPoint keypoint detector confidence threshold')
parser.add_argument(
'--nms_radius', type=int, default=4,
help='SuperPoint Non Maximum Suppression (NMS) radius'
' (Must be positive)')
parser.add_argument(
'--sinkhorn_iterations', type=int, default=20,
help='Number of Sinkhorn iterations performed by SuperGlue')
parser.add_argument(
'--match_threshold', type=float, default=0.2,
help='SuperGlue match threshold')
parser.add_argument(
'--switch_threshold', type=int, default=50,
help='Threshold for switching keypoints from non-building to building')
parser.add_argument(
'--patience', type=int, default=10,
        help='Patience for early stopping if the UAV position has not been updated for more than 10 seconds (video) or 10 frames (images); 0 turns this off.')
parser.add_argument(
'--KF_dt', type=float, default=1.0,
help='Time between steps in seconds')
parser.add_argument(
'--show_keypoints', action='store_true',
help='Show the detected keypoints')
parser.add_argument(
'--matching_vis', action='store_true',
help='Show the matched pairs')
parser.add_argument(
'--force_cpu', action='store_true',
help='Force pytorch to run in CPU mode.')
parser.add_argument(
'--satmap_init_gps', type=float, nargs='+', default=[40.01872927, -83.033835], # large sat
help='GPS of top-left corner of satellite map')
parser.add_argument(
'--Init_GPS', type=float, nargs='+', default=[40.012701, -83.009691], # Demo starting point GPS
help='Initial drone flight GPS')
parser.add_argument(
'--Orien', type=float, default=0.0,
        help='UAV initial orientation, i.e. the angle to rotate the first image clockwise to the North direction, ranging from 0-360.')
parser.add_argument(
'--Init_height', type=float, default=None,
help='UAV initial flight height')
parser.add_argument(
'--bin_interval', type=int, default=10,
help='Divide 360 degrees into multiple bins, each bin shares certain degrees')
parser.add_argument(
'--range', type=int, nargs='+', default=[900, 900],
help='Crop partial satellite image size (WxH) as basemap for matching')
parser.add_argument(
'--update_freq', type=int, default=3,
        help='Basemap update frequency: the basemap is re-centered once the UAV center moves more than 1/update_freq of the basemap half-range away from its center')
opt = parser.parse_args()
print(opt)
if len(opt.resize) == 2 and opt.resize[1] == -1:
opt.resize = opt.resize[0:1]
if len(opt.resize) == 2:
print('Will resize to {}x{} (WxH)'.format(
opt.resize[0], opt.resize[1]))
elif len(opt.resize) == 1 and opt.resize[0] > 0:
print('Will resize max dimension to {}'.format(opt.resize[0]))
elif len(opt.resize) == 1:
print('Will not resize images')
else:
raise ValueError('Cannot specify more than two integers for --resize')
device = 'cuda' if torch.cuda.is_available() and not opt.force_cpu else 'cpu'
print('Running inference on device \"{}\"'.format(device))
config = {
'superpoint': {
'nms_radius': opt.nms_radius,
'keypoint_threshold': opt.keypoint_threshold,
'max_keypoints': opt.max_keypoints
},
'superglue': {
'weights': opt.superglue,
'sinkhorn_iterations': opt.sinkhorn_iterations,
'match_threshold': opt.match_threshold,
}
}
matching = Matching(config).eval().to(device)
timer = AverageTimer()
# Load sat map info and its quadtree indexing file
satmap_kpts = np.load('./featurebase/satmap_kpts.npz')
image0, keypoints0, descriptors0, scores0 = satmap_kpts['image0'], satmap_kpts['keypoints0'], satmap_kpts['descriptors0'], satmap_kpts['scores0']
del satmap_kpts; gc.collect()
print('Satellite image size is {}x{} (HxW), containing {} keypoints'.format(*image0.shape, len(keypoints0)))
print('Max basemap range is {}x{} (WxH)'.format(*opt.range))
timer.update('Successfully loaded satellite map data, loading time',printout=True)
if os.path.exists('./featurebase/QuadTree_idx.pkl'):
with open('./featurebase/QuadTree_idx.pkl', 'rb') as inp:
spindex = pickle.load(inp)
else:
spindex = Index(bbox=(0, 0, image0.shape[1], image0.shape[0])) # Area of WxH
for i in range(len(keypoints0)):
w, h = keypoints0[i]
spindex.insert(i, (w,h,w,h))
# save quadtree indexing
with open('./featurebase/QuadTree_idx.pkl', 'wb') as outp:
pickle.dump(spindex, outp, pickle.HIGHEST_PROTOCOL)
timer.update('Successfully loaded satellite keypoints quadtree indexing, loading time',printout=True)
# Load satellite image GIS labels
mask = np.asarray(Image.open('./featurebase/GIS_mask.png'), dtype=np.int32) if opt.apply_GIS else None
timer.update('Successfully loaded GIS data, loading time',printout=True)
# Initialize frame0 (last_data) at the beginning
c_w, c_h = retrieve_init_pixposition(opt.satmap_init_gps, opt.Init_GPS)# basemap center in pixel distance in reference to top-left corner of satellite map
r_w, r_h = min(opt.range[0], c_w), min(opt.range[1], c_h) # in case it reaches satmap boundary
xmin, ymin, xmax, ymax = c_w-r_w, c_h-r_h, c_w+r_w, c_h+r_h
base_map = image0[ymin:ymax, xmin:xmax]
UAV_pix_pos_offset = [c_w-r_w, c_h-r_h]
timer.reset()
last_data, labels = update_last_data((image0, keypoints0, descriptors0, scores0), mask, spindex, (xmin, ymin, xmax, ymax), device) # return updated GIS labels if required
timer.update('Successfully updated last data, updating time',printout=True)
if opt.output_dir is not None:
print('==> Will write outputs to {}'.format(opt.output_dir))
Path(opt.output_dir).mkdir(exist_ok=True)
# dataloader
vs = VideoStreamer(opt)
frame, ret = vs.next_frame(1.0, go_next=False)
assert ret, 'Error when reading the first frame (try different --input?)'
# Initial parameters setup
timer = AverageTimer()
center, height = (r_w, r_h), opt.Init_height
not_valid, points, img_box = None, None, None
GPS = [] # save GPS as kml file which could be visualized at Google Earth
pred_GPS = opt.Init_GPS
Bins = round(opt.Orien/opt.bin_interval)
not_updated, offset, update_scale = 0, 0, 1.0
while True:
# update UAV rotation bins
Bins -= offset
Bins = (360/opt.bin_interval+Bins) if Bins<0 else Bins%(360/opt.bin_interval)
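        # Arithmetic note (illustrative): with the default bin_interval of 10 there are 36
        # bins, so e.g. Bins=2 with a rotation offset of 5 wraps around to 36 + (2 - 5) = 33.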
# update basemap range if center shift over range/2
if abs(center[0]-r_w)>r_w/opt.update_freq or abs(center[1]-r_h)>r_h/opt.update_freq:
c_w, c_h = center[0]+UAV_pix_pos_offset[0], center[1]+UAV_pix_pos_offset[1]
r_w, r_h = min(opt.range[0], c_w), min(opt.range[1], c_h) # in case it reaches satmap boundary
xmin, ymin, xmax, ymax = c_w-r_w, c_h-r_h, c_w+r_w, c_h+r_h
last_data, labels = update_last_data((image0, keypoints0, descriptors0, scores0), mask, spindex, (xmin, ymin, xmax, ymax), device) # return updated GIS labels if required
base_map = image0[ymin:ymax, xmin:xmax]
center, UAV_pix_pos_offset = (r_w, r_h), [c_w-r_w, c_h-r_h]
frame, ret = vs.next_frame(update_scale, rotate=True, bins=Bins)
if not ret or not_updated>opt.patience:
print('Finished UAV Geolocalization Inference')
break
stem1 = vs.i-1
timer.update('data')
frame_tensor = frame2tensor(frame, device)
pred = matching({**last_data, 'image1': frame_tensor})
kpts0 = last_data['keypoints0'][0].cpu().numpy()
kpts1 = pred['keypoints1'][0].cpu().numpy()
matches = pred['matches0'][0].cpu().numpy()
confidence = pred['matching_scores0'][0].cpu().numpy()
valid = matches > -1
if opt.apply_GIS:
valid, not_valid, use_ground, mkpts_count = segment_keypoints(valid, labels, opt.switch_threshold)
mkpts0 = kpts0[valid]
mkpts1 = kpts1[matches[valid]]
# keep matched keypoints not selected
mkpts0_other = kpts0[not_valid]
mkpts1_other = kpts1[matches[not_valid]]
color = cm.jet(confidence[valid])
timer.update('Matching')
# Geolocalize UAV once matches keypoints are over 50
if len(mkpts0)>=opt.switch_threshold:
mkpts = (use_ground, mkpts0, mkpts1, mkpts0_other, mkpts1_other)
# Geolocalize UAV with matched keypoints
center, points, img_box, M, offset, update_scale, avg_building_h = UAV_loc_by_pix_PAffine(frame, mkpts, UAV_pix_pos_offset, opt, vs.scale, not_updated, bins=Bins)
current_GPS = update_current_GPS(opt.satmap_init_gps, (center[0]+UAV_pix_pos_offset[0], center[1]+UAV_pix_pos_offset[1]))
height = -1.23904244+vs.scale*111.67527558
GeoLoc, not_updated = True, 0
else:
GeoLoc, offset = False, 0 # Initialize rotation offset
not_updated = not_updated+1 # Not able to geolocalize UAV, not_updated count+1
M, update_scale = [], 1.0 # Zeroize PAffine transformation mask and scale if unable to geolocalize UAV
print('Don\'t have enough matched keypoint pairs over {} frames'.format(not_updated))
if GeoLoc:
GPS.append([stem1, *current_GPS])
timer.update('Geolocalization')
# Visualize the matches.
if opt.matching_vis:
text = [
'Estimated GPS: ({:.6f}, {:.6f})'.format(*current_GPS),
'Heading Direction (degrees): {}'.format(int(360-Bins*opt.bin_interval)%360), # heading_direction = 360 - rotation_angle_offset
'Flight Height (meters): {}'.format(int(round(height)))
]
# Display extra parameter info.
k_thresh = matching.superpoint.config['keypoint_threshold']
m_thresh = matching.superglue.config['match_threshold']
small_text = [
'Keypoints: {}:{}'.format(len(kpts0), len(kpts1)),
'Ground/Building/Total: {}/{}/{}'.format(*mkpts_count, sum(mkpts_count)),
'Inliers pct: {:.2f}%'.format(np.sum(M)/len(M)*100),
'Scale/Update_scale : {:.2f}/{:.4f}'.format(vs.scale, update_scale)
]
out = make_localization_plot(GeoLoc, base_map, frame, kpts0, kpts1, mkpts0, mkpts1,color, opt.resize, center, points,
img_box, text, path=None, show_keypoints=opt.show_keypoints, small_text=small_text)
out = cv2.resize(out, (0,0), fx=1/2, fy=1/2)
# save sat image and frame t matched output
if opt.output_dir is not None:
stem = 'matches_{:06}'.format(stem1)
out_file = str(Path(opt.output_dir, stem + '.png'))
print('\n\nWriting image to {}'.format(out_file))
cv2.imwrite(out_file, out)
timer.update('Matching Vis')
timer.print(text='Timer {:04d}'.format(stem1))
cv2.destroyAllWindows()
vs.cleanup()
    # save the predicted GPS trajectory (frame index, lat, lon) to a .txt file
f = open(opt.output_dir+"GPS_pred.txt", "w")
for item in GPS:
f.write(f'{item[0]}\t{item[1]}\t{item[2]}\n')
f.close()
# save predicted GPS as .kml file
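    # simplekml expects (longitude, latitude, altitude) tuples, hence the (item[2], item[1], ...)
    # reordering of the stored (frame, lat, lon) rows; altitude is fixed at 1.0 here.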
GPS_kml = [(item[2], item[1], 1.0) for item in GPS]
kml = generate_kml(GPS_kml, is_gt=False)
kml.save(str(Path(opt.output_dir, 'GPS_pred.kml')))
print('Saving predicted UAV GPS as .txt and .kml file')
print('Inference done!')
| 14,274 | 45.347403 | 182 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/Feature_extractor.py | from pathlib import Path
import argparse
import numpy as np
import torch
import json
import os
from models.matching import Matching
from models.utils.utils import (AverageTimer, VideoStreamer, load_encoder_img, frame2tensor)
torch.set_grad_enabled(False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='SuperGlue demo',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--input', type=str, default='0',
help='ID of a USB webcam, URL of an IP camera, '
'or path to an image directory or movie file')
parser.add_argument(
'--output_dir', type=str, default=None,
help='Directory where to write output frames (If None, no output)')
parser.add_argument(
'--resize', type=int, nargs='+', default=[1280, 720],
help='Resize the input image before running inference. If two numbers, '
'resize to the exact dimensions, if one number, resize the max '
'dimension, if -1, do not resize')
parser.add_argument(
'--map_row_col', type=int, nargs='+', default=[4,4],
help='Map composed with row*col sub-maps')
parser.add_argument(
'--superglue', choices={'indoor', 'outdoor'}, default='outdoor',
help='SuperGlue weights')
parser.add_argument(
'--max_keypoints', type=int, default=-1,
help='Maximum number of keypoints detected by Superpoint'
' (\'-1\' keeps all keypoints)')
parser.add_argument(
'--keypoint_threshold', type=float, default=0.005,
help='SuperPoint keypoint detector confidence threshold')
parser.add_argument(
'--nms_radius', type=int, default=4,
help='SuperPoint Non Maximum Suppression (NMS) radius'
' (Must be positive)')
parser.add_argument(
'--sinkhorn_iterations', type=int, default=20,
help='Number of Sinkhorn iterations performed by SuperGlue')
parser.add_argument(
'--match_threshold', type=float, default=0.2,
help='SuperGlue match threshold')
parser.add_argument(
'--force_cpu', action='store_true',
help='Force pytorch to run in CPU mode.')
opt = parser.parse_args()
print(opt)
if len(opt.resize) == 2 and opt.resize[1] == -1:
opt.resize = opt.resize[0:1]
if len(opt.resize) == 2:
print('Will resize to {}x{} (WxH)'.format(
opt.resize[0], opt.resize[1]))
elif len(opt.resize) == 1 and opt.resize[0] > 0:
print('Will resize max dimension to {}'.format(opt.resize[0]))
elif len(opt.resize) == 1:
print('Will not resize images')
else:
raise ValueError('Cannot specify more than two integers for --resize')
device = 'cuda' if torch.cuda.is_available() and not opt.force_cpu else 'cpu'
print('Running inference on device \"{}\"'.format(device))
config = {
'superpoint': {
'nms_radius': opt.nms_radius,
'keypoint_threshold': opt.keypoint_threshold,
'max_keypoints': opt.max_keypoints
},
'superglue': {
'weights': opt.superglue,
'sinkhorn_iterations': opt.sinkhorn_iterations,
'match_threshold': opt.match_threshold,
}
}
matching = Matching(config).eval().to(device)
keys = ['keypoints', 'scores', 'descriptors']
if opt.output_dir is not None:
print('==> Will write outputs to {}'.format(opt.output_dir))
Path(opt.output_dir).mkdir(exist_ok=True)
# Load timer and dataloader
print('==> Processing image directory input: {}'.format(opt.input))
img_dirs = []
for i in range(opt.map_row_col[0]):
for j in range(opt.map_row_col[1]):
dir = 'sat_{}_{}.png'.format(i,j)
img_dirs.append(opt.input+dir)
if len(opt.resize) == 1:
img = load_encoder_img(img_dirs[0], opt.resize)
opt.resize = [img.shape[1], img.shape[0]]
    # Initialize feature keypoint containers for the stitched satellite map
kpts = {'keypoints0':np.empty([0,2]),
'scores0':np.empty([0]),
'descriptors0':np.empty([256,0]),
'image0':np.empty([opt.resize[1]*opt.map_row_col[0], opt.resize[0]*opt.map_row_col[1]])}
for i, imdir in enumerate(img_dirs):
frame = load_encoder_img(imdir, opt.resize)
frame_tensor = frame2tensor(frame, device)
last_data = matching.superpoint({'image': frame_tensor})
last_data = {k+'0': last_data[k][0].cpu().numpy() for k in keys}
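        # Pixel offset of this sub-map inside the stitched satellite map: sub-maps are laid out
        # row-major on a map_row_col[0] x map_row_col[1] grid, each of size resize (W x H).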
row = opt.resize[1]*(i//opt.map_row_col[1])
col = opt.resize[0]*(i%opt.map_row_col[1])
print('row,col:', row, col)
        # Reorganize keypoints: shift each sub-map's keypoints into stitched-map coordinates
last_data['keypoints0'] = last_data['keypoints0']+np.array([col,row])
kpts['keypoints0'] = np.concatenate((kpts['keypoints0'],last_data['keypoints0']), axis=0)
kpts['scores0'] = np.concatenate((kpts['scores0'],last_data['scores0']), axis=0)
kpts['descriptors0'] = np.concatenate((kpts['descriptors0'],last_data['descriptors0']), axis=1)
kpts['image0'][row:row+opt.resize[1], col:col+opt.resize[0]] = frame
image0_info = {'keypoints0':kpts['keypoints0'],
'scores0':kpts['scores0'],
'descriptors0':kpts['descriptors0'],
'image0':kpts['image0']}
# save kpts into npz file
np.savez(opt.output_dir+'/satmap_kpts.npz', **image0_info)
| 5,454 | 38.528986 | 103 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/models/matching.py | # %BANNER_BEGIN%
# ---------------------------------------------------------------------
# %COPYRIGHT_BEGIN%
#
# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL
#
# Unpublished Copyright (c) 2020
# Magic Leap, Inc., All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains the property
# of COMPANY. The intellectual and technical concepts contained herein
# are proprietary to COMPANY and may be covered by U.S. and Foreign
# Patents, patents in process, and are protected by trade secret or
# copyright law. Dissemination of this information or reproduction of
# this material is strictly forbidden unless prior written permission is
# obtained from COMPANY. Access to the source code contained herein is
# hereby forbidden to anyone except current COMPANY employees, managers
# or contractors who have executed Confidentiality and Non-disclosure
# agreements explicitly covering such access.
#
# The copyright notice above does not evidence any actual or intended
# publication or disclosure of this source code, which includes
# information that is confidential and/or proprietary, and is a trade
# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION,
# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS
# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS
# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND
# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE
# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS
# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE,
# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART.
#
# %COPYRIGHT_END%
# ----------------------------------------------------------------------
# %AUTHORS_BEGIN%
#
# Originating Authors: Paul-Edouard Sarlin
#
# %AUTHORS_END%
# --------------------------------------------------------------------*/
# %BANNER_END%
import torch
from .superpoint import SuperPoint
from .superglue import SuperGlue
class Matching(torch.nn.Module):
""" Image Matching Frontend (SuperPoint + SuperGlue) """
def __init__(self, config={}):
super().__init__()
self.superpoint = SuperPoint(config.get('superpoint', {}))
self.superglue = SuperGlue(config.get('superglue', {}))
def forward(self, data):
""" Run SuperPoint (optionally) and SuperGlue
SuperPoint is skipped if ['keypoints0', 'keypoints1'] exist in input
Args:
data: dictionary with minimal keys: ['image0', 'image1']
"""
pred = {}
# Extract SuperPoint (keypoints, scores, descriptors) if not provided
if 'keypoints0' not in data:
pred0 = self.superpoint({'image': data['image0']})
pred = {**pred, **{k+'0': v for k, v in pred0.items()}}
if 'keypoints1' not in data:
pred1 = self.superpoint({'image': data['image1']})
pred = {**pred, **{k+'1': v for k, v in pred1.items()}}
# Batch all features
# We should either have i) one image per batch, or
# ii) the same number of local features for all images in the batch.
data = {**data, **pred}
for k in data:
if isinstance(data[k], (list, tuple)):
data[k] = torch.stack(data[k])
# Perform the matching
pred = {**pred, **self.superglue(data)}
return pred
| 3,417 | 39.211765 | 77 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/models/superglue.py | # %BANNER_BEGIN%
# ---------------------------------------------------------------------
# %COPYRIGHT_BEGIN%
#
# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL
#
# Unpublished Copyright (c) 2020
# Magic Leap, Inc., All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains the property
# of COMPANY. The intellectual and technical concepts contained herein
# are proprietary to COMPANY and may be covered by U.S. and Foreign
# Patents, patents in process, and are protected by trade secret or
# copyright law. Dissemination of this information or reproduction of
# this material is strictly forbidden unless prior written permission is
# obtained from COMPANY. Access to the source code contained herein is
# hereby forbidden to anyone except current COMPANY employees, managers
# or contractors who have executed Confidentiality and Non-disclosure
# agreements explicitly covering such access.
#
# The copyright notice above does not evidence any actual or intended
# publication or disclosure of this source code, which includes
# information that is confidential and/or proprietary, and is a trade
# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION,
# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS
# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS
# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND
# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE
# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS
# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE,
# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART.
#
# %COPYRIGHT_END%
# ----------------------------------------------------------------------
# %AUTHORS_BEGIN%
#
# Originating Authors: Paul-Edouard Sarlin
#
# %AUTHORS_END%
# --------------------------------------------------------------------*/
# %BANNER_END%
from copy import deepcopy
from pathlib import Path
import torch
from torch import nn
def MLP(channels: list, do_bn=True):
""" Multi-layer perceptron """
n = len(channels)
layers = []
for i in range(1, n):
layers.append(
nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True))
if i < (n-1):
if do_bn:
layers.append(nn.BatchNorm1d(channels[i]))
layers.append(nn.ReLU())
return nn.Sequential(*layers)
def normalize_keypoints(kpts, image_shape):
""" Normalize keypoints locations based on image image_shape"""
_, _, height, width = image_shape
one = kpts.new_tensor(1)
size = torch.stack([one*width, one*height])[None]
center = size / 2
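    # Scale by 0.7 * the larger image dimension so that normalized coordinates fall roughly
    # within [-0.72, 0.72], keeping them well inside the unit range regardless of aspect ratio.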
scaling = size.max(1, keepdim=True).values * 0.7
return (kpts - center[:, None, :]) / scaling[:, None, :]
class KeypointEncoder(nn.Module):
""" Joint encoding of visual appearance and location using MLPs"""
def __init__(self, feature_dim, layers):
super().__init__()
self.encoder = MLP([3] + layers + [feature_dim])
nn.init.constant_(self.encoder[-1].bias, 0.0)
def forward(self, kpts, scores):
inputs = [kpts.transpose(1, 2), scores.unsqueeze(1)]
return self.encoder(torch.cat(inputs, dim=1))
def attention(query, key, value):
dim = query.shape[1]
scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim**.5
prob = torch.nn.functional.softmax(scores, dim=-1)
return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob
class MultiHeadedAttention(nn.Module):
""" Multi-head attention to increase model expressivitiy """
def __init__(self, num_heads: int, d_model: int):
super().__init__()
assert d_model % num_heads == 0
self.dim = d_model // num_heads
self.num_heads = num_heads
self.merge = nn.Conv1d(d_model, d_model, kernel_size=1)
self.proj = nn.ModuleList([deepcopy(self.merge) for _ in range(3)])
def forward(self, query, key, value):
batch_dim = query.size(0)
query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1)
for l, x in zip(self.proj, (query, key, value))]
x, _ = attention(query, key, value)
return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1))
class AttentionalPropagation(nn.Module):
def __init__(self, feature_dim: int, num_heads: int):
super().__init__()
self.attn = MultiHeadedAttention(num_heads, feature_dim)
self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim])
nn.init.constant_(self.mlp[-1].bias, 0.0)
def forward(self, x, source):
message = self.attn(x, source, source)
return self.mlp(torch.cat([x, message], dim=1))
class AttentionalGNN(nn.Module):
def __init__(self, feature_dim: int, layer_names: list):
super().__init__()
self.layers = nn.ModuleList([
AttentionalPropagation(feature_dim, 4)
for _ in range(len(layer_names))])
self.names = layer_names
def forward(self, desc0, desc1):
for layer, name in zip(self.layers, self.names):
if name == 'cross':
src0, src1 = desc1, desc0
else: # if name == 'self':
src0, src1 = desc0, desc1
delta0, delta1 = layer(desc0, src0), layer(desc1, src1)
desc0, desc1 = (desc0 + delta0), (desc1 + delta1)
return desc0, desc1
def log_sinkhorn_iterations(Z, log_mu, log_nu, iters: int):
""" Perform Sinkhorn Normalization in Log-space for stability"""
u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu)
for _ in range(iters):
u = log_mu - torch.logsumexp(Z + v.unsqueeze(1), dim=2)
v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1)
return Z + u.unsqueeze(2) + v.unsqueeze(1)
def log_optimal_transport(scores, alpha, iters: int):
""" Perform Differentiable Optimal Transport in Log-space for stability"""
b, m, n = scores.shape
one = scores.new_tensor(1)
ms, ns = (m*one).to(scores), (n*one).to(scores)
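    # Augment the score matrix with a learned "dustbin" row and column (alpha) so that
    # unmatched keypoints in either image can be assigned to the bin during optimal transport.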
bins0 = alpha.expand(b, m, 1)
bins1 = alpha.expand(b, 1, n)
alpha = alpha.expand(b, 1, 1)
couplings = torch.cat([torch.cat([scores, bins0], -1),
torch.cat([bins1, alpha], -1)], 1)
norm = - (ms + ns).log()
log_mu = torch.cat([norm.expand(m), ns.log()[None] + norm])
log_nu = torch.cat([norm.expand(n), ms.log()[None] + norm])
log_mu, log_nu = log_mu[None].expand(b, -1), log_nu[None].expand(b, -1)
Z = log_sinkhorn_iterations(couplings, log_mu, log_nu, iters)
Z = Z - norm # multiply probabilities by M+N
return Z
def arange_like(x, dim: int):
return x.new_ones(x.shape[dim]).cumsum(0) - 1 # traceable in 1.1
class SuperGlue(nn.Module):
"""SuperGlue feature matching middle-end
Given two sets of keypoints and locations, we determine the
correspondences by:
1. Keypoint Encoding (normalization + visual feature and location fusion)
2. Graph Neural Network with multiple self and cross-attention layers
3. Final projection layer
4. Optimal Transport Layer (a differentiable Hungarian matching algorithm)
5. Thresholding matrix based on mutual exclusivity and a match_threshold
The correspondence ids use -1 to indicate non-matching points.
Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew
Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural
Networks. In CVPR, 2020. https://arxiv.org/abs/1911.11763
"""
default_config = {
'descriptor_dim': 256,
'weights': 'indoor',
'keypoint_encoder': [32, 64, 128, 256],
'GNN_layers': ['self', 'cross'] * 9,
'sinkhorn_iterations': 100,
'match_threshold': 0.2,
}
def __init__(self, config):
super().__init__()
self.config = {**self.default_config, **config}
self.kenc = KeypointEncoder(
self.config['descriptor_dim'], self.config['keypoint_encoder'])
self.gnn = AttentionalGNN(
self.config['descriptor_dim'], self.config['GNN_layers'])
self.final_proj = nn.Conv1d(
self.config['descriptor_dim'], self.config['descriptor_dim'],
kernel_size=1, bias=True)
bin_score = torch.nn.Parameter(torch.tensor(1.))
self.register_parameter('bin_score', bin_score)
assert self.config['weights'] in ['indoor', 'outdoor']
path = Path(__file__).parent
path = path / 'weights/superglue_{}.pth'.format(self.config['weights'])
self.load_state_dict(torch.load(str(path)))
print('Loaded SuperGlue model (\"{}\" weights)'.format(
self.config['weights']))
def forward(self, data):
"""Run SuperGlue on a pair of keypoints and descriptors"""
desc0, desc1 = data['descriptors0'], data['descriptors1']
kpts0, kpts1 = data['keypoints0'], data['keypoints1']
if kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints
shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1]
return {
'matches0': kpts0.new_full(shape0, -1, dtype=torch.int),
'matches1': kpts1.new_full(shape1, -1, dtype=torch.int),
'matching_scores0': kpts0.new_zeros(shape0),
'matching_scores1': kpts1.new_zeros(shape1),
}
# Keypoint normalization.
kpts0 = normalize_keypoints(kpts0, data['image0'].shape)
kpts1 = normalize_keypoints(kpts1, data['image1'].shape)
# Keypoint MLP encoder.
desc0 = desc0 + self.kenc(kpts0, data['scores0'])
desc1 = desc1 + self.kenc(kpts1, data['scores1'])
# Multi-layer Transformer network.
desc0, desc1 = self.gnn(desc0, desc1)
# Final MLP projection.
mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1)
# Compute matching descriptor distance.
scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1)
scores = scores / self.config['descriptor_dim']**.5
# Run the optimal transport.
scores = log_optimal_transport(
scores, self.bin_score,
iters=self.config['sinkhorn_iterations'])
# Get the matches with score above "match_threshold".
max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1)
indices0, indices1 = max0.indices, max1.indices
mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0)
mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1)
zero = scores.new_tensor(0)
mscores0 = torch.where(mutual0, max0.values.exp(), zero)
mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero)
valid0 = mutual0 & (mscores0 > self.config['match_threshold'])
valid1 = mutual1 & valid0.gather(1, indices1)
indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1))
indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1))
return {
'matches0': indices0, # use -1 for invalid match
'matches1': indices1, # use -1 for invalid match
'matching_scores0': mscores0,
'matching_scores1': mscores1,
}
| 11,316 | 38.848592 | 86 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/models/superpoint.py | # %BANNER_BEGIN%
# ---------------------------------------------------------------------
# %COPYRIGHT_BEGIN%
#
# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL
#
# Unpublished Copyright (c) 2020
# Magic Leap, Inc., All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains the property
# of COMPANY. The intellectual and technical concepts contained herein
# are proprietary to COMPANY and may be covered by U.S. and Foreign
# Patents, patents in process, and are protected by trade secret or
# copyright law. Dissemination of this information or reproduction of
# this material is strictly forbidden unless prior written permission is
# obtained from COMPANY. Access to the source code contained herein is
# hereby forbidden to anyone except current COMPANY employees, managers
# or contractors who have executed Confidentiality and Non-disclosure
# agreements explicitly covering such access.
#
# The copyright notice above does not evidence any actual or intended
# publication or disclosure of this source code, which includes
# information that is confidential and/or proprietary, and is a trade
# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION,
# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS
# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS
# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND
# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE
# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS
# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE,
# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART.
#
# %COPYRIGHT_END%
# ----------------------------------------------------------------------
# %AUTHORS_BEGIN%
#
# Originating Authors: Paul-Edouard Sarlin
#
# %AUTHORS_END%
# --------------------------------------------------------------------*/
# %BANNER_END%
from pathlib import Path
import torch
from torch import nn
def simple_nms(scores, nms_radius: int):
""" Fast Non-maximum suppression to remove nearby points """
assert(nms_radius >= 0)
def max_pool(x):
return torch.nn.functional.max_pool2d(
x, kernel_size=nms_radius*2+1, stride=1, padding=nms_radius)
zeros = torch.zeros_like(scores)
max_mask = scores == max_pool(scores)
for _ in range(2):
supp_mask = max_pool(max_mask.float()) > 0
supp_scores = torch.where(supp_mask, zeros, scores)
new_max_mask = supp_scores == max_pool(supp_scores)
max_mask = max_mask | (new_max_mask & (~supp_mask))
return torch.where(max_mask, scores, zeros)
def remove_borders(keypoints, scores, border: int, height: int, width: int):
""" Removes keypoints too close to the border """
mask_h = (keypoints[:, 0] >= border) & (keypoints[:, 0] < (height - border))
mask_w = (keypoints[:, 1] >= border) & (keypoints[:, 1] < (width - border))
mask = mask_h & mask_w
return keypoints[mask], scores[mask]
def top_k_keypoints(keypoints, scores, k: int):
if k >= len(keypoints):
return keypoints, scores
scores, indices = torch.topk(scores, k, dim=0)
return keypoints[indices], scores
def sample_descriptors(keypoints, descriptors, s: int = 8):
""" Interpolate descriptors at keypoint locations """
b, c, h, w = descriptors.shape
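    # Map keypoint pixel coordinates into the descriptor map's normalized [-1, 1] grid
    # (the dense descriptors have a stride of s pixels; s=8 for SuperPoint).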
keypoints = keypoints - s / 2 + 0.5
keypoints /= torch.tensor([(w*s - s/2 - 0.5), (h*s - s/2 - 0.5)],
).to(keypoints)[None]
keypoints = keypoints*2 - 1 # normalize to (-1, 1)
args = {'align_corners': True} if int(torch.__version__[2]) > 2 else {}
descriptors = torch.nn.functional.grid_sample(
descriptors, keypoints.view(b, 1, -1, 2), mode='bilinear', **args)
descriptors = torch.nn.functional.normalize(
descriptors.reshape(b, c, -1), p=2, dim=1)
return descriptors
class SuperPoint(nn.Module):
"""SuperPoint Convolutional Detector and Descriptor
SuperPoint: Self-Supervised Interest Point Detection and
Description. Daniel DeTone, Tomasz Malisiewicz, and Andrew
Rabinovich. In CVPRW, 2019. https://arxiv.org/abs/1712.07629
"""
default_config = {
'descriptor_dim': 256,
'nms_radius': 4,
'keypoint_threshold': 0.005,
'max_keypoints': -1,
'remove_borders': 4,
}
def __init__(self, config):
super().__init__()
self.config = {**self.default_config, **config}
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
c1, c2, c3, c4, c5 = 64, 64, 128, 128, 256
self.conv1a = nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1)
self.conv1b = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1)
self.conv2a = nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1)
self.conv2b = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1)
self.conv3a = nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1)
self.conv3b = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1)
self.conv4a = nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1)
self.conv4b = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1)
self.convPa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1)
self.convPb = nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0)
self.convDa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1)
self.convDb = nn.Conv2d(
c5, self.config['descriptor_dim'],
kernel_size=1, stride=1, padding=0)
path = Path(__file__).parent / 'weights/superpoint_v1.pth'
self.load_state_dict(torch.load(str(path)))
mk = self.config['max_keypoints']
if mk == 0 or mk < -1:
raise ValueError('\"max_keypoints\" must be positive or \"-1\"')
print('Loaded SuperPoint model')
def forward(self, data):
""" Compute keypoints, scores, descriptors for image """
# Shared Encoder
x = self.relu(self.conv1a(data['image']))
x = self.relu(self.conv1b(x))
x = self.pool(x)
x = self.relu(self.conv2a(x))
x = self.relu(self.conv2b(x))
x = self.pool(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
# Compute the dense keypoint scores
cPa = self.relu(self.convPa(x))
scores = self.convPb(cPa)
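        # The detector head outputs 65 channels per 8x8 cell: 64 sub-pixel positions plus one
        # "no keypoint" dustbin, which is dropped after the softmax before the remaining
        # channels are unfolded back to full resolution.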
scores = torch.nn.functional.softmax(scores, 1)[:, :-1]
b, _, h, w = scores.shape
scores = scores.permute(0, 2, 3, 1).reshape(b, h, w, 8, 8)
scores = scores.permute(0, 1, 3, 2, 4).reshape(b, h*8, w*8)
scores = simple_nms(scores, self.config['nms_radius'])
# Extract keypoints
keypoints = [
torch.nonzero(s > self.config['keypoint_threshold'])
for s in scores]
scores = [s[tuple(k.t())] for s, k in zip(scores, keypoints)]
# Discard keypoints near the image borders
keypoints, scores = list(zip(*[
remove_borders(k, s, self.config['remove_borders'], h*8, w*8)
for k, s in zip(keypoints, scores)]))
# Keep the k keypoints with highest score
if self.config['max_keypoints'] >= 0:
keypoints, scores = list(zip(*[
top_k_keypoints(k, s, self.config['max_keypoints'])
for k, s in zip(keypoints, scores)]))
# Convert (h, w) to (x, y)
keypoints = [torch.flip(k, [1]).float() for k in keypoints]
# Compute the dense descriptors
cDa = self.relu(self.convDa(x))
descriptors = self.convDb(cDa)
descriptors = torch.nn.functional.normalize(descriptors, p=2, dim=1)
# Extract descriptors
descriptors = [sample_descriptors(k[None], d[None], 8)[0]
for k, d in zip(keypoints, descriptors)]
return {
'keypoints': keypoints,
'scores': scores,
'descriptors': descriptors,
}
| 8,145 | 39.128079 | 80 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/models/utils/utils.py | from pathlib import Path
import time
from collections import OrderedDict
from threading import Thread
import numpy as np
import math
from vidgear.gears import CamGear
import cv2
import torch
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
class AverageTimer:
""" Class to help manage printing simple timing of code execution. """
def __init__(self, smoothing=0.3, newline=False):
self.smoothing = smoothing
self.newline = newline
self.times = OrderedDict()
self.will_print = OrderedDict()
self.reset()
def reset(self):
now = time.time()
self.start = now
self.last_time = now
for name in self.will_print:
self.will_print[name] = False
def update(self, name='default', printout=False):
now = time.time()
dt = now - self.last_time
if name in self.times:
dt = self.smoothing * dt + (1 - self.smoothing) * self.times[name]
self.times[name] = dt
self.will_print[name] = True
self.last_time = now
if printout:
print('%s=%.2f s' %(name, dt))
def print(self, text='Timer'):
total = 0.
print('[{}]'.format(text), end=' ')
for key in self.times:
val = self.times[key]
if self.will_print[key]:
print('%s=%.3f' % (key, val), end=' ')
total += val
print('total=%.3f sec {%.1f FPS}' % (total, 1./total), end=' ')
if self.newline:
print(flush=True)
else:
print(end='\r', flush=True)
self.reset()
def load_encoder_img(impath, resize):
""" Read image as grayscale and resize to img_size.
Inputs
impath: Path to input image.
Returns
grayim: uint8 numpy array sized H x W.
"""
grayim = cv2.imread(impath, 0)
if grayim is None:
raise Exception('Error reading image %s' % impath)
w, h = grayim.shape[1], grayim.shape[0]
w_new, h_new = process_resize(w, h, resize)
grayim = cv2.resize(
grayim, (w_new, h_new), interpolation=cv2.INTER_AREA)
return grayim
class VideoStreamer:
""" Class to help process image streams. Four types of possible inputs:"
1.) USB Webcam.
2.) An IP camera
3.) A directory of images (files in directory matching 'image_glob').
4.) A video file, such as an .mp4 or .avi file.
"""
def __init__(self, opt):
self._ip_grabbed = False
self._ip_running = False
self._ip_camera = False
self._ip_image = None
self._ip_index = 0
self.cap = []
self.camera = True
self.video_file = False
self.listing = []
self.resize = opt.resize
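        # Initial image scale from the takeoff height (meters); the coefficients appear to be an
        # empirical linear fit, approximately the inverse of the height = -1.239 + 111.675*scale
        # relation used during geolocalization. Defaults to 1.0 when no initial height is given.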
self.scale = opt.Init_height*0.00895404+0.01114674 if opt.Init_height else 1.0
self.interp = cv2.INTER_AREA
self.i = 0
self.skip = opt.skip
self.bin_interval = opt.bin_interval
self.max_length = opt.max_length
basedir = opt.input
image_glob = opt.image_glob
if isinstance(basedir, int) or basedir.isdigit():
print('==> Processing USB webcam input: {}'.format(basedir))
self.cap = cv2.VideoCapture(int(basedir))
self.listing = range(0, self.max_length)
elif basedir.startswith(('http', 'rtsp')):
print('==> Processing IP camera input: {}'.format(basedir))
# Available Streams are: [144p, 240p, 360p, 480p, 720p, 1080p, best, worst]
options = {"STREAM_RESOLUTION": "720p", 'CAP_PROP_FPS':5, "THREADED_QUEUE_MODE": False}
self.stream = CamGear(source=basedir, stream_mode = True, logging=True, **options).start() # YouTube Video URL as input
self._ip_camera = True
self.listing = range(0, self.max_length)
opt.KF_dt = 1.0/options['CAP_PROP_FPS']
opt.patience = int(opt.patience*options['CAP_PROP_FPS']/opt.skip)
print('==> Stop if UAV GPS not updated over {} frames'.format(opt.patience))
elif Path(basedir).is_dir():
print('==> Processing image directory input: {}'.format(basedir))
self.listing = list(Path(basedir).glob(image_glob[0]))
for j in range(1, len(image_glob)):
image_path = list(Path(basedir).glob(image_glob[j]))
self.listing = self.listing + image_path
self.listing.sort()
self.listing = self.listing[::self.skip]
self.max_length = np.min([self.max_length, len(self.listing)])
if self.max_length == 0:
raise IOError('No images found (maybe bad \'image_glob\' ?)')
self.listing = self.listing[:self.max_length]
self.camera = False
print('==> Stop if UAV GPS not updated over {} frames'.format(opt.patience))
elif Path(basedir).exists():
print('==> Processing video input: {}'.format(basedir))
self.cap = cv2.VideoCapture(basedir)
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
num_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.listing = range(0, num_frames)
self.listing = self.listing[::self.skip]
self.video_file = True
self.max_length = np.min([self.max_length, len(self.listing)])
self.listing = self.listing[:self.max_length]
opt.KF_dt = 1.0/(self.cap.get(cv2.CAP_PROP_FPS)/opt.skip)
opt.patience = int(opt.patience*self.cap.get(cv2.CAP_PROP_FPS)/opt.skip)
print('==> Stop if UAV GPS not updated over {} frames'.format(opt.patience))
else:
raise ValueError('VideoStreamer input \"{}\" not recognized.'.format(basedir))
def load_image(self, impath, rotate, bins):
""" Read image as grayscale and resize to img_size.
Inputs
impath: Path to input image.
Returns
grayim: uint8 numpy array sized H x W.
"""
grayim = cv2.imread(impath, 0)
if grayim is None:
raise Exception('Error reading image %s' % impath)
w, h = grayim.shape[1], grayim.shape[0]
w_resize, h_resize = int(self.resize[0]*self.scale), int(self.resize[1]*self.scale)
w_new, h_new = process_resize(w, h, (w_resize, h_resize))
grayim = cv2.resize(
grayim, (w_new, h_new), interpolation=self.interp)
if rotate:
angle = bins*self.bin_interval
grayim = self.rotate_image(grayim, angle) # angle>0, rotate image counterclockwise
# w_rotate, h_rotate = grayim.shape[1], grayim.shape[0]
# scales = (float(w) / float(w_rotate), float(h) / float(h_rotate))
return grayim
def next_frame(self, scale, go_next=True, rotate=False, bins=0):
""" Return the next frame, and increment internal counter.
Returns
image: Next H x W image.
status: True or False depending whether image was loaded.
"""
if (self.i==self.max_length):
return (None, False)
#update image scale
self.scale = self.scale*scale
if self.camera:
if self._ip_camera:
#Wait for first image, making sure we haven't exited
time.sleep(.001)
image = self.stream.read()
else:
ret, image = self.cap.read()
if ret is False or image is None:
print('VideoStreamer: Cannot get image from camera')
return (None, False)
w, h = image.shape[1], image.shape[0]
if self.video_file:
self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.listing[self.i])
w_resize, h_resize = int(self.resize[0]*self.scale), int(self.resize[1]*self.scale)
w_new, h_new = process_resize(w, h, (w_resize, h_resize))
image = cv2.resize(image, (w_new, h_new),
interpolation=self.interp)
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
if rotate:
angle = bins*self.bin_interval
image = self.rotate_image(image, angle) # angle>0, rotate image counterclockwise
else:
image_file = str(self.listing[self.i])
image = self.load_image(image_file, rotate, bins)
self.i = self.i + 1 if go_next else self.i
return (image, True)
def start_ip_camera_thread(self):
self._ip_thread = Thread(target=self.update_ip_camera, args=())
self._ip_running = True
self._ip_thread.start()
self._ip_exited = False
return self
def update_ip_camera(self):
while self._ip_running:
ret, img = self.cap.read()
if ret is False:
self._ip_running = False
self._ip_exited = True
self._ip_grabbed = False
return
self._ip_image = img
self._ip_grabbed = ret
self._ip_index += 1
#print('IPCAMERA THREAD got frame {}'.format(self._ip_index))
def rotate_image(self, mat, angle):
"""
Rotates an image (angle in degrees) and expands image to avoid cropping
"""
height, width = mat.shape[:2] # image shape has 3 dimensions
image_center = (width/2, height/2) # getRotationMatrix2D needs coordinates in reverse order (width, height) compared to shape
rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1.)
# rotation calculates the cos and sin, taking absolutes of those.
abs_cos = abs(rotation_mat[0,0])
abs_sin = abs(rotation_mat[0,1])
# find the new width and height bounds
bound_w = int(height * abs_sin + width * abs_cos)
bound_h = int(height * abs_cos + width * abs_sin)
        # subtract the old image center (bringing the image back to the origin) and add the new image center coordinates
rotation_mat[0, 2] += bound_w/2 - image_center[0]
rotation_mat[1, 2] += bound_h/2 - image_center[1]
# rotate image with the new bounds and translated rotation matrix
rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))
return rotated_mat
def cleanup(self):
self._ip_running = False
# --- PREPROCESSING ---
def process_resize(w, h, resize):
assert(len(resize) > 0 and len(resize) <= 2)
if len(resize) == 1 and resize[0] > -1:
scale = resize[0] / max(h, w)
w_new, h_new = int(round(w*scale)), int(round(h*scale))
elif len(resize) == 1 and resize[0] == -1:
w_new, h_new = w, h
else: # len(resize) == 2:
w_new, h_new = resize[0], resize[1]
# Issue warning if resolution is too small or too large.
if max(w_new, h_new) < 160:
print('Warning: input resolution is very small, results may vary')
elif max(w_new, h_new) > 2000:
print('Warning: input resolution is very large, results may vary')
return w_new, h_new
def frame2tensor(frame, device):
return torch.from_numpy(frame/255.).float()[None, None].to(device)
def read_image(path, device, resize, rotation, resize_float):
image = cv2.imread(str(path), cv2.IMREAD_GRAYSCALE)
if image is None:
return None, None, None
w, h = image.shape[1], image.shape[0]
w_new, h_new = process_resize(w, h, resize)
scales = (float(w) / float(w_new), float(h) / float(h_new))
if resize_float:
image = cv2.resize(image.astype('float32'), (w_new, h_new))
else:
image = cv2.resize(image, (w_new, h_new)).astype('float32')
if rotation != 0:
image = np.rot90(image, k=rotation)
if rotation % 2:
scales = scales[::-1]
inp = frame2tensor(image, device)
return image, inp, scales
def remove_kpts_on_building(features, labels):
    # screen out basemap keypoints that fall on buildings, keeping only ground keypoints
keys = ['keypoints0', 'scores0', 'descriptors0']
kpts = features['keypoints0'].astype('int')
scores = features['scores0']
descriptors = features['descriptors0']
valid = labels==0
kpts = features['keypoints0'][valid]
descriptors = ((descriptors.T)[valid]).T
scores = scores[valid]
return {'keypoints0':kpts, 'scores0':scores, 'descriptors0':descriptors}
def segment_keypoints(valid, labels, threshold):
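    # GIS labels: 0 = ground, 1 = building. Ground keypoints are preferred for geolocalization;
    # building keypoints are used instead only when ground matches are scarce (below `threshold`)
    # or outnumbered by 3x or more. The returned boolean reports whether ground keypoints were chosen.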
ground = labels==0
building = labels==1
grounds = np.logical_and(valid, ground)
buildings = np.logical_and(valid, building)
grounds_sum = sum(grounds) # number of matched non-building keypoints
buildings_sum = sum(buildings) # number of matched building keypoints
    # # If valid non-building keypoints are below the threshold and valid building keypoints exceed it, select matched building keypoints; otherwise use non-building keypoints for localization.
    # if (grounds_sum<threshold and buildings_sum>threshold) or buildings_sum-grounds_sum>threshold/2:
    #     return buildings, grounds, False # use building kpts for geolocalization
    # return grounds, buildings, True
if grounds_sum>=threshold:
if buildings_sum/grounds_sum<3:
return grounds, buildings, True, (grounds_sum, buildings_sum)
else:
return buildings, grounds, False, (grounds_sum, buildings_sum)
elif buildings_sum>=threshold:
        return buildings, grounds, False, (grounds_sum, buildings_sum) # use building kpts for geolocalization
else:
return valid, None, True, (grounds_sum, buildings_sum)
def update_last_data(satmap_kpts, mask, spindex, bbox, device):
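    # Crop the requested bbox out of the stitched satellite map and gather the precomputed
    # SuperPoint features falling inside it via the quadtree spatial index (spindex), shifting
    # keypoint coordinates into the cropped window's frame.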
xmin, ymin, xmax, ymax = bbox
image0, keypoints0, descriptors0, scores0 = satmap_kpts
matches = spindex.intersect((xmin, ymin, xmax-1, ymax-1)) # quadtree will include lower right boundary, so -1 to exclude keypoints lying on that boundary
keypoints0_ = keypoints0[matches]-[xmin, ymin]
scores0 = scores0[matches]
descriptors0 = descriptors0[:,matches]
keypoints0 = torch.from_numpy(keypoints0_).float().to(device)
scores0 = torch.from_numpy(scores0).float().to(device)
descriptors0 = torch.from_numpy(descriptors0).float().to(device)
image0 = frame2tensor(image0[ymin:ymax, xmin:xmax], device)
last_data = {'keypoints0':[keypoints0], 'scores0':[scores0], 'descriptors0':[descriptors0], 'image0':image0}
if mask is not None:
update_mask = mask[ymin:ymax, xmin:xmax]
# print(range, update_mask.shape)
keypoints0_ = keypoints0_.astype('int')
labels = update_mask[keypoints0_[:,1], keypoints0_[:,0]]
return last_data, labels
else:
return last_data, None | 14,700 | 38.732432 | 157 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/models/utils/utils_plot.py | import numpy as np
import cv2
import math
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
# --- VISUALIZATION ---
def make_localization_plot(GeoLoc, image0, image1, kpts0, kpts1, mkpts0, mkpts1, color, size, center, points, img_box,
text, path=None, show_keypoints=False, margin=10, opencv_display=False,
opencv_title='', small_text=[]):
H0, W0 = image0.shape
H1, W1 = image1.shape
# H, W = max(H0, H1), W0 + margin + int(np.sqrt(size[0]**2+size[1]**2)+1)
H, W = max(H0, H1), W0 + margin + W1
# combine image0 and image1
out = 255*np.ones((H, W), np.uint8)
out[:H0, :W0] = image0
out[:H1, W0+margin:W0+margin+W1] = image1
out = np.stack([out]*3, -1)
if GeoLoc:
# # box and center
cx, cy = center
out = cv2.circle(out, (cx, cy), 15, (0, 0, 255), 15)
out = cv2.circle(out, (cx, cy),10, (255, 255, 255), 10)
out = cv2.circle(out, (cx, cy), 5, (0, 0, 255), 5)
# plot matching box
out = cv2.polylines(out,[points],True,(0,0,0),7, cv2.LINE_AA)
for i in range(4):
out = drawline(out,pt1=points[i],pt2=points[(i+1)%4])
# keypoints
if show_keypoints:
kpts0, kpts1 = np.round(kpts0).astype(int), np.round(kpts1).astype(int)
white = (255, 255, 255)
black = (0, 0, 0)
for x, y in kpts0:
cv2.circle(out, (x, y), 2, black, -1, lineType=cv2.LINE_AA)
cv2.circle(out, (x, y), 1, white, -1, lineType=cv2.LINE_AA)
for x, y in kpts1:
cv2.circle(out, (x + margin + W0, y), 2, black, -1,
lineType=cv2.LINE_AA)
cv2.circle(out, (x + margin + W0, y), 1, white, -1,
lineType=cv2.LINE_AA)
# matched points
mkpts0, mkpts1 = np.round(mkpts0).astype(int), np.round(mkpts1).astype(int)
color = (np.array(color[:, :3])*255).astype(int)[:, ::-1]
for (x0, y0), (x1, y1), c in zip(mkpts0, mkpts1, color):
c = c.tolist()
# cv2.line(out, (x0, y0), (x1 + margin + W0, y1),
# color=c, thickness=1, lineType=cv2.LINE_AA)
# display line end-points as circles
cv2.circle(out, (x0, y0), 4, c, -1, lineType=cv2.LINE_AA)
cv2.circle(out, (x1 + margin + W0, y1), 4, c, -1,
lineType=cv2.LINE_AA)
# Scale factor for consistent visualization across scales.
sc = min(H / 640., 2.0)
# Big text.
Ht = int(30 * sc) # text height
txt_color_fg = (0, 0, 255)
txt_color_bg = (0, 0, 0)
for i, t in enumerate(text):
cv2.putText(out, t, (int(8*sc), Ht*(i+1)), cv2.FONT_HERSHEY_DUPLEX,
1.0*sc, txt_color_bg, 2, cv2.LINE_AA)
cv2.putText(out, t, (int(8*sc), Ht*(i+1)), cv2.FONT_HERSHEY_DUPLEX,
1.0*sc, txt_color_fg, 1, cv2.LINE_AA)
# Small text.
Ht = int(18 * sc) # text height
for i, t in enumerate(reversed(small_text)):
cv2.putText(out, t, (int(8*sc), int(H-Ht*(i+.6))), cv2.FONT_HERSHEY_DUPLEX,
0.5*sc, txt_color_bg, 2, cv2.LINE_AA)
cv2.putText(out, t, (int(8*sc), int(H-Ht*(i+.6))), cv2.FONT_HERSHEY_DUPLEX,
0.5*sc, txt_color_fg, 1, cv2.LINE_AA)
if path is not None:
cv2.imwrite(str(path), out)
if opencv_display:
cv2.imshow(opencv_title, out)
cv2.waitKey(1)
return out
def drawline(img,pt1,pt2,color=(0,0,255),thickness=5,gap=20):
dist =((pt1[0]-pt2[0])**2+(pt1[1]-pt2[1])**2)**.5
pts= []
for i in np.arange(0,dist,gap):
r=i/dist
x=int((pt1[0]*(1-r)+pt2[0]*r)+.5)
y=int((pt1[1]*(1-r)+pt2[1]*r)+.5)
p = (x,y)
pts.append(p)
for p in pts:
cv2.circle(img,p,thickness,color,-1)
return img
def make_center_plot(image0, image1, kpts0, kpts1, mkpts0, mkpts1, color, size, center, points,
text, path=None, show_keypoints=False, margin=10, opencv_display=False,
opencv_title='', small_text=[]):
H0, W0 = image0.shape
H1, W1 = image1.shape
H, W = max(H0, H1), W0 + W1 + margin
# combine image0 and image1
out = 255*np.ones((H, W), np.uint8)
out[:H0, :W0] = image0
out[:H1, W0+margin:] = image1
out = np.stack([out]*3, -1)
# # box and center
colors = [(255, 0, 0), (0, 255, 0), (0, 255, 255), (0, 0, 255)] #blue, green, yellow and red
if center is not None:
for i in range(4):
cx, cy = center[i]
out = cv2.circle(out, (cx, cy), 9, colors[i], 9)
# out = cv2.circle(out, (cx, cy), 6, (255, 255, 255), 6)
# out = cv2.circle(out, (cx, cy), 3, colors[i], 3)
# out = cv2.polylines(out,[points],True,(0,255,0),2, cv2.LINE_AA)
else:
print('Don\'t have enough matched keypoint pairs, relocalizing...')
# keypoints
if show_keypoints:
kpts0, kpts1 = np.round(kpts0).astype(int), np.round(kpts1).astype(int)
white = (255, 255, 255)
black = (0, 0, 0)
for x, y in kpts0:
cv2.circle(out, (x, y), 2, black, -1, lineType=cv2.LINE_AA)
cv2.circle(out, (x, y), 1, white, -1, lineType=cv2.LINE_AA)
for x, y in kpts1:
cv2.circle(out, (x + margin + W0, y), 2, black, -1,
lineType=cv2.LINE_AA)
cv2.circle(out, (x + margin + W0, y), 1, white, -1,
lineType=cv2.LINE_AA)
# matched points
mkpts0, mkpts1 = np.round(mkpts0).astype(int), np.round(mkpts1).astype(int)
color = (np.array(color[:, :3])*255).astype(int)[:, ::-1]
for (x0, y0), (x1, y1), c in zip(mkpts0, mkpts1, color):
c = c.tolist()
# cv2.line(out, (x0, y0), (x1 + margin + W0, y1),
# color=c, thickness=1, lineType=cv2.LINE_AA)
# display line end-points as circles
cv2.circle(out, (x0, y0), 2, c, -1, lineType=cv2.LINE_AA)
cv2.circle(out, (x1 + margin + W0, y1), 2, c, -1,
lineType=cv2.LINE_AA)
# # plot matched points center
# cx, cy = (np.mean(mkpts0, axis=0)).astype(int)
# out = cv2.circle(out, (cx, cy), 9, (0, 0, 255), 9)
# out = cv2.circle(out, (cx, cy), 6, (255, 255, 255), 6)
# out = cv2.circle(out, (cx, cy), 3, (0, 0, 255), 3)
# Scale factor for consistent visualization across scales.
sc = min(H / 640., 2.0)
# Big text.
Ht = int(30 * sc) # text height
txt_color_fg = (255, 255, 255)
txt_color_bg = (0, 0, 0)
for i, t in enumerate(text):
cv2.putText(out, t, (int(8*sc), Ht*(i+1)), cv2.FONT_HERSHEY_DUPLEX,
1.0*sc, txt_color_bg, 2, cv2.LINE_AA)
cv2.putText(out, t, (int(8*sc), Ht*(i+1)), cv2.FONT_HERSHEY_DUPLEX,
1.0*sc, txt_color_fg, 1, cv2.LINE_AA)
# Small text.
Ht = int(18 * sc) # text height
for i, t in enumerate(reversed(small_text)):
cv2.putText(out, t, (int(8*sc), int(H-Ht*(i+.6))), cv2.FONT_HERSHEY_DUPLEX,
0.5*sc, txt_color_bg, 2, cv2.LINE_AA)
cv2.putText(out, t, (int(8*sc), int(H-Ht*(i+.6))), cv2.FONT_HERSHEY_DUPLEX,
0.5*sc, txt_color_fg, 1, cv2.LINE_AA)
if path is not None:
cv2.imwrite(str(path), out)
if opencv_display:
cv2.imshow(opencv_title, out)
cv2.waitKey(1)
return out | 7,417 | 37.237113 | 118 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/models/utils/utils_loc.py | import pyproj
import simplekml
import numpy as np
import cv2
import math
# additional libraries for extracting GPS ground truth from drone-taken images
import re
import os
def update_current_GPS(sat_gps, pix_c):
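    # Convert a pixel offset from the satellite map origin into GPS: project the origin GPS
    # (WGS84, EPSG:4326) to Web Mercator (EPSG:3857), add the offset in meters using the map's
    # ground sampling distance (GSD, m/pixel; the negative y entry presumably accounts for image
    # rows increasing downward), then project back to WGS84.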
GSD = [0.1493, -0.1492] # m/pix
# convert initial GPS to projective distance in meters
# geo_epsg, proj_epsg = "epsg:4326", "epsg:3857"
transformer = pyproj.Transformer.from_crs(4326, 3857)
init_proj = transformer.transform(*sat_gps)
current_proj = [init_proj[i]+pix_c[i]*GSD[i] for i in range(len(init_proj))]
# convert current projective distance to GPS
transformer = pyproj.Transformer.from_crs(3857, 4326)
current_GPS = transformer.transform(*current_proj)
return current_GPS
def retrieve_init_pixposition(sat_gps, init_gps):
GSD = [0.1493, -0.1492] # m/pix
# convert initial GPS to projective distance in meters
# geo_epsg, proj_epsg = "epsg:4326", "epsg:3857"
transformer = pyproj.Transformer.from_crs(4326, 3857)
sat_proj = transformer.transform(*sat_gps)
init_proj = transformer.transform(*init_gps)
pixpos = [int((init_proj[i]-sat_proj[i])/GSD[i]) for i in range(len(init_proj))]
return pixpos
def generate_kml(GPS, is_gt=True):
kml=simplekml.Kml()
start_pt = kml.newpoint(name='Start Point')
start_pt.coords = [GPS[0]]
    start_pt.style.labelstyle.scale = 1 # label text scale (1 = default size)
start_pt.style.labelstyle.color = simplekml.Color.white
start_pt.altitudemode = simplekml.AltitudeMode.relativetoground
end_pt = kml.newpoint(name='End Point')
end_pt.coords = [GPS[-1]]
    end_pt.style.labelstyle.scale = 1 # label text scale (1 = default size)
end_pt.style.labelstyle.color = simplekml.Color.white
end_pt.altitudemode = simplekml.AltitudeMode.relativetoground
ls = kml.newlinestring(name='3D Path', extrude=1)
ls.coords = GPS
ls.extrude = 1
ls.style.linestyle.width = 3
if is_gt:
ls.style.linestyle.color = simplekml.Color.red
end_pt.style.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/paddle/red-blank.png'
start_pt.style.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/paddle/red-blank.png'
else:
ls.style.linestyle.color = simplekml.Color.yellow
start_pt.style.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/paddle/grn-blank.png'
end_pt.style.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/paddle/grn-blank.png'
ls.altitudemode = simplekml.AltitudeMode.relativetoground
return kml
def UAV_loc_by_pix_DLT(image1, mkpts0, mkpts1, UAV_pix_pos_offset, opt, bins=0):
size = opt.resize
H1, W1 = image1.shape
angle = bins*opt.bin_interval
# project image1 boundaries to image0
src_pts = np.float32(mkpts1).reshape(-1,1,2)
dst_pts = np.float32(mkpts0).reshape(-1,1,2)
hom_reproj_threshold = 3.0 # threshold for homography reprojection error: maximum allowed reprojection error in pixels (to treat a point pair as an inlier)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, ransacReprojThreshold=hom_reproj_threshold)
abs_sin = abs(math.sin(math.radians(angle)))
abs_cos = abs(math.cos(math.radians(angle)))
img_box = np.float32([[size[1]*abs_sin-1,H1],
[0,size[0]*abs_sin-1],
[size[0]*abs_cos-1,0],
[W1,size[1]*abs_cos-1]]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(img_box, M)
points= np.int32(dst)
M = cv2.moments(points)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
center = (cx, cy)
return center, points, mask
def UAV_loc_by_pix_PAffine(image1, mkpts, UAV_pix_pos_offset, opt, vs_scale, not_updated, bins=0):
# partial affine (rotation, scale and translation)
use_ground, mkpts0, mkpts1, mkpts0_other, mkpts1_other = mkpts
size = opt.resize
H1, W1 = image1.shape
angle = bins*opt.bin_interval
scale_ground, scale_building = None, None
# project image1 boundaries to image0
src_pts = np.float32(mkpts1).reshape(-1,2)
dst_pts = np.float32(mkpts0).reshape(-1,2)
reproj_threshold = 3.0 # threshold for homography reprojection error: maximum allowed reprojection error in pixels (to treat a point pair as an inlier)
Mtx, mask = cv2.estimateAffinePartial2D(src_pts, dst_pts, method=cv2.RANSAC, ransacReprojThreshold=reproj_threshold)
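    # For a similarity (partial affine) transform [[s*cos(t), -s*sin(t), tx], [s*sin(t), s*cos(t), ty]],
    # the rotation is t = atan(M[1,0] / M[0,0]) and the scale is s = M[0,0] / cos(t).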
theta = np.degrees(np.arctan(Mtx[1,0]/Mtx[0,0]))
offset = round(theta/opt.bin_interval)
if use_ground:
scale_ground = Mtx[0,0]/np.cos(np.radians(theta))
# compute building scale
if len(mkpts0_other)>opt.switch_threshold:
src_pts = np.float32(mkpts1_other).reshape(-1,2)
dst_pts = np.float32(mkpts0_other).reshape(-1,2)
Mtx_scale, _ = cv2.estimateAffinePartial2D(src_pts, dst_pts, method=cv2.RANSAC, ransacReprojThreshold=reproj_threshold)
scale_building = Mtx_scale[0,0]/np.cos(np.radians(np.degrees(np.arctan(Mtx_scale[1,0]/Mtx_scale[0,0]))))
else:
scale_building = Mtx[0,0]/np.cos(np.radians(theta))
# compute ground scale
if len(mkpts0_other)>opt.switch_threshold:
src_pts = np.float32(mkpts1_other).reshape(-1,2)
dst_pts = np.float32(mkpts0_other).reshape(-1,2)
Mtx_scale, _ = cv2.estimateAffinePartial2D(src_pts, dst_pts, method=cv2.RANSAC, ransacReprojThreshold=reproj_threshold)
scale_ground = Mtx_scale[0,0]/np.cos(np.radians(np.degrees(np.arctan(Mtx_scale[1,0]/Mtx_scale[0,0]))))
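    # The scale difference between ground-plane and building (rooftop) keypoints is taken as a
    # proxy for average building height; 111.675... appears to be the same meters-per-scale-unit
    # factor used in the flight-height fit.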
avg_building_h = 111.67527558*abs(scale_ground-scale_building) if (scale_ground and scale_building) is not None else np.nan
# print(scale_ground, scale_building)
upper_bound, lower_bound = 1.035816**((not_updated+1)*opt.KF_dt), 0.964184**((not_updated+1)*opt.KF_dt)
scale_ground = 1.0 if scale_ground is None else max(min(scale_ground,upper_bound),lower_bound)
# retrieve resize image four vertex
threshold = 360/opt.bin_interval
img_box = retrieve_img_box(H1, W1, size, vs_scale, angle, bins, threshold)
dst = [email protected] # n*3@3*2
points= np.int32(dst)
M = cv2.moments(points)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
center = (cx, cy)
return center, points, img_box, mask, offset, scale_ground, avg_building_h
def UAV_loc_by_pix_Affine(image1, mkpts0, mkpts1, UAV_pix_pos_offset, opt, bins=0):
    # full affine: partial affine (above) plus shearing
size = opt.resize
H1, W1 = image1.shape
angle = bins*opt.bin_interval
# project image1 boundaries to image0
src_pts = np.float32(mkpts1).reshape(-1,2)
dst_pts = np.float32(mkpts0).reshape(-1,2)
reproj_threshold = 3.0 # threshold for homography reprojection error: maximum allowed reprojection error in pixels (to treat a point pair as an inlier)
M, mask = cv2.estimateAffine2D(src_pts, dst_pts, method=cv2.RANSAC, ransacReprojThreshold=reproj_threshold)
threshold = 360/opt.bin_interval
img_box = retrieve_img_box(H1, W1, size, angle, bins, threshold)
dst = [email protected] # n*3@3*2
points= np.int32(dst)
M = cv2.moments(points)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
center = (cx, cy)
current_GPS = update_current_GPS(opt.satmap_init_gps, (cx+UAV_pix_pos_offset[0], cy+UAV_pix_pos_offset[1]))
return current_GPS, center, points, mask
def retrieve_img_box(H1, W1, size, vs_scale, angle, bins, threshold):
abs_sin = abs(math.sin(math.radians(angle)))
abs_cos = abs(math.cos(math.radians(angle)))
size_w, size_h = int(size[0]*vs_scale), int(size[1]*vs_scale)
if 0<=angle<=90 or 180<=angle<=270: # bins 45-60, 15-30
img_box = np.float32([[size_w*abs_cos,0,1],
[0,size_w*abs_sin,1],
[size_h*abs_sin,H1,1],
[W1,size_h*abs_cos,1]]).reshape(-1,3)
else: # bins 0-15, 30-45
img_box = np.float32([[size_h*abs_sin,0,1],
[0,size_h*abs_cos,1],
[size_w*abs_cos,H1,1],
[W1,size_w*abs_sin,1]]).reshape(-1,3)
return img_box | 8,311 | 41.192893 | 160 | py |
d3py | d3py-master/setup.py | #!/usr/bin/env python
from setuptools import setup
setup(
name='d3py',
version='0.2.3',
description='d3py',
author='Mike Dewar, Micha Gorelick and Adam Laiacano',
author_email='[email protected]',
url='https://github.com/mikedewar/D3py',
packages=['d3py', 'd3py.geoms', ],
package_data={'d3py': ['d3.js','d3py_template.html']},
requires=['pandas','networkx']
)
| 424 | 24 | 58 | py |
d3py | d3py-master/examples/d3py_vega_line.py | import d3py
import pandas as pd
import random
x = range(0, 101, 1)
y = [random.randint(10, 100) for num in range(0, 101, 1)]
df = pd.DataFrame({'x': x, 'y': y})
#Create Pandas figure
fig = d3py.PandasFigure(df, 'd3py_area', port=8000, columns=['x', 'y'])
#Add Vega Area plot
fig += d3py.vega.Line()
#Show figure
fig.show()
| 329 | 16.368421 | 71 | py |
d3py | d3py-master/examples/d3py_multiline.py | import numpy as np
import d3py
import pandas
T = 5*np.pi
x = np.linspace(-T,T,100)
a = 0.05
y = np.exp(-a*x) * np.sin(x)
z = np.exp(-a*x) * np.sin(0.5*x)
df = pandas.DataFrame({
'x' : x,
'y' : y,
'z' : z,
})
with d3py.PandasFigure(df, 'd3py_line', width=600, height=200) as fig:
fig += d3py.geoms.Line('x', 'y', stroke='BlueViolet')
fig += d3py.geoms.Line('x', 'z', stroke='DeepPink')
fig += d3py.xAxis('x')
fig += d3py.yAxis('y')
fig.show()
| 477 | 19.782609 | 70 | py |
d3py | d3py-master/examples/d3py_area.py | import numpy as np
import d3py
import pandas
N = 500
T = 5*np.pi
x = np.linspace(-T,T,N)
y = np.sin(x)
y0 = np.cos(x)
df = pandas.DataFrame({
'x' : x,
'y' : y,
'y0' : y0,
})
with d3py.PandasFigure(df, 'd3py_area', width=500, height=250) as fig:
fig += d3py.geoms.Area('x', 'y', 'y0')
fig += d3py.geoms.xAxis('x')
fig += d3py.geoms.yAxis('y')
fig.show()
| 384 | 16.5 | 70 | py |
d3py | d3py-master/examples/d3py_scatter.py | import numpy as np
import pandas
import d3py
n = 400
df = pandas.DataFrame({
'd1': np.arange(0,n),
'd2': np.random.normal(0, 1, n)
})
with d3py.PandasFigure(df, "example scatter plot using d3py", width=400, height=400) as fig:
fig += d3py.Point("d1", "d2", fill="DodgerBlue")
fig += d3py.xAxis('d1', label="Random")
fig += d3py.yAxis('d2', label="Also random")
fig.show()
| 398 | 23.9375 | 92 | py |
d3py | d3py-master/examples/d3py_vega_area.py | import d3py
import pandas as pd
import random
x = range(0, 21, 1)
y = [random.randint(25, 100) for num in range(0, 21, 1)]
df = pd.DataFrame({'x': x, 'y': y})
#Create Pandas figure
fig = d3py.PandasFigure(df, 'd3py_area', port=8080, columns=['x', 'y'])
#Add Vega Area plot
fig += d3py.vega.Area()
#Add interpolation to figure data
fig.vega + ({'value': 'basis'}, 'marks', 0, 'properties', 'enter',
'interpolate')
fig.show() | 441 | 22.263158 | 71 | py |
d3py | d3py-master/examples/d3py_vega_scatter.py | import d3py
import pandas as pd
import random
import numpy as np
n = 400
df = pd.DataFrame({'d1': np.arange(0,n),'d2': np.random.normal(0, 1, n)})
#Create Pandas figure
fig = d3py.PandasFigure(df, 'd3py_area', port=8000, columns=['d1', 'd2'])
#Add Vega Area plot
fig += d3py.vega.Scatter()
#Show figure
fig.show()
| 300 | 16.705882 | 73 | py |
d3py | d3py-master/examples/d3py_vega_bar.py | import d3py
import pandas as pd
import random
x = ['apples', 'oranges', 'grapes', 'bananas', 'plums', 'blackberries']
y = [10, 17, 43, 23, 31, 18]
df = pd.DataFrame({'x': x, 'y': y})
#Create Pandas figure
fig = d3py.PandasFigure(df, 'd3py_area', port=8000, columns=['x', 'y'])
#Add Vega Area plot
fig += d3py.vega.Bar()
#Show figure
fig.show() | 348 | 19.529412 | 71 | py |
d3py | d3py-master/examples/d3py_line.py | import numpy as np
import d3py
import pandas
T = 5*np.pi
x = np.linspace(-T,T,100)
a = 0.05
y = np.exp(-a*x) * np.sin(x)
df = pandas.DataFrame({
'x' : x,
'y' : y
})
with d3py.PandasFigure(df, 'd3py_line', width=600, height=200) as fig:
fig += d3py.geoms.Line('x', 'y', stroke='BlueViolet')
fig += d3py.xAxis('x')
fig += d3py.yAxis('y')
fig.show()
| 374 | 17.75 | 70 | py |
d3py | d3py-master/examples/d3py_bar.py | import pandas
import d3py
import logging
logging.basicConfig(level=logging.DEBUG)
df = pandas.DataFrame(
{
"count" : [1,4,7,3,2,9],
"apple_type": ["a", "b", "c", "d", "e", "f"],
}
)
# use 'with' if you are writing a script and want to serve this up forever
with d3py.PandasFigure(df) as p:
p += d3py.Bar(x = "apple_type", y = "count", fill = "MediumAquamarine")
p += d3py.xAxis(x = "apple_type")
p.show()
# if you are writing in a terminal, use without 'with' to keep everything nice
# and interactive
"""
p = d3py.PandasFigure(df)
p += d3py.Bar(x = "apple_type", y = "count", fill = "MediumAquamarine")
p += d3py.xAxis(x = "apple_type")
p.show()
"""
| 692 | 22.896552 | 78 | py |
d3py | d3py-master/examples/d3py_graph.py | import d3py
import networkx as nx
import logging
logging.basicConfig(level=logging.DEBUG)
G=nx.Graph()
G.add_edge(1,2)
G.add_edge(1,3)
G.add_edge(3,2)
G.add_edge(3,4)
G.add_edge(4,2)
# use 'with' if you are writing a script and want to serve this up forever
with d3py.NetworkXFigure(G, width=500, height=500) as p:
p += d3py.ForceLayout()
p.show()
| 359 | 19 | 74 | py |
d3py | d3py-master/tests/test_javascript.py | #!/usr/bin/python
from d3py import javascript as JS
def test_JavaScript_object_lookup():
g = JS.Selection("g").attr("color", "red")
j = JS.JavaScript() + g
assert(j.get_object("g", JS.Selection) == g)
g.attr("test", "test")
assert(j.get_object("g", JS.Selection) == g)
f = JS.Function("test", None, "return 5")
j += f
assert(j.get_object("test", JS.Function) == f)
f = "console.debug('hello')" + f
assert(j.get_object("test", JS.Function) == f)
if __name__ == "__main__":
test_JavaScript_object_lookup()
| 558 | 20.5 | 50 | py |
d3py | d3py-master/tests/test_figure.py | # -*- coding: utf-8 -*-
'''
Figure Test
-------
Test figure object with nose package:
https://nose.readthedocs.org/en/latest/
'''
import d3py
import nose.tools as nt
class TestFigure():
def setup(self):
'''Setup Figure object for testing'''
self.Figure = d3py.Figure('test figure', 1024, 768, True, 'Asap',
False, None, 'localhost', 8000,
kwarg='test')
def test_atts(self):
'''Test attribute setting'''
assert self.Figure.name == 'test_figure'
assert self.Figure.host == 'localhost'
assert self.Figure.port == 8000
assert self.Figure._server_thread == None
assert self.Figure.httpd == None
assert self.Figure.interactive == True
assert self.Figure.margins == {'bottom': 25, 'height': 768, 'left': 60,
'right': 20, 'top': 10, 'width': 1024}
assert self.Figure.font == 'Asap'
assert self.Figure.args == {'font-family': "'Asap'; sans-serif",
'height': 733, 'width': 944,
'kwarg': 'test'}
| 1,179 | 29.25641 | 79 | py |
d3py | d3py-master/d3py/test.py | import unittest
import css
import pandas
import d3py
import javascript
class TestCSS(unittest.TestCase):
def setUp(self):
self.css = css.CSS()
def test_init(self):
out = css.CSS({"#test":{"fill":"red"}})
self.assertTrue(out["#test"] == {"fill":"red"})
def test_get(self):
self.css["#test"] = {"fill":"red"}
self.assertTrue(self.css["#test"] == {"fill":"red"})
def test_set(self):
self.css["#test"] = {"fill":"red"}
self.css["#test"] = {"stroke":"black"}
self.assertTrue(self.css["#test"] == {"fill":"red", "stroke":"black"})
def test_add(self):
a = css.CSS()
b = css.CSS()
a["#foo"] = {"fill":"red"}
a["#bar"] = {"fill":"blue"}
b["#foo"] = {"stroke":"green"}
b["#bear"] = {"fill":"yellow"}
out = a + b
expected = css.CSS({
"#foo":{
"fill":"red",
"stroke":"green"
},
"#bar" : {"fill":"blue"},
"#bear" : {"fill":"yellow"}
})
self.assertTrue(out.rules == expected.rules)
def test_str(self):
self.css["#test"] = {"fill":"red"}
out = str(self.css)
self.assertTrue(out == "#test {\n\tfill: red;\n}\n\n")
class Test_d3py(unittest.TestCase):
def setUp(self):
self.df = pandas.DataFrame({
"count": [1,2,3],
"time": [1326825168, 1326825169, 1326825170]
})
    def test_data_to_json(self):
        p = d3py.PandasFigure(self.df)
        j = p._data_to_json()
class Test_JavaScript_object_lookup(unittest.TestCase):
def setUp(self):
self.g = javascript.Selection("g").attr("color", "red")
self.j = javascript.JavaScript() + self.g
self.f = javascript.Function("test", None, "return 5")
def test_getobject(self):
self.assertTrue(self.j.get_object("g", javascript.Selection) == self.g)
def test_inplace_mod(self):
self.g.attr("test", "test")
self.assertTrue(self.j.get_object("g", javascript.Selection) == self.g)
def test_add_fucntion(self):
self.j += self.f
self.assertTrue(self.j.get_object("test", javascript.Function) == self.f)
def test_prepend_function(self):
self.j += self.f
self.f = "console.debug('hello')" + self.f
self.assertTrue(self.j.get_object("test", javascript.Function) == self.f)
if __name__ == '__main__':
unittest.main() | 2,503 | 29.168675 | 81 | py |
d3py | d3py-master/d3py/networkx_figure.py | import logging
import json
from networkx.readwrite import json_graph
import javascript as JS
from figure import Figure
class NetworkXFigure(Figure):
def __init__(self, graph, name="figure", width=400, height=100,
interactive=True, font="Asap", logging=False, template=None,
host="localhost", port=8000, **kwargs):
"""
        graph : networkx graph
networkx graph used for the plot.
name : string
name of visualisation. This will appear in the title
bar of the webpage, and is the name of the folder where
your files will be stored.
width : int
width of the figure in pixels (default is 400)
height : int
height of the figure in pixels (default is 100)
interactive : boolean
set this to false if you are drawing the graph using a script and
not in the command line (default is True)
font : string
name of the font you'd like to use. See
http://www.google.com/webfonts for options (default is Asap)
keyword args are converted from foo_bar to foo-bar if you want to pass
in arbitrary css to the figure
You will need NetworkX installed for this type of Figure to work!
http://networkx.lanl.gov/
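        Illustrative usage (mirrors examples/d3py_graph.py in this repository,
        where G is a networkx graph):
            with NetworkXFigure(G, width=500, height=500) as p:
                p += d3py.ForceLayout()
                p.show()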
"""
super(NetworkXFigure, self).__init__(
name=name, width=width, height=height,
interactive=interactive, font=font, logging=logging, template=template,
host=host, port=port, **kwargs
)
# store data
self.G = graph
self._save_data()
def _data_to_json(self):
"""
converts the data frame stored in the figure to JSON
"""
data = json_graph.node_link_data(self.G)
s = json.dumps(data)
return s
| 1,876 | 35.096154 | 84 | py |
d3py | d3py-master/d3py/figure.py | # -*- coding: utf-8 -*-
'''
Figure
-------
Abstract Base Class for all figures. Currently subclassed by pandas_figure,
can be subclassed for other figure types.
'''
import logging
import webbrowser
from HTTPHandler import CustomHTTPRequestHandler, ThreadedHTTPServer
import IPython.core.display
import threading
from cStringIO import StringIO
import time
import json
import os
from pkg_resources import resource_string
from css import CSS
import javascript as JS
import vega
class Figure(object):
'''Abstract Base Class for all figures'''
def __init__(self, name, width, height, interactive, font, logging,
template, host, port, **kwargs):
'''
Figure is the abstract base class for all figures. Currently
subclassed by pandas_figure and networkx_figure.
Parameters:
-----------
name: string
Name of visualization; will appear in title bar of the webpage,
and in the folder where files are stored.
width : int
Width of the figure in pixels
height : int
Height of the figure in pixels
interactive : boolean
Set to false if you are drawing the graph using a script and
not in the command line.
font : string
Name of the font you'd like to use. See
http://www.google.com/webfonts for options
logging:
            Logging via the standard Python logging library
template: string
HTML template for figure. Defaults to /d3py_template (Also, when
building your own HTML, please see the default template for
correct usage of {{ name }}, {{ host }}, {{ port }}, and
{{ font }}
host: string
Generally default to 'localhost' for local plotting
port: int
Generally defaults to 8000 for local plotting
'''
# store data
self.name = '_'.join(name.split())
d3py_path = os.path.abspath(os.path.dirname(__file__))
self.filemap = {"static/d3.js":{"fd":open(d3py_path+"/d3.js","r"),
"timestamp":time.time()},}
# Networking stuff
self.host = host
self.port = port
self._server_thread = None
self.httpd = None
'''Interactive is true by default, as this is designed to be a command
line tool. We do not want to block interaction after plotting.'''
self.interactive = interactive
self.logging = logging
# initialise strings
self.js = JS.JavaScript()
self.margins = {"top": 10, "right": 20, "bottom": 25, "left": 60,
"height":height, "width":width}
# we use bostock's scheme http://bl.ocks.org/1624660
self.css = CSS()
self.html = ""
self.template = template or resource_string('d3py', 'd3py_template.html')
self.js_geoms = JS.JavaScript()
self.css_geoms = CSS()
self.geoms = []
# misc arguments - these go into the css!
self.font = font
self.args = {"width": width - self.margins["left"] - self.margins["right"],
"height": height - self.margins["top"] - self.margins["bottom"],
"font-family": "'%s'; sans-serif"%self.font}
kwargs = dict([(k[0].replace('_','-'), k[1]) for k in kwargs.items()])
self.args.update(kwargs)
def update(self):
'''Build or update JS, CSS, & HTML, and save all data'''
logging.debug('updating chart')
self._build()
self.save()
def _build(self):
'''Build all JS, CSS, HTML, and Geometries'''
logging.debug('building chart')
if hasattr(self, 'vega'):
self.vega.build_vega()
self._build_js()
self._build_css()
self._build_html()
self._build_geoms()
def _build_css(self):
'''Build basic CSS'''
chart = {}
chart.update(self.args)
self.css["#chart"] = chart
def _build_html(self):
'''Build HTML, either via 'template' argument or default template
at /d3py_template.html.'''
self.html = self.template
self.html = self.html.replace("{{ name }}", self.name)
self.html = self.html.replace("{{ font }}", self.font)
self._save_html()
def _build_geoms(self):
'''Build D3py CSS/JS geometries. See /geoms for more details'''
self.js_geoms = JS.JavaScript()
self.css_geoms = CSS()
for geom in self.geoms:
self.js_geoms.merge(geom._build_js())
self.css_geoms += geom._build_css()
def _build_js(self):
'''Build Javascript for Figure'''
draw = JS.Function("draw", ("data",))
draw += "var margin = %s;"%json.dumps(self.margins).replace('""','')
draw += " width = %s - margin.left - margin.right"%self.margins["width"]
draw += " height = %s - margin.top - margin.bottom;"%self.margins["height"]
# this approach to laying out the graph is from Bostock: http://bl.ocks.org/1624660
draw += "var g = " + JS.Selection("d3").select("'#chart'") \
.append("'svg'") \
.attr("'width'", 'width + margin.left + margin.right + 25') \
.attr("'height'", 'height + margin.top + margin.bottom + 25') \
.append("'g'") \
.attr("'transform'", "'translate(' + margin.left + ',' + margin.top + ')'")
self.js = JS.JavaScript() + draw + JS.Function("init")
def _cleanup(self):
raise NotImplementedError
def __enter__(self):
self.interactive = False
return self
def __exit__(self, ex_type, ex_value, ex_tb):
if ex_tb is not None:
print "Cleanup after exception: %s: %s"%(ex_type, ex_value)
self._cleanup()
def __del__(self):
self._cleanup()
def ion(self):
"""
Turns interactive mode on ala pylab
"""
self.interactive = True
def ioff(self):
"""
Turns interactive mode off
"""
self.interactive = False
def _set_data(self):
'''Update JS, CSS, HTML, save all'''
self.update()
    def __add__(self, figure):
        '''Add a d3py.geom or d3py.vega object to the Figure'''
        if isinstance(figure, vega.Vega):
            self._add_vega(figure)
        else:
            self._add_geom(figure)
def __iadd__(self, figure):
'''Add d3py.geom or d3py.vega object to the Figure'''
if isinstance(figure, vega.Vega):
self._add_vega(figure)
else:
self._add_geom(figure)
return self
def _add_vega(self, figure):
'''Add D3py.Vega Figure'''
self.vega = figure
self.vega.tabular_data(self.data, columns=self.columns,
use_index=self.use_index)
self.template = resource_string('d3py', 'vega_template.html')
self._save_vega()
def _add_geom(self, geom):
'''Append D3py.geom to existing D3py geoms'''
self.geoms.append(geom)
self.save()
def save(self):
'''Save data and all Figure components: JS, CSS, and HTML'''
logging.debug('saving chart')
if hasattr(self, 'vega'):
self._save_vega()
self._save_data()
self._save_css()
self._save_js()
self._save_html()
def _save_data(self,directory=None):
"""
Build file map (dir path and StringIO for output) of data
Parameters:
-----------
directory : str
Specify a directory to store the data in (optional)
"""
# write data
filename = "%s.json"%self.name
self.filemap[filename] = {"fd":StringIO(self._data_to_json()),
"timestamp":time.time()}
def _save_vega(self):
'''Build file map (dir path and StringIO for output) of Vega'''
vega = json.dumps(self.vega.vega, sort_keys=True, indent=4)
self.filemap['vega.json'] = {"fd":StringIO(vega),
"timestamp":time.time()}
def _save_css(self):
'''Build file map (dir path and StringIO for output) of CSS'''
filename = "%s.css"%self.name
css = "%s\n%s"%(self.css, self.css_geoms)
self.filemap[filename] = {"fd":StringIO(css),
"timestamp":time.time()}
def _save_js(self):
'''Build file map (dir path and StringIO for output) of data'''
final_js = JS.JavaScript()
final_js.merge(self.js)
final_js.merge(self.js_geoms)
filename = "%s.js"%self.name
js = "%s"%final_js
self.filemap[filename] = {"fd":StringIO(js),
"timestamp":time.time()}
def _save_html(self):
'''Save HTML data. Will save Figure name to 'name.html'. Will also
replace {{ port }} and {{ host }} fields in template with
Figure.port and Figure.host '''
self.html = self.html.replace("{{ port }}", str(self.port))
self.html = self.html.replace("{{ host }}", str(self.host))
# write html
filename = "%s.html"%self.name
self.filemap[filename] = {"fd":StringIO(self.html),
"timestamp":time.time()}
def _data_to_json(self):
raise NotImplementedError
def show(self, interactive=None):
self.update()
self.save()
if interactive is not None:
blocking = not interactive
else:
blocking = not self.interactive
if blocking:
self._serve(blocking=True)
else:
            # if not blocking, serve the files asynchronously and open a browser tab
self._serve(blocking=False)
# fire up a browser
webbrowser.open_new_tab("http://%s:%s/%s.html"%(self.host,self.port, self.name))
def display(self, width=700, height=400):
html = "<iframe src=http://%s:%s/%s.html width=%s height=%s>" %(self.host, self.port, self.name, width, height)
        # return the HTML object so IPython actually renders the iframe
        return IPython.core.display.HTML(html)
def _serve(self, blocking=True):
"""
start up a server to serve the files for this vis.
"""
msgparams = (self.host, self.port, self.name)
url = "http://%s:%s/%s.html"%msgparams
if self._server_thread is None or self._server_thread.active_count() == 0:
Handler = CustomHTTPRequestHandler
Handler.filemap = self.filemap
Handler.logging = self.logging
try:
self.httpd = ThreadedHTTPServer(("", self.port), Handler)
except Exception, e:
print "Exception %s"%e
return False
if blocking:
logging.info('serving forever on port: %s'%msgparams[1])
msg = "You can find your chart at " + url
print msg
print "Ctrl-C to stop serving the chart and quit!"
self._server_thread = None
self.httpd.serve_forever()
else:
logging.info('serving asynchronously on port %s'%msgparams[1])
self._server_thread = threading.Thread(
target=self.httpd.serve_forever
)
self._server_thread.daemon = True
self._server_thread.start()
msg = "You can find your chart at " + url
print msg
def _cleanup(self):
try:
if self.httpd is not None:
print "Shutting down httpd"
self.httpd.shutdown()
self.httpd.server_close()
except Exception, e:
print "Error in clean-up: %s"%e
| 12,067 | 34.390029 | 119 | py |
d3py | d3py-master/d3py/javascript.py | #!/usr/bin/python
import logging
class JavaScript(object):
# TODO: Add a lookup function so you can easily find/edit functions/objects
# defined within the JavaScript object
def __init__(self, statements=None):
self.statements = []
self.objects_lookup = {}
if statements is not None:
statements = self._obj_to_statements(statements)
if isinstance(statements, list):
self.statements = statements
self.objects_lookup = self.parse_objects()
else:
raise Exception("Invalid inputed statement type")
def merge(self, other):
for line in other.statements:
if hasattr(line, "name") and (line.name, type(line.__class__)) in self.objects_lookup:
idx = self.objects_lookup[(line.name, type(line.__class__))][1]
self.statements[idx] += line
else:
self.statements.append(line)
self.objects_lookup = self.parse_objects()
def get_object(self, name, objtype):
return self.objects_lookup[(name,type(objtype))][0]
def __getitem__(self, item):
return self.statements[item]
def __setitem__(self, item, value):
self.statements[item] = value
def parse_objects(self):
objects = {}
for i, item in enumerate(self.statements):
if hasattr(item, "name") and item.name:
                # Is it necessary to compound the key with the class type?
objects[ (item.name, type(item.__class__)) ] = (item, i)
return objects
def _obj_to_statements(self, other):
if isinstance(other, (Function, Selection)):
other = [other, ]
elif isinstance(other, str):
other = [other, ]
elif isinstance(other, JavaScript):
other = other.statements
return other
def __radd__(self, other):
other = self._obj_to_statements(other)
if isinstance(other, list):
return JavaScript(self.statements + other)
raise NotImplementedError
def __add__(self, other):
other = self._obj_to_statements(other)
if isinstance(other, list):
newobj = JavaScript()
newobj.statements = self.statements + other
newobj.objects_lookup = newobj.parse_objects()
return newobj
raise NotImplementedError
def __repr__(self):
return self.__str__()
def __str__(self):
js = ""
for statement in self.statements:
js += str(statement) + "\n"
return js
class Selection:
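    """
    Small builder that mirrors d3's chained-selection API: each method call
    records a (name, params) pair and __str__ renders the chain as JavaScript.
    Illustrative sketch of the rendering (based on __str__ below):
        Selection("d3").select("'#chart'").append("'svg'")
    renders to the string:
        d3.select('#chart').append('svg')
    """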
def __init__(self, name):
self.name = name
self.opts = []
# TODO maybe add_attribute should be add_method instead?
def add_attribute(self, name, *args):
self.opts.append({"name":name, "param":",".join(str(x) for x in args)})
return self
def select(self, *args):
return self.add_attribute("select", *args)
def selectAll(self, *args):
return self.add_attribute("selectAll", *args)
def data(self, *args):
return self.add_attribute("data", *args)
def enter(self, *args):
return self.add_attribute("enter", *args)
def append(self, *args):
return self.add_attribute("append", *args)
def attr(self, *args):
return self.add_attribute("attr", *args)
def style(self, *args):
return self.add_attribute("style", *args)
def id(self, *args):
# TODO what's this one for then?
return self.add_attribute("id", *args)
def call(self, *args):
return self.add_attribute("call", *args)
def __add__(self, other):
if isinstance(other, str):
return self.__str__() + other
raise NotImplementedError
def __radd__(self, other):
return other.__add__( self.__str__() )
def __repr__(self):
return self.__str__()
def __str__(self):
obj = self.name
for opt in self.opts:
if opt["param"] is None:
param = ""
elif isinstance(opt["param"], (list, tuple)):
param = ",".join([str(x) for x in opt["param"]])
else:
param = opt["param"]
obj += ".%s(%s)"%(opt["name"], param)
return obj
class Function(object):
def __init__(self, name=None, arguments=None, statements=None, autocall=False):
"""
name: string
arguments: list of strings
statements: list of strings
This ends up as
function name(arg1, arg2, arg3){
statement1;
statement2;
statement3;
}
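        Illustrative usage (mirrors figure.py in this package; the statement
        text is a placeholder):
            draw = Function("draw", ("data",))
            draw += "var margin = ...;"   # appending a string adds a statement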
"""
self.name = name
self.arguments = arguments
if isinstance(statements, str):
statements = [statements, ]
self.statements = statements or []
self.autocall = autocall
def _obj_to_statements(self, other):
if isinstance(other, str):
other = [other, ]
elif isinstance(other, JavaScript):
other = other.statements
elif isinstance(other, Function) and other.name == self.name and other.arguments == self.arguments:
other = other.statements
elif isinstance(other, Selection):
other = [other, ]
else:
print isinstance(other, Function)
print other.statements
logging.debug('failed to convert %s object:\n %s\n\n to statements'%(type(other),other))
other = None
return other
def __add__(self, more_statements):
more_statements = self._obj_to_statements(more_statements)
if isinstance(more_statements, (list, tuple)):
return Function(
self.name,
self.arguments,
self.statements + more_statements,
self.autocall
)
raise NotImplementedError(type(more_statements))
def __radd__(self, more_statements):
more_statements = self._obj_to_statements(more_statements)
if isinstance(more_statements, (list, tuple)):
return Function(self.name, self.arguments, more_statements + self.statements, self.autocall)
raise NotImplementedError
def __repr__(self):
return self.__str__()
def __str__(self):
fxn = "function"
if self.name is not None:
fxn += " %s"%self.name
fxn += "(%s) {\n"%(",".join(self.arguments or ""))
for line in self.statements:
fxn += "\t%s\n"%str(line)
fxn += "}\n"
if self.autocall:
fxn += "%s(%s);\n"%(self.name, ",".join(self.arguments or ""))
return fxn
| 6,796 | 32.482759 | 107 | py |
d3py | d3py-master/d3py/HTTPHandler.py | import SimpleHTTPServer
import SocketServer
from cStringIO import StringIO
import sys
class ThreadedHTTPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
class CustomHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
"""
We get rid of the BaseHTTPRequestHandler logging messages
because they can get annoying!
"""
        if self.logging:
            # zero-argument super() is Python 3 only; call the base class explicitly
            SimpleHTTPServer.SimpleHTTPRequestHandler.log_message(self, format, *args)
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.seek(0)
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.seek(0)
def send_head(self):
"""
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.path[1:] #get rid of leading '/'
f = None
ctype = self.guess_type(path)
try:
f = self.filemap[path]["fd"]
except KeyError:
return self.list_directory()
self.send_response(200)
self.send_header("Content-type", ctype)
self.send_header("Last-Modified", self.date_time_string(self.filemap[path]["timestamp"]))
self.end_headers()
return f
def list_directory(self):
f = StringIO()
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write("<html>\n<title>Directory listing</title>\n")
f.write("<body>\n<h2>Directory listing</h2>\n")
f.write("<hr>\n<ul>\n")
for path, meta in self.filemap.iteritems():
f.write('<li><a href="%s">%s</a>\n' % (path, path))
f.write("</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
encoding = sys.getfilesystemencoding()
self.send_header("Content-type", "text/html; charset=%s" % encoding)
self.send_header("Content-Length", str(length))
self.end_headers()
return f
| 2,383 | 32.111111 | 97 | py |
d3py | d3py-master/d3py/templates.py | d3py_template = '''<html>
<head>
<script type="text/javascript" src="http://mbostock.github.com/d3/d3.js"></script>
<script type="text/javascript" src="http://{{ host }}:{{ port }}/{{ name }}.js"></script>
<link type="text/css" rel="stylesheet" href="http://{{ host }}:{{ port }}/{{ name }}.css">
<link href='http://fonts.googleapis.com/css?family={{ font }}' rel='stylesheet' type='text/css'>
<title>d3py: {{ name }}</title>
</head>
<body>
<div id="chart"></div>
<script>
d3.json("http://{{ host }}:{{ port }}/{{ name }}.json", draw);
</script>
</body>
</html>
'''
| 586 | 28.35 | 100 | py |
d3py | d3py-master/d3py/pandas_figure.py | import numpy as np
import logging
import json
import javascript as JS
from figure import Figure
class PandasFigure(Figure):
def __init__(self, data, name="figure", width=800, height=400,
columns = None, use_index=False, interactive=True, font="Asap",
logging=False, template=None, host="localhost", port=8000, **kwargs):
"""
data : dataFrame
pandas dataFrame used for the plot. This dataFrame is column centric
name : string
name of visualisation. This will appear in the title
bar of the webpage, and is the name of the folder where
your files will be stored.
        width : int
            width of the figure in pixels (default is 800)
        height : int
            height of the figure in pixels (default is 400)
columns: dict, default None
DataFrame columns you want to visualize for Vega
use_index: boolean, default False
If true, D3py.Vega uses the index for the x-axis instead of a second
column
interactive : boolean
set this to false if you are drawing the graph using a script and
not in the command line (default is True)
font : string
name of the font you'd like to use. See
http://www.google.com/webfonts for options (default is Asap)
keyword args are converted from foo_bar to foo-bar if you want to pass
in arbitrary css to the figure
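        Illustrative usage (mirrors examples/d3py_line.py in this repository):
            with PandasFigure(df, 'd3py_line', width=600, height=200) as fig:
                fig += d3py.geoms.Line('x', 'y')
                fig += d3py.xAxis('x')
                fig += d3py.yAxis('y')
                fig.show()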
"""
super(PandasFigure, self).__init__(name=name, width=width, height=height,
interactive=interactive, font=font,
logging=logging, template=template,
host=host, port=port, **kwargs)
# store data
self.columns = columns
self.use_index = use_index
self.data = data
self._save_data()
def _set_data(self, data):
errmsg = "the %s geom requests %s which is not the given dataFrame!"
for geom in self.geoms:
for param in geom.params:
if param:
assert param in data, errmsg%(geom.name, param)
self.update()
def _add_geom(self, geom):
errmsg = "the %s geom requests %s which is not in our dataFrame!"
for p in geom.params:
if p:
assert p in self.data, errmsg%(geom.name, p)
self.geoms.append(geom)
self.save()
def _build_scales(self):
"""
build a function that returns the requested scale
"""
logging.debug('building scales')
get_scales = """
function get_scales(colnames, orientation){
var this_data = d3.merge(
colnames.map(
function(name){
return data.map(
function(d){
return d[name]
}
)
}
)
)
if (orientation==="vertical"){
if (isNaN(this_data[0])){
// not a number
console.log('using ordinal scale for vertical axis')
scale = d3.scale.ordinal()
.domain(this_data)
.range(d3.range(height,0,height/this_data.length))
} else {
// a number
console.log('using linear scale for vertical axis')
extent = d3.extent(this_data)
extent[0] = extent[0] > 0 ? 0 : extent[0]
scale = d3.scale.linear()
.domain(extent)
.range([height,0])
}
} else {
if (isNaN(this_data[0])){
// not a number
console.log('using ordinal scale for horizontal axis')
scale = d3.scale.ordinal()
.domain(this_data)
.rangeBands([0,width], 0.1)
} else {
// a number
console.log('using linear scale for horizontal axis')
scale = d3.scale.linear()
.domain(d3.extent(this_data))
.range([0,width])
}
}
return scale
}
"""
return get_scales
def _build_js(self):
draw = JS.Function("draw", ("data",))
draw += "var margin = %s;"%json.dumps(self.margins).replace('""','')
draw += " width = %s - margin.left - margin.right"%self.margins["width"]
draw += " height = %s - margin.top - margin.bottom;"%self.margins["height"]
# this approach to laying out the graph is from Bostock: http://bl.ocks.org/1624660
draw += "var g = " + JS.Selection("d3").select("'#chart'") \
.append("'svg'") \
.attr("'width'", 'width + margin.left + margin.right + 25') \
.attr("'height'", 'height + margin.top + margin.bottom + 25') \
.append("'g'") \
.attr("'transform'", "'translate(' + margin.left + ',' + margin.top + ')'")
scales = self._build_scales()
draw += scales
self.js = JS.JavaScript() + draw + JS.Function("init")
def _data_to_json(self):
"""
converts the data frame stored in the figure to JSON
"""
def cast(a):
try:
return float(a)
except ValueError:
return a
d = [
dict([
(colname, cast(row[i]))
for i,colname in enumerate(self.data.columns)
])
for row in self.data.values
]
try:
s = json.dumps(d, sort_keys=True, indent=4)
except OverflowError, e:
print "Error: Overflow on variable (type %s): %s: %s"%(type(d), d, e)
raise
return s
| 6,134 | 37.34375 | 91 | py |
d3py | d3py-master/d3py/__init__.py | from pandas_figure import *
from networkx_figure import *
from geoms import *
import javascript
| 96 | 18.4 | 29 | py |
d3py | d3py-master/d3py/css.py | #!/usr/bin/python
class CSS:
"""
a CSS object is a dictionary whose keys are CSS selectors and whose values
are dictionaries of CSS declarations. This object is named according to the
definition of CSS on wikipedia:
A style sheet consists of a list of rules.
Each rule or rule-set consists of one or more selectors and a
declaration block.
A declaration-block consists of a list of declarations in braces.
Each declaration itself consists of a property, a colon (:), a value.
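    Illustrative usage (behaviour mirrors the tests in d3py/test.py):
        css = CSS()
        css["#test"] = {"fill": "red"}
        print css   # renders a "#test { fill: red; }" rule block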
"""
def __init__(self, css=None):
self.rules = css or {}
assert isinstance(self.rules, dict)
def __getitem__(self, selector):
"returns the dictionary of CSS declarations, given a selector"
return self.rules[selector]
def __setitem__(self, selector, declarations):
"adds a dictionary of CSS declarations to the specified selector"
assert isinstance(declarations, dict)
if selector in self.rules:
self.rules[selector].update(declarations)
else:
self.rules[selector] = declarations
def __add__(self, css):
if isinstance(css, dict):
for selector, declarations in css.iteritems():
try:
self.rules[selector].update(declarations)
except KeyError:
self.rules[selector] = declarations
return self
elif isinstance(css, CSS):
return self.__add__(css.rules)
else:
errmsg = "Unsupported addition between %s and %s"
raise Exception(errmsg % (type(self), type(css)))
def __str__(self):
css = ""
for selector, declarations in self.rules.iteritems():
css += "%s {\n" % selector
for prop, value in declarations.iteritems():
if value is None:
value = "none"
css += "\t%s: %s;\n" % (prop, value)
css += "}\n\n"
return css
| 2,020 | 35.745455 | 79 | py |
d3py | d3py-master/d3py/vega.py | '''
Vega/Vincent
-------
The d3py port of the Vincent project:
https://github.com/wrobstory/vincent
'''
from __future__ import print_function
import os
import json
from pkg_resources import resource_string
import pandas as pd
import pdb
class Vega(object):
'''Vega abstract base class'''
def __init__(self, width=400, height=200,
padding={'top': 10, 'left': 30, 'bottom': 20, 'right': 10},
viewport=None):
'''
The Vega classes generate JSON output in Vega grammar, a
declarative format for creating and saving visualization designs.
This class is meant to be an abstract base class on which to build
        the other pieces of the complete VEGA specification.
A Vega object is instantiated with only the Vega Visualization basic,
properties, with default values for the name, width, height, padding,
and viewport.
Parameters:
-----------
        width: int, default 400
            Width of the visualization
        height: int, default 200
            Height of the visualization
padding: dict, default {'top': 10, 'left': 30, 'bottom':20, 'right':10}
Internal margins for the visualization, Top, Left, Bottom, Right
viewport: list, default None
Width and height of on-screen viewport
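        Illustrative usage (mirrors examples/d3py_vega_bar.py, via a subclass):
            fig = d3py.PandasFigure(df, 'd3py_area', port=8000, columns=['x', 'y'])
            fig += d3py.vega.Bar()
            fig.show()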
'''
self.width = width
self.height = height
self.padding = padding
self.viewport = viewport
self.visualization = {'width': self.width,
'padding': self.padding,
'viewport': self.viewport}
self.data = []
self.scales = []
self.axes = []
self.marks = []
self.build_vega()
def __add__(self, tuple):
'''Allow for updating of Vega with add operator'''
self.update_component('add', *tuple)
def __iadd__(self, tuple):
'''Allow for updating of Vega with iadd operator'''
self.update_component('add', *tuple)
return self
def __sub__(self, tuple):
'''Allow for updating of Vega with sub operator'''
self.update_component('remove', *tuple)
def __isub__(self, tuple):
'''Allow for updating of Vega with sub operator'''
self.update_component('remove', *tuple)
return self
def build_vega(self, *args):
'''Build complete vega specification. String arguments passed will not
be included in vega dict.
Ex: object.build_vega('viewport')
'''
keys = ['width', 'height', 'padding', 'viewport', 'data',
'scales', 'axes', 'marks']
self.vega = {}
for key in keys:
if key not in args:
self.vega[key] = getattr(self, key)
def update_vis(self, **kwargs):
'''
Update Vega Visualization basic properties:
width, height, padding, viewport
Ex: >>>my_vega.update_vis(height=800, width=800)
'''
for key, value in kwargs.iteritems():
setattr(self, key, value)
self.build_vega()
def build_component(self, append=True, **kwargs):
'''Build complete Vega component.
The Vega grammar will update with passed keywords. This method
rebuilds an entire Vega component: axes, data, scales, marks, etc.
Examples:
>>>my_vega.build_component(scales=[{"domain": {"data": "table",
"field": "data.x"},
"name":"x", "type":"ordinal",
"range":"width"}])
        >>>my_vega.build_component(axes=[{"scale": "x", "type": "x"},
                                         {"scale": "y", "type": "y"}],
append=False)
'''
for key, value in kwargs.iteritems():
setattr(self, key, value)
self.build_vega()
def update_component(self, change, value, parameter, index, *args):
'''Update individual parameters of any component.
Parameters:
-----------
change: string, either 'add' or 'remove'
'add' will add the value to the last specified key in *args (this
can be a new key). 'remove' will remove the key specified by
'value'.
value: Any JSON compatible datatype
The value you want to substitute into the component
parameter: string
The Vega component you want to modify (scales, marks, etc)
index: int
The index of dict/object in the component array you want to mod
Examples:
        >>>my_vega.update_component('add', 'w', 'axes', 0, 'scale')
>>>my_vega.update_component('remove', 'width', 'marks', 0,
'properties', 'enter')
'''
def set_keys(value, param, key, *args):
if args:
return set_keys(value, param.get(key), *args)
if change == 'add':
param[key] = value
else:
param[key].pop(value)
parameter = getattr(self, parameter)[index]
if not args:
args = [value]
if change == 'remove':
parameter.pop(value)
self.build_vega()
return
set_keys(value, parameter, *args)
self.build_vega()
def multi_update(self, comp_list):
'''Pass a list of component updates to change all'''
for update in comp_list:
self.update_component(*update)
def _json_IO(self, host, port):
'''Return data values as JSON for StringIO '''
data_vals = self.data[0]['values']
self.update_component('remove', 'values', 'data', 0)
url = ''.join(['http://', host, ':', str(port), '/data.json'])
self.update_component('add', url, 'data', 0, 'url')
vega = json.dumps(self.vega, sort_keys=True, indent=4)
data = json.dumps(data_vals, sort_keys=True, indent=4)
return vega, data
def to_json(self, path, split_data=False, html=False):
'''
Save Vega object to JSON
Parameters:
-----------
path: string
File path for Vega grammar JSON.
split_data: boolean, default False
Split the output into a JSON with only the data values, and a
Vega grammar JSON referencing that data.
html: boolean, default False
Output Vega Scaffolding HTML file to path
'''
def json_out(path, output):
'''Output to JSON'''
with open(path, 'w') as f:
json.dump(output, f, sort_keys=True, indent=4,
separators=(',', ': '))
if split_data:
data_out = self.data[0]['values']
self.update_component('remove', 'values', 'data', 0)
self.update_component('add', 'data.json', 'data', 0, 'url')
data_path = os.path.dirname(path) + r'/data.json'
json_out(data_path, data_out)
json_out(path, self.vega)
if html:
                template = resource_string('d3py', 'vega_template.html')
html_path = ''.join([os.path.dirname(path),
r'/vega_template.html'])
with open(html_path, 'w') as f:
f.write(template)
self.tabular_data(self.raw_data)
else:
json_out(path, self.vega)
def tabular_data(self, data, name="table", columns=None, use_index=False,
append=False):
        '''Create the data for a bar chart in Vega grammar. Data can be passed
in a list, dict, or Pandas Dataframe.
Parameters:
-----------
        name: string, default "table"
            Name of the data set in the Vega specification
        columns: list, default None
            If passing a Pandas DataFrame, you must pass at least one column
            name. If one column is passed, x-values will default to the index
            values. If two column names are passed, x-values are columns[0],
            y-values columns[1].
use_index: boolean, default False
Use the DataFrame index for your x-values
append: boolean, default False
Append new data to data already in object
Examples:
---------
>>>myvega.tabular_data([10, 20, 30, 40, 50])
        >>>myvega.tabular_data({'A': 10, 'B': 20, 'C': 30, 'D': 40, 'E': 50})
>>>myvega.tabular_data(my_dataframe, columns=['column 1'],
use_index=True)
>>>myvega.tabular_data(my_dataframe, columns=['column 1', 'column 2'])
'''
self.raw_data = data
#Tuples
if isinstance(data, tuple):
values = [{"x": x[0], "y": x[1]} for x in data]
#Lists
if isinstance(data, list):
if append:
start = self.data[0]['values'][-1]['x'] + 1
end = len(self.data[0]['values']) + len(data)
else:
start, end = 0, len(data)
default_range = xrange(start, end+1, 1)
values = [{"x": x, "y": y} for x, y in zip(default_range, data)]
#Dicts
if isinstance(data, dict) or isinstance(data, pd.Series):
values = [{"x": x, "y": y} for x, y in data.iteritems()]
#Dataframes
if isinstance(data, pd.DataFrame):
if len(columns) > 1 and use_index:
raise ValueError('If using index as x-axis, len(columns)'
'cannot be > 1')
if use_index or len(columns) == 1:
values = [{"x": x[0], "y": x[1][columns[0]]}
for x in data.iterrows()]
else:
values = [{"x": x[1][columns[0]], "y": x[1][columns[1]]}
for x in data.iterrows()]
if append:
self.data[0]['values'].extend(values)
else:
self.data = []
self.data.append({"name": name, "values": values})
self.build_vega()
class Bar(Vega):
'''Create a bar chart in Vega grammar'''
def __init__(self):
'''Build Vega Bar chart with default parameters'''
super(Bar, self).__init__()
self.scales = [{"name": "x", "type": "ordinal", "range": "width",
"domain": {"data": "table", "field": "data.x"}},
{"name": "y", "range": "height", "nice": True,
"domain": {"data": "table", "field": "data.y"}}]
self.axes = [{"type": "x", "scale": "x"}, {"type": "y", "scale": "y"}]
self.marks = [{"type": "rect", "from": {"data": "table"},
"properties": {
"enter": {
"x": {"scale": "x", "field": "data.x"},
"width": {"scale": "x", "band": True,
"offset": -1},
"y": {"scale": "y", "field": "data.y"},
"y2": {"scale": "y", "value": 0}
},
"update": {"fill": {"value": "#2a3140"}},
"hover": {"fill": {"value": "#a63737"}}
}
}]
self.build_vega()
class Area(Bar):
'''Create an area chart in Vega grammar'''
def __init__(self):
'''Build Vega Area chart with default parameters'''
super(Area, self).__init__()
area_updates = [('remove', 'width', 'marks', 0, 'properties', 'enter'),
('add', 'area', 'marks', 0, 'type'),
('add', 'linear', 'scales', 0, 'type')]
self.multi_update(area_updates)
self.build_vega()
class Scatter(Bar):
'''Create a scatter plot in Vega grammar'''
def __init__(self):
'''Build Vega Scatter chart with default parameters'''
super(Scatter, self).__init__()
self.height, self.width = 400, 400
self.padding = {'top': 40, 'left': 40, 'bottom': 40, 'right': 40}
scatter_updates = [('remove', 'type', 'scales', 0),
('add', True, 'scales', 0, 'nice'),
('remove', 'width', 'marks', 0, 'properties',
'enter'),
('remove', 'y2', 'marks', 0, 'properties',
'enter'),
('remove', 'hover', 'marks', 0, 'properties'),
('add', {'value': '#2a3140'}, 'marks', 0,
'properties', 'enter', 'stroke'),
('add', {'value': 0.9}, 'marks', 0, 'properties',
'enter', 'fillOpacity'),
('add', 'symbol', 'marks', 0, 'type')]
self.multi_update(scatter_updates)
self.build_vega()
class Line(Bar):
'''Create a line plot in Vega grammar'''
def __init__(self):
'''Build Vega Line plot chart with default parameters'''
pass
#Something still broken- need to do some syntax hunting...
super(Line, self).__init__()
line_updates = [('add', 'linear', 'scales', 0, 'type'),
('remove', 'update', 'marks', 0, 'properties'),
('remove', 'hover', 'marks', 0, 'properties'),
('remove', 'width', 'marks', 0, 'properties', 'enter'),
('add', 'line', 'marks', 0, 'type'),
('add', {'value': '#2a3140'}, 'marks', 0,
'properties', 'enter', 'stroke'),
('add', {'value': 2}, 'marks', 0, 'properties',
'enter', 'strokeWidth')]
self.multi_update(line_updates)
self.build_vega()
| 14,109 | 35.086957 | 79 | py |
d3py | d3py-master/d3py/geoms/area.py | from geom import Geom, JavaScript, Selection, Function
class Area(Geom):
def __init__(self,x,yupper,ylower,**kwargs):
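        """
        Filled area between two series: x is the x-axis column, yupper and
        ylower are the columns bounding the area.
        Illustrative usage (mirrors examples/d3py_area.py in this repository):
            fig += d3py.geoms.Area('x', 'y', 'y0')
        """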
Geom.__init__(self,**kwargs)
self.x = x
self.yupper = yupper
self.ylower = ylower
self.params = [x, yupper, ylower]
self.debug = True
self.name = "area"
self._build_js()
self._build_css()
def _build_js(self):
scales = """
scales = {
x: get_scales(['%s'], 'horizontal'),
y: get_scales(['%s','%s'], 'vertical')
}
"""%(self.x, self.ylower, self.yupper)
x_fxn = Function(None, "d", "return scales.x(d.%s)"%self.x)
y1_fxn = Function(None, "d", "return scales.y(d.%s)"%self.yupper)
y0_fxn = Function(None, "d", "return scales.y(d.%s)"%self.ylower)
draw = Function("draw", ("data", ))
draw += scales
draw += "var area = " + Selection("d3.svg").add_attribute("area") \
.add_attribute("x", x_fxn) \
.add_attribute("y0", y0_fxn) \
.add_attribute("y1", y1_fxn)
draw += "console.log(data)"
draw += "console.log(area(data))"
draw += "console.log(scales.y(data[0].y))"
draw += Selection("g").append("'svg:path'") \
.attr("'d'", "area(data)") \
.attr("'class'", "'geom_area'") \
.attr("'id'", "'area_%s_%s_%s'"%(self.x, self.yupper, self.ylower))
self.js = JavaScript(draw)
return self.js
def _build_css(self):
# default css
geom_area = {"stroke-width": "1px", "stroke": "black", "fill": "MediumSeaGreen"}
self.css[".geom_area"] = geom_area
self.css["#area_%s_%s_%s"%(self.x,self.yupper, self.ylower)] = self.styles
return self.css
| 1,844 | 30.810345 | 88 | py |
d3py | d3py-master/d3py/geoms/geom.py | from ..css import CSS
from ..javascript import JavaScript, Selection, Function
class Geom:
def __init__(self, **kwargs):
self.styles = kwargs
self.js = JavaScript()
self.css = CSS()
def _build_js(self):
raise NotImplementedError
def _build_css(self):
raise NotImplementedError
| 340 | 21.733333 | 56 | py |
d3py | d3py-master/d3py/geoms/point.py | from geom import Geom, JavaScript, Selection, Function
class Point(Geom):
def __init__(self,x,y,c=None,**kwargs):
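        """
        Scatter-plot point geom: x and y name the data columns, and c can
        optionally name a column used to fill each point.
        Illustrative usage (mirrors examples/d3py_scatter.py in this repository):
            fig += d3py.Point("d1", "d2", fill="DodgerBlue")
        """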
Geom.__init__(self, **kwargs)
self.x = x
self.y = y
self.c = c
self._id = 'point_%s_%s_%s'%(self.x,self.y,self.c)
self.params = [x,y,c]
self.name = "point"
self._build_css()
self._build_js()
def _build_css(self):
point = {
"stroke-width" : "1px",
"stroke" : "black",
"fill-opacity" : 0.7,
"stroke-opacity": 1,
"fill" : "blue"
}
self.css[".geom_point"] = point
# arbitrary styles
self.css["#"+self._id] = self.styles
return self.css
def _build_js(self):
scales = """
scales = {
x : get_scales(['%s'], 'horizontal'),
y : get_scales(['%s'], 'vertical')
}
"""%(self.x, self.y)
draw = Function("draw", ("data",))
draw += scales
js_cx = Function(None, "d", "return scales.x(d.%s);"%self.x)
js_cy = Function(None, "d", "return scales.y(d.%s);"%self.y)
obj = Selection("g").selectAll("'.geom_point'") \
.data("data") \
.enter() \
.append("'svg:circle'") \
.attr("'cx'", js_cx) \
.attr("'cy'", js_cy) \
.attr("'r'", 4) \
.attr("'class'", "'geom_point'") \
.attr("'id'", "'%s'"%self._id)
if self.c:
fill = Function(None, "return d.%s;"%self.c)
obj.add_attribute("style", "fill", fill)
draw += obj
self.js = JavaScript(draw)
return self.js
| 1,968 | 34.160714 | 69 | py |
d3py | d3py-master/d3py/geoms/xaxis.py | from geom import Geom, JavaScript, Selection, Function
class xAxis(Geom):
def __init__(self,x, label=None, **kwargs):
"""
x : string
name of the column you want to use to define the x-axis
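        label : string, optional
            axis label; defaults to the column name
        Illustrative usage (mirrors examples/d3py_scatter.py in this repository):
            fig += d3py.xAxis('d1', label="Random")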
"""
Geom.__init__(self, **kwargs)
self.x = x
self.label = label if label else x
self.params = [x]
self._id = 'xaxis'
self.name = 'xaxis'
self._build_css()
self._build_js()
def _build_js(self):
draw = Function("draw", ("data",), [])
scale = "scales.x"
draw += "xAxis = d3.svg.axis().scale(%s)"%scale
xaxis_group = Selection("g").append('"g"') \
.attr('"class"','"xaxis"') \
.attr('"transform"', '"translate(0," + height + ")"') \
.call("xAxis")
draw += xaxis_group
if self.label:
# TODO: Have the transform on this label be less hacky
label_group = Selection("g").append('"text"') \
.add_attribute("text", '"%s"'%self.label) \
.attr('"text-anchor"', '"middle"') \
.attr('"x"', "width/2") \
.attr('"y"', "height+45")
draw += label_group
self.js = JavaScript() + draw
return self.js
def _build_css(self):
axis_path = {
"fill" : "none",
"stroke" : "#000"
}
self.css[".xaxis path"] = axis_path
axis_path = {
"fill" : "none",
"stroke" : "#000"
}
self.css[".xaxis line"] = axis_path
return self.css
| 1,650 | 29.574074 | 69 | py |
d3py | d3py-master/d3py/geoms/graph.py | from geom import Geom, JavaScript, Selection, Function
class ForceLayout(Geom):
def __init__(self,**kwargs):
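        """
        Force-directed graph layout geom, intended for use with a
        NetworkXFigure. Illustrative usage (mirrors examples/d3py_graph.py):
            p += d3py.ForceLayout()
        """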
Geom.__init__(self,**kwargs)
self.name = "forceLayout"
self._id = 'forceLayout'
self._build_js()
self._build_css()
self.styles = dict([(k[0].replace('_','-'), k[1]) for k in kwargs.items()])
def _build_js(self):
draw = Function("draw", ("data",), [])
draw += Selection("g") \
.selectAll("'circle.node'") \
.data("data.nodes") \
.enter() \
.append("'circle'") \
.attr("'class'","'node'") \
.attr("'r'", 12)
draw += Selection("g") \
.selectAll("'line.link'") \
.data("data.links") \
.enter() \
.append("'line'") \
.attr("'class'", "'link'")
code = [
"var force = d3.layout.force()",
".charge(-120)",
'.linkDistance(30)',
'.size([width, height])',
'.nodes(data.nodes)',
'.links(data.links);'
'force.on("tick", function() {',
'g.selectAll("line.link").attr("x1", function(d) { return d.source.x; })',
'.attr("y1", function(d) { return d.source.y; })',
'.attr("x2", function(d) { return d.target.x; })',
'.attr("y2", function(d) { return d.target.y; });',
'g.selectAll("circle.node").attr("cx", function(d) { return d.x; })',
'.attr("cy", function(d) { return d.y; });',
'});',
'g.selectAll("circle.node").call(force.drag);',
'force.start();',
]
# TODO the order of the next two lines seems inappropriately important
draw += JavaScript(code)
self.js = JavaScript() + draw
self.js += (Function("init", autocall=True) + "console.debug('Hi');")
return self.js
def _build_css(self):
line = {
"stroke-width": "1px",
"stroke": "black",
}
self.css[".link"] = line
# arbitrary styles
self.css["#"+self._id] = self.styles
return self.css
| 2,376 | 32.478873 | 90 | py |
d3py | d3py-master/d3py/geoms/bar.py | from geom import Geom, JavaScript, Selection, Function
class Bar(Geom):
def __init__(self,x,y,**kwargs):
"""
This is a vertical bar chart - the height of each bar represents the
magnitude of each class
x : string
name of the column that contains the class labels
y : string
name of the column that contains the magnitude of each class
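        Illustrative usage (mirrors examples/d3py_bar.py in this repository):
            p += d3py.Bar(x="apple_type", y="count", fill="MediumAquamarine")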
"""
Geom.__init__(self,**kwargs)
self.x = x
self.y = y
self.name = "bar"
self._id = 'bar_%s_%s'%(self.x,self.y)
self._build_js()
self._build_css()
self.params = [x,y]
self.styles = dict([(k[0].replace('_','-'), k[1]) for k in kwargs.items()])
def _build_js(self):
# build scales
scales = """
scales = {
x : get_scales(['%s'], 'horizontal'),
y : get_scales(['%s'], 'vertical')
}
"""%(self.x, self.y)
xfxn = Function(None, "d", "return scales.x(d.%s);"%self.x)
yfxn = Function( None, "d", "return scales.y(d.%s)"%self.y)
heightfxn = Function(
None,
"d",
"return height - scales.y(d.%s)"%self.y
)
draw = Function("draw", ("data",), [scales])
draw += scales
draw += Selection("g").selectAll("'.bars'") \
.data("data") \
.enter() \
.append("'rect'") \
.attr("'class'", "'geom_bar'") \
.attr("'id'", "'%s'"%self._id) \
.attr("'x'", xfxn) \
.attr("'y'", yfxn) \
.attr("'width'", "scales.x.rangeBand()")\
.attr("'height'", heightfxn)
# TODO: rangeBand above breaks for histogram type bar-plots... fix!
self.js = JavaScript() + draw
self.js += (Function("init", autocall=True) + "console.debug('Hi');")
return self.js
def _build_css(self):
bar = {
"stroke-width": "1px",
"stroke": "black",
"fill-opacity": 0.7,
"stroke-opacity": 1,
"fill": "blue"
}
self.css[".geom_bar"] = bar
# arbitrary styles
self.css["#"+self._id] = self.styles
return self.css
| 2,313 | 29.853333 | 83 | py |
d3py | d3py-master/d3py/geoms/yaxis.py | from geom import Geom, JavaScript, Selection, Function
class yAxis(Geom):
def __init__(self,y, label=None, **kwargs):
"""
y : string
name of the column you want to use to define the y-axis
"""
Geom.__init__(self, **kwargs)
self.y = y
self.label = label if label else y
self.params = [y]
self._id = 'yaxis'
self.name = 'yaxis'
self._build_css()
self._build_js()
def _build_js(self):
draw = Function("draw", ("data",), [])
scale = "scales.y"
draw += "yAxis = d3.svg.axis().scale(%s).orient('left')"%scale
yaxis_group = Selection("g").append('"g"') \
.attr('"class"','"yaxis"') \
.call("yAxis")
draw += yaxis_group
if self.label:
# TODO: Have the transform on this label be less hacky
label_group = Selection("g").append('"text"') \
.add_attribute("text", '"%s"'%self.label) \
.attr('"y"', '- margin.left + 15') \
.attr('"x"', '- height / 2.0') \
.attr('"text-anchor"', '"middle"') \
.attr('"transform"', '"rotate(-90, 0, 0)"')
draw += label_group
self.js = JavaScript() + draw
return self.js
def _build_css(self):
axis_path = {
"fill" : "none",
"stroke" : "#000"
}
self.css[".yaxis path"] = axis_path
axis_path = {
"fill" : "none",
"stroke" : "#000"
}
self.css[".yaxis line"] = axis_path
return self.css
| 1,677 | 30.074074 | 70 | py |
d3py | d3py-master/d3py/geoms/__init__.py | #!/usr/local/bin/python
from xaxis import xAxis
from yaxis import yAxis
from point import Point
from bar import Bar
from line import Line
from area import Area
from geom import Geom
from graph import ForceLayout
| 213 | 18.454545 | 29 | py |
d3py | d3py-master/d3py/geoms/line.py | from geom import Geom, JavaScript, Selection, Function
class Line(Geom):
def __init__(self,x,y,**kwargs):
Geom.__init__(self,**kwargs)
self.x = x
self.y = y
self.params = [x,y]
self.debug = True
self.name = "line"
self._build_js()
self._build_css()
def _build_js(self):
# build scales
scales = """
scales = {
x : get_scales(['%s'], 'horizontal'),
y : get_scales(['%s'], 'vertical')
}
"""%(self.x, self.y)
# add the line
x_fxn = Function(None, "d", "return scales.x(d.%s)"%self.x)
y_fxn = Function(None, "d", "return scales.y(d.%s)"%self.y)
draw = Function("draw", ("data", ))
draw += scales
draw += "var line = " + Selection("d3.svg").add_attribute("line") \
.add_attribute("x", x_fxn) \
.add_attribute("y", y_fxn)
draw += Selection("g").append("'svg:path'") \
.attr("'d'", "line(data)") \
.attr("'class'", "'geom_line'") \
.attr("'id'", "'line_%s_%s'"%(self.x, self.y))
self.js = JavaScript(draw)
return self.js
def _build_css(self):
# default css
geom_line = {"stroke-width": "1px", "stroke": "black", "fill": None}
self.css[".geom_line"] = geom_line
self.css["#line_%s_%s"%(self.x,self.y)] = self.styles
return self.css
| 1,624 | 32.163265 | 82 | py |
z2n-periodogram | z2n-periodogram-master/setup.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Generic/Built-in
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='z2n-periodogram',
version='2.0.6',
license='MIT',
python_requires='>=3.6.9',
install_requires=[
'intel-openmp',
'click',
'pillow',
'click-shell',
'matplotlib',
'termtables',
'colorama',
'astropy',
'psutil',
'numpy',
'numba',
'icc-rt',
'scipy',
'h5py',
'tqdm',
'tbb',
],
tests_require=['pytest'],
extras_require={
"dev": [
"pytest",
"mkdocs",
"mkautodoc",
"markdown-katex",
],
},
entry_points='''
[console_scripts]
z2n=z2n.main:cli
''',
author='Yohan Alexander',
author_email='[email protected]',
    description='A package for interactive periodograms analysis.',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://z2n-periodogram.readthedocs.io',
project_urls={
"Documentation": "https://z2n-periodogram.readthedocs.io",
"Source": "https://github.com/yohanalexander/z2n-periodogram",
},
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Astronomy",
],
packages=setuptools.find_namespace_packages(),
)
| 1,886 | 28.030769 | 74 | py |
z2n-periodogram | z2n-periodogram-master/z2n/main.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Other Libraries
import click
import matplotlib
def cli() -> None:
"""Entry point to the Z2n Software."""
try:
matplotlib.use('tkagg')
except (ImportError, ModuleNotFoundError):
click.secho("Failed to use interactive backend.", fg='red')
click.secho(
"Check Tkinter dependency: sudo apt-get install python3-tk""", fg='yellow')
else:
# Owned Libraries
from z2n import prompt
prompt.z2n()
if __name__ == "__main__":
cli()
| 549 | 21 | 87 | py |