date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | mizolotu/ccrl | stable_baselines~ppo~ppod.py | import os
import time
import os.path as osp
import gym
from gym import spaces
import tensorflow as tf
import numpy as np
import pandas as pd
from stable_baselines.common.base_class import BaseRLModel
from stable_baselines.common.buffers import RolloutBuffer
from stable_baselines.common.utils import explained_variance, get_schedule_fn
from stable_baselines.common import logger
from stable_baselines.ppo.policies import PPOPolicy
from stable_baselines.common.save_util import data_to_json, json_to_data
from collections import deque
class PPOD(BaseRLModel):
"""
Proximal Policy Optimization algorithm (PPO) (clip version)
Paper: https://arxiv.org/abs/1707.06347
Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/),
https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail,
and Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines).
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
:param policy: (PPOPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param learning_rate: (float or callable) The learning rate, it can be a function
of the current progress (from 1 to 0)
:param n_steps: (int) The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
:param batch_size: (int) Minibatch size
:param n_epochs: (int) Number of epochs when optimizing the surrogate loss
:param gamma: (float) Discount factor
:param gae_lambda: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: (float or callable) Clipping parameter, it can be a function of the current progress
(from 1 to 0).
:param clip_range_vf: (float or callable) Clipping parameter for the value function,
it can be a function of the current progress (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
:param ent_coef: (float) Entropy coefficient for the loss calculation
:param vf_coef: (float) Value function coefficient for the loss calculation
:param max_grad_norm: (float) The maximum value for the gradient clipping
:param target_kl: (float) Limit the KL divergence between updates,
because the clipping is not enough to prevent large updates
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param create_eval_env: (bool) Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param seed: (int) Seed for the pseudo random generators
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
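Example (illustrative sketch only; assumes 'CartPole-v1' is registered in Gym and that './model' and './logs' are existing, writable paths):
    model = PPOD('MlpPolicy', 'CartPole-v1', n_steps=2048, batch_size=64, modelpath='./model', logpath='./logs')
    model.learn(total_timesteps=100000)
    obs = model.env.reset()
    action = model.predict(obs, deterministic=True)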
"""
def __init__(self, policy, env, learning_rate=3e-4,
n_steps=2048, batch_size=64, n_epochs=10,
gamma=0.99, gae_lambda=0.95, clip_range=0.2, clip_range_vf=None,
ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5,
target_kl=None, tensorboard_log=None, create_eval_env=False,
policy_kwargs=None, verbose=0, seed=0,
_init_setup_model=True, modelpath=None, logpath=None):
super(PPOD, self).__init__(policy, env, PPOPolicy, policy_kwargs=policy_kwargs, verbose=verbose, create_eval_env=create_eval_env, support_multi_env=True, seed=seed)
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_epochs = n_epochs
self.n_steps = n_steps
self.gamma = gamma
self.gae_lambda = gae_lambda
self.clip_range = clip_range
self.clip_range_vf = clip_range_vf
self.ent_coef = ent_coef
self.vf_coef = vf_coef
self.max_grad_norm = max_grad_norm
self.rollout_buffer = None
self.target_kl = target_kl
self.tensorboard_log = tensorboard_log
self.tb_writer = None
self.iteration_start = 0
self.time_elapsed_start = 0
self.num_timesteps_start = 0
self.modelpath = modelpath
params_loaded, policy_loaded = self._setup_model(modelpath)
if logpath is not None:
p = None
if modelpath is not None and params_loaded and policy_loaded:
try:
fname = osp.join(logpath, 'progress.csv')
p = pd.read_csv(fname, delimiter=',', dtype=float)
except Exception:
pass
format_strs = os.getenv('', 'stdout,log,csv').split(',')
logger.configure(os.path.abspath(logpath), format_strs)
if p is not None:
keys = p.keys()
vals = p.values
self.iteration_start = p['iterations'].values[-1]
self.num_timesteps_start = p['total timesteps'].values[-1]
self.time_elapsed_start = p['time_elapsed'].values[-1]
for i in range(vals.shape[0]):
for j in range(len(keys)):
logger.logkv(keys[j], vals[i, j])
logger.dumpkvs()
def _setup_model(self, modelpath=None):
self._setup_learning_rate()
# TODO: preprocessing: one hot vector for obs discrete
state_dim = self.observation_space.shape[0]
if isinstance(self.action_space, spaces.Box):
# Action is a 1D vector
action_dim = self.action_space.shape[0]
elif isinstance(self.action_space, spaces.Discrete):
# Action is a scalar
action_dim = 1
# TODO: different seed for each env when n_envs > 1
if self.n_envs == 1:
self.set_random_seed(self.seed)
params_loaded, policy_loaded = False, False
if modelpath is not None:
try:
data, w_path = self.load(modelpath)
self.__dict__.update(data)
params_loaded = True
except Exception as e:
print(e)
params_loaded = False
self.policy = self.policy_class(self.observation_space, self.action_space, self.learning_rate, **self.policy_kwargs)
self.policy.summary()
if modelpath is not None:
try:
self.policy.load(w_path)
policy_loaded = True
except Exception as e:
print(e)
policy_loaded = False
self.rollout_buffer = RolloutBuffer(self.n_steps, state_dim, action_dim, gamma=self.gamma, gae_lambda=self.gae_lambda, n_envs=self.n_envs)
self.clip_range = get_schedule_fn(self.clip_range)
if self.clip_range_vf is not None:
self.clip_range_vf = get_schedule_fn(self.clip_range_vf)
return params_loaded, policy_loaded
def predict(self, observation, state=None, mask=None, deterministic=False):
"""
Get the model's action from an observation
:param observation: (np.ndarray) the input observation
:param state: (np.ndarray) The last states (can be None, used in recurrent policies)
:param mask: (np.ndarray) The last masks (can be None, used in recurrent policies)
:param deterministic: (bool) Whether or not to return deterministic actions.
:return: (np.ndarray, np.ndarray) the model's action and the next state (used in recurrent policies)
"""
clipped_actions = self.policy.actor_forward(observation, deterministic=deterministic)
#clipped_actions = self.policy.actor_forward(np.array(observation).reshape(1, -1), deterministic=deterministic)
if isinstance(self.action_space, gym.spaces.Box):
clipped_actions = np.clip(clipped_actions, self.action_space.low, self.action_space.high)
return clipped_actions
def collect_rollouts(self, env, rollout_buffer, n_rollout_steps=256, callback=None, obs=None):
n_steps = 0
rollout_buffer.reset()
rewards_ = []
while n_steps < n_rollout_steps:
actions, values, log_probs, _ = self.policy.call(obs)
actions = actions.numpy()
# Rescale and perform action
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high)
new_obs, rewards, dones, infos = env.step(clipped_actions)
rewards_.append(rewards)
self._update_info_buffer(infos)
n_steps += 1
if isinstance(self.action_space, gym.spaces.Discrete):
# Reshape in case of discrete action
actions = actions.reshape(-1, 1)
rollout_buffer.add(obs, actions, rewards, dones, values, log_probs)
obs = new_obs
rollout_buffer.compute_returns_and_advantage(values, dones=dones)
self._update_reward_buffer(rewards_)
return obs
@tf.function
def policy_loss(self, advantage, log_prob, old_log_prob, clip_range):
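# Reference (paraphrasing the PPO paper): the clipped surrogate objective is
#   L_clip = E_t[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ],  with r_t = pi_theta(a_t|s_t) / pi_theta_old(a_t|s_t).
# Below, ratio = exp(log_prob - old_log_prob) is r_t, clip_range plays the role of eps, and the negated mean is returned as the loss.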
# Normalize advantage
advantage = (advantage - tf.reduce_mean(advantage)) / (tf.math.reduce_std(advantage) + 1e-8)
# ratio between old and new policy, should be one at the first iteration
ratio = tf.exp(log_prob - old_log_prob)
# clipped surrogate loss
policy_loss_1 = advantage * ratio
policy_loss_2 = advantage * tf.clip_by_value(ratio, 1 - clip_range, 1 + clip_range)
return - tf.reduce_mean(tf.minimum(policy_loss_1, policy_loss_2))
@tf.function
def value_loss(self, values, old_values, return_batch, clip_range_vf):
if clip_range_vf is None:
# No clipping
values_pred = values
else:
# Clip the difference between the old and new value
# NOTE: this depends on the reward scaling
values_pred = old_values + tf.clip_by_value(values - old_values, -clip_range_vf, clip_range_vf)
# Value loss using the TD(gae_lambda) target
return tf.keras.losses.MSE(return_batch, values_pred)
def train(self, gradient_steps, batch_size=64):
# Update optimizer learning rate
# self._update_learning_rate(self.policy.optimizer)
# Compute current clip range
clip_range = self.clip_range(self._current_progress)
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress)
else:
clip_range_vf = None
for gradient_step in range(gradient_steps):
approx_kl_divs = []
# Sample the rollout buffer
for replay_data in self.rollout_buffer.get(batch_size):
# Unpack
obs, action, old_values, old_log_prob, advantage, return_batch = replay_data
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
action = action.astype(np.int64).flatten()
with tf.GradientTape() as tape:
tape.watch(self.policy.trainable_variables)
values, log_prob, entropy = self.policy.evaluate_actions(obs, action)
# Flatten
values = tf.reshape(values, [-1])
policy_loss = self.policy_loss(advantage, log_prob, old_log_prob, clip_range)
value_loss = self.value_loss(values, old_values, return_batch, clip_range_vf)
# Entropy loss favors exploration
entropy_loss = -tf.reduce_mean(entropy)
loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss
# Optimization step
gradients = tape.gradient(loss, self.policy.trainable_variables)
# Clip grad norm
# gradients = tf.clip_by_norm(gradients, self.max_grad_norm)
self.policy.optimizer.apply_gradients(zip(gradients, self.policy.trainable_variables))
approx_kl_divs.append(tf.reduce_mean(old_log_prob - log_prob).numpy())
if self.target_kl is not None and np.mean(approx_kl_divs) > 1.5 * self.target_kl:
print("Early stopping at step {} due to reaching max kl: {:.2f}".format(gradient_step,
np.mean(approx_kl_divs)))
break
explained_var = explained_variance(self.rollout_buffer.returns.flatten(),
self.rollout_buffer.values.flatten())
logger.logkv("clip_range", clip_range)
if self.clip_range_vf is not None:
logger.logkv("clip_range_vf", clip_range_vf)
logger.logkv("explained_variance", explained_var)
# TODO: gather stats for the entropy and other losses?
logger.logkv("entropy", entropy.numpy().mean())
logger.logkv("policy_loss", policy_loss.numpy())
logger.logkv("value_loss", value_loss.numpy())
if hasattr(self.policy, 'log_std'):
logger.logkv("std", tf.exp(self.policy.log_std).numpy().mean())
def pretrain(self, observations, expert_actions, nepochs=10000, nbatchesperepoch=10, val_split=0.25, patience=100):
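# Behaviour-cloning pre-training: fit the policy to expert (observation, action) pairs, using sparse softmax
# cross-entropy for Discrete action spaces and MSE for Box action spaces, with a train/validation split and
# early stopping that restores the best weights after `patience` epochs without validation improvement.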
n = observations.shape[0]
inds = np.arange(n)
inds_val, inds_tr = np.split(inds, [int(val_split * n)])
n_tr = len(inds_tr)
n_val = len(inds_val)
val_losses = deque(maxlen=10)
patience_count = 0
val_loss_min = +np.inf
best_weights = None
for epoch in range(nepochs):
train_loss = 0.0
for _ in range(nbatchesperepoch):
idx = inds_tr[np.random.choice(n_tr, self.batch_size)]
obs = observations[idx, :]
if isinstance(self.action_space, spaces.Discrete):
actions_ = expert_actions[idx]
elif isinstance(self.action_space, spaces.Box):
actions_ = expert_actions[idx, :]
with tf.GradientTape() as tape:
tape.watch(self.policy.trainable_variables)
actions, values, log_probs, action_logits = self.policy.call(obs)
if isinstance(self.action_space, spaces.Discrete):
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=actions_, logits=action_logits))
elif isinstance(self.action_space, spaces.Box):
loss = tf.reduce_mean(tf.square(actions - actions_))
train_loss += loss
# Optimization step
gradients = tape.gradient(loss, self.policy.trainable_variables)
# Clip grad norm
# gradients = tf.clip_by_norm(gradients, self.max_grad_norm)
self.policy.optimizer.apply_gradients(zip(gradients, self.policy.trainable_variables))
val_loss = 0.0
for _ in range(nbatchesperepoch):
idx = inds_val[np.random.choice(n_val, self.batch_size)]
obs = observations[idx, :]
if isinstance(self.action_space, spaces.Discrete):
actions_ = expert_actions[idx]
elif isinstance(self.action_space, spaces.Box):
actions_ = expert_actions[idx, :]
actions, values, log_probs, action_logits = self.policy.call(obs)
if isinstance(self.action_space, spaces.Discrete):
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=actions_, logits=action_logits))
elif isinstance(self.action_space, spaces.Box):
loss = tf.reduce_mean(tf.square(actions - actions_))
val_loss += loss
val_losses.append(val_loss / nbatchesperepoch)
print(f'At epoch {epoch + 1}/{nepochs}, train loss is {train_loss / nbatchesperepoch}, validation loss is {val_loss / nbatchesperepoch}, patience is {patience_count + 1}/{patience}')
if np.mean(val_losses) < val_loss_min:
val_loss_min = np.mean(val_losses)
patience_count = 0
best_weights = self.policy.get_weights()
else:
patience_count += 1
if patience_count >= patience:
self.policy.set_weights(best_weights)
break
def save(self, path):
# save weights
w_path = osp.join(path, 'model')
self.policy.save(w_path)
# save data
data = {
"gamma": self.gamma,
"n_steps": self.n_steps,
"vf_coef": self.vf_coef,
"ent_coef": self.ent_coef,
"max_grad_norm": self.max_grad_norm,
"learning_rate": self.learning_rate,
"gae_lambda": self.gae_lambda,
"batch_size": self.batch_size,
"n_epochs": self.n_epochs,
"clip_range": self.clip_range,
"clip_range_vf": self.clip_range_vf,
"verbose": self.verbose,
"policy_class": self.policy_class,
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_envs": self.n_envs,
"seed": self.seed,
"policy_kwargs": self.policy_kwargs
}
d_path = osp.join(path, 'params')
serialized_data = data_to_json(data)
with open(d_path, 'w') as f:
f.write(serialized_data)
def load(self, path):
# load data
d_path = osp.join(path, 'params')
with open(d_path, 'r') as f:
json_data = f.read()
data = json_to_data(json_data)
# weights
w_path = osp.join(path, 'model')
return data, w_path
def learn(self, total_timesteps, callback=None, log_interval=1, eval_env=None, eval_freq=-1, n_eval_episodes=5, tb_log_name="PPO", reset_num_timesteps=True):
timesteps_since_eval, iteration, evaluations, obs, eval_env = self._setup_learn(eval_env)
iteration += self.iteration_start
if self.tensorboard_log is not None:
self.tb_writer = tf.summary.create_file_writer(os.path.join(self.tensorboard_log, f'{tb_log_name}_{time.time()}'))
while self.num_timesteps < total_timesteps:
if callback is not None:
# Only stop training if return value is False, not when it is None.
if callback(locals(), globals()) is False:
break
obs = self.collect_rollouts(self.env, self.rollout_buffer, n_rollout_steps=self.n_steps, obs=obs)
iteration += 1
self.num_timesteps += self.n_steps * self.n_envs
timesteps_since_eval += self.n_steps * self.n_envs
self._update_current_progress(self.num_timesteps, total_timesteps)
# Display training infos
if self.verbose >= 1 and log_interval is not None and iteration % log_interval == 0:
if len(self.ep_reward_buffer) > 0:
fps = int(self.num_timesteps / (time.time() - self.start_time))
logger.logkv("iterations", iteration)
logger.logkv('ep_rew_mean', self.safe_mean(self.ep_reward_buffer))
logger.logkv("fps", fps)
logger.logkv('time_elapsed', int(time.time() - self.start_time + self.time_elapsed_start))
logger.logkv("total timesteps", self.num_timesteps + self.num_timesteps_start)
logger.dumpkvs()
if iteration > self.iteration_start + 1:
self.save(self.modelpath)
self.train(self.n_epochs, batch_size=self.batch_size)
# Evaluate the agent
timesteps_since_eval = self._eval_policy(eval_freq, eval_env, n_eval_episodes, timesteps_since_eval, deterministic=True)
# For tensorboard integration
if self.tb_writer is not None:
with self.tb_writer.as_default():
if len(self.ep_reward_buffer) > 0:
tf.summary.scalar('Reward', self.safe_mean(self.ep_reward_buffer), self.num_timesteps)
return self
| [] |
2024-01-10 | sofia099/vector_search_demo | vector_embedding.py | import openai
import csv
import time
### EDIT BELOW
OPENAI_API_KEY = 'YOUR_OPENAI_API_KEY'
num_subsets = 9
### STOP
# set openai api key
openai.api_key = OPENAI_API_KEY
# generate embeddings
def generate_embeddings(values):
embeddings = openai.Embedding.create(input=values, model='text-embedding-ada-002')['data'][0]['embedding']
return embeddings
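# Note: text-embedding-ada-002 returns a 1536-dimensional vector, so each row's 'embedding' column below ends up
# holding a list of 1536 floats, e.g. (hypothetical input string):
#   emb = generate_embeddings('some movie name and its description')  # len(emb) == 1536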
# read the csv file
def read_csv_file(file_path):
rows = []
with open(file_path, 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
rows.append(row)
return rows
# write the embeddings to the csv file
def write_embeddings_to_csv(file_path, rows):
with open(file_path, 'w', newline='') as csvfile:
fieldnames = list(rows[0].keys())
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in rows:
writer.writerow(row)
for i in range(1,num_subsets+1):
# read the rows from the csv file
input_csv_file_path = f"subset_{i}.csv"
rows = read_csv_file(input_csv_file_path)
# generate embeddings for the values
values = [row['movie_name']+' '+row['description'] for row in rows] # embedding the movie_name and description
    try:
        embeddings = [generate_embeddings(value) for value in values]
    except openai.error.RateLimitError:
        time.sleep(61)  # sleep for at least 1 min to avoid hitting RateLimitError again
        try:
            embeddings = [generate_embeddings(value) for value in values]
        except openai.error.RateLimitError:
            time.sleep(300)  # sleep for 5 min before one final retry
            embeddings = [generate_embeddings(value) for value in values]
# append the embeddings to the rows
for row, embedding in zip(rows, embeddings):
row['embedding'] = embedding
write_embeddings_to_csv(input_csv_file_path, rows)
print(f"Embeddings for subset {i} done.") | [] |
2024-01-10 | liuzhengzhe/Towards-Implicit-Text-Guided-Shape-Generation | generation~model_res64.py | import os,csv
import time
import math
import random
import numpy as np
import h5py
import glob
import scipy.interpolate
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
from scipy.interpolate import RegularGridInterpolator
import mcubes
import mcubes as mc
from utils import *
import copy
from mcubes import marching_cubes #, grid_interp
#pytorch 1.2.0 implementation
#from dalle_pytorch import OpenAIDiscreteVAE, DALLE
#from dalle_pytorch.transformer import Transformer,Transformer_mutual
from transformers import AutoModelForSequenceClassification, AutoConfig
from pytorch_lamb import Lamb
from transformers import (
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
)
def grid_interp(vol, points):
"""
Interpolate volume data at given points
Inputs:
vol: 4D torch tensor (C, Nz, Ny, Nx)
points: point locations (Np, 3)
Outputs:
output: interpolated data (Np, C)
"""
#vol=torch.from_numpy(vol)#.cuda()
if vol.is_cuda:
return mc.grid_interp_cuda(vol, points)
else:
return mc.grid_interp_cpu(vol, points) #'''===
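# Illustrative usage sketch (assumes the mcubes build used by this repo provides grid_interp_cuda/grid_interp_cpu, as imported above):
#   vol = torch.rand(3, 64, 64, 64)          # (C, Nz, Ny, Nx) colour grid
#   pts = torch.tensor([[1.5, 2.0, 3.25]])   # (Np, 3) query locations, in whatever coordinate convention grid_interp expects
#   colours = grid_interp(vol, pts)          # -> (Np, C) interpolated values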
class PositionalEncoder(nn.Module):
def __init__(self, d_model, max_seq_len = 80):
super().__init__()
self.d_model = d_model
# create constant 'pe' matrix with values dependent on
# pos and i
pe = torch.zeros(max_seq_len, d_model)
for pos in range(max_seq_len):
for i in range(0, d_model, 2):
pe[pos, i] = \
math.sin(pos / (10000 ** ((2 * i)/d_model)))
pe[pos, i + 1] = \
math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
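# Reference: the standard Transformer encoding is PE(pos, 2i) = sin(pos / 10000^(2i/d_model)) and
# PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model)); note the exponents in the loop above differ slightly from this reference.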
def forward(self, x):
# make embeddings relatively larger
x = x * math.sqrt(self.d_model)
#add constant to embedding
seq_len = x.size(1)
#print ('xshape', x.shape, seq_len)
x = x + Variable(self.pe[:,:seq_len], requires_grad=False).cuda()
return x
def attention(q, k, v, d_k, mask=None, dropout=None):
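    # Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V, with masked positions set to -1e9 before the
    # softmax so they receive ~zero weight; dropout, if given, is applied to the attention weights.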
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
#print ('mask score ', mask.shape, scores.shape)
#print ('s1',scores.shape)
if mask is not None:
mask = mask.unsqueeze(1).unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1e9)
#print ('s2',scores.shape)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
#print ('output',output.shape)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout = 0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model, bias=True)
self.v_linear = nn.Linear(d_model, d_model, bias=True)
self.k_linear = nn.Linear(d_model, d_model, bias=True)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model, bias=True)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
# perform linear operation and split into h heads
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
# transpose to get dimensions bs * h * sl * d_model
k = k.transpose(1,2)
q = q.transpose(1,2)
v = v.transpose(1,2)
#print (k.shape, q.shape, v.shape, self.d_k, mask.shape)
# calculate attention using function we will define next
scores = attention(q, k, v, self.d_k, mask, self.dropout)
#print ('score',scores.shape)
# concatenate heads and put through final linear layer
concat = scores.transpose(1,2).contiguous()\
.view(bs, -1, self.d_model)
#print ('cct',concat.shape)
output = self.out(concat)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=16, dropout = 0.1):
super().__init__()
# d_ff defaults to 16 here (the original Transformer uses 2048)
self.linear_1 = nn.Linear(d_model, d_ff, bias=True)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model, bias=True)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class Norm(nn.Module):
def __init__(self, d_model, eps = 1e-5):
super().__init__()
self.size = d_model
# create two learnable parameters to calibrate normalisation
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \
/ (x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
class DecoderLayer(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
#self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
#self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
#self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(heads, d_model) #nn.MultiheadAttention(embed_dim=16, num_heads=4)
self.ff = FeedForward(d_model).cuda()
def forward(self, x, e_outputs, src_mask):
#print ('1',self.norm_2.bias)
#x2 = self.norm_1(x)
#x = x + self.dropout_1(self.attn_1(x2, x2, x2)) # trg_mask
x = self.norm_2(x)
#print ('2',torch.unique(x))
#x=torch.transpose(x,0,1)
#e_outputs=torch.transpose(e_outputs,0,1)
#print ('x,e',x.shape, e_outputs.shape)
#print (self.attn_2(x, e_outputs, e_outputs)[0].shape, x.shape)
x = x +self.dropout_2(self.attn_2(x, e_outputs, e_outputs.clone(), src_mask))
# x=torch.transpose(x,0,1)
#print ('3',torch.unique(x))
x = self.norm_3(x)
#print ('4',torch.unique(x))
x = x+self.dropout_3(self.ff(x))
#print ('5',torch.unique(x))
return x
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
class generator(nn.Module):
def __init__(self, z_dim, point_dim, gf_dim):
super(generator, self).__init__()
self.z_dim = z_dim
self.point_dim = point_dim
self.gf_dim = gf_dim
d_model=32
self.linear_1 = nn.Linear(self.z_dim+self.point_dim+d_model, self.gf_dim*8, bias=True)
self.linear_2 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_3 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_4 = nn.Linear(self.gf_dim*8, self.gf_dim*4, bias=True)
self.linear_5 = nn.Linear(self.gf_dim*4, self.gf_dim*2, bias=True)
self.linear_6 = nn.Linear(self.gf_dim*2, self.gf_dim*1, bias=True)
self.linear_7 = nn.Linear(self.gf_dim*1, 1, bias=True)
self.linear_8 = nn.Linear(self.gf_dim*1, 3, bias=True)
nn.init.normal_(self.linear_1.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_1.bias,0)
nn.init.normal_(self.linear_2.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_2.bias,0)
nn.init.normal_(self.linear_3.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_3.bias,0)
nn.init.normal_(self.linear_4.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_4.bias,0)
nn.init.normal_(self.linear_5.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_5.bias,0)
nn.init.normal_(self.linear_6.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_6.bias,0)
nn.init.normal_(self.linear_7.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_7.bias,0)
nn.init.normal_(self.linear_8.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_8.bias,0)
self.linear_text_k = nn.Linear(768, d_model, bias=True)
#self.linear_text_v = nn.Linear(768, d_model, bias=True)
self.linear_shape_q = nn.Linear(259, d_model, bias=True)
self.linear_final = nn.Linear(d_model, d_model, bias=True)
nn.init.normal_(self.linear_text_k.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_k.bias,0)
#nn.init.normal_(self.linear_text_v.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_v.bias,0)
nn.init.normal_(self.linear_shape_q.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_shape_q.bias,0)
self.N=4
self.layers = get_clones(DecoderLayer(d_model, 4), self.N)
self.pe = PositionalEncoder(d_model)
'''dropout=0.1
self.softmax=torch.nn.Softmax(1)
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
#self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(4, d_model)
self.ff = FeedForward(d_model).cuda()'''
def forward(self, points, z, texts, masks, is_training=False):
zs = z.view(-1,1,self.z_dim).repeat(1,points.size()[1],1)
#print (points.shape, z.shape)
pointz = torch.cat([points,zs],2)
#print (texts.shape, pointz.shape)
#print (torch.unique(points),torch.unique(zs))
linear_text_k = self.linear_text_k(texts)
#linear_text_v = self.linear_text_v(texts)
linear_shape_q = self.linear_shape_q(pointz.detach())
#print (linear_text_k.shape, linear_shape_q.shape)
'''att1=torch.einsum('btd,bsd->bts', linear_text_k, linear_shape_q) #b, t, s
att1=self.softmax(att1)
position_sense_feat=torch.einsum('bts,btd->bsd', att1, linear_text_v ) '''
#print ('pointz',torch.unique(pointz), torch.unique(texts))
#print ('weight', torch.unique(self.linear_text_k.weight), torch.unique(self.linear_shape_q.weight))
#print ('bias', torch.unique(self.linear_text_k.bias), torch.unique(self.linear_shape_q.bias))
x=linear_shape_q
src_mask=masks
#print (masks.shape)
'''x = self.dropout_2(self.attn_2(linear_shape_q, linear_text_k, linear_text_v, src_mask))
x2 = self.norm_3(x)
x = self.dropout_3(self.ff(x2))'''
linear_text_k = self.pe(linear_text_k)
#print ('x1',torch.unique(x),self.linear_text_k.)
#print ('linear_text_k',torch.unique(linear_text_k))
for i in range(self.N):
x = self.layers[i](x, linear_text_k, src_mask)
x=self.linear_final(x)/5.0
#print ('pointz',torch.unique(pointz))
#print ('x2',torch.unique(x))
#print (torch.unique(pointz) ,torch.unique(x))
#print (torch.unique(pointz),torch.unique(x))
pointz = torch.cat([pointz, x],2)
#print (torch.unique(position_sense_feat))
l1 = self.linear_1(pointz)
l1 = F.leaky_relu(l1, negative_slope=0.02, inplace=True)
l2 = self.linear_2(l1)
l2 = F.leaky_relu(l2, negative_slope=0.02, inplace=True)
l3 = self.linear_3(l2)
l3 = F.leaky_relu(l3, negative_slope=0.02, inplace=True)
l4 = self.linear_4(l3)
l4 = F.leaky_relu(l4, negative_slope=0.02, inplace=True)
l5 = self.linear_5(l4)
l5 = F.leaky_relu(l5, negative_slope=0.02, inplace=True)
l6 = self.linear_6(l5)
l6 = F.leaky_relu(l6, negative_slope=0.02, inplace=True)
l7 = self.linear_7(l6)
l8 = self.linear_8(l6)
#l7 = torch.clamp(l7, min=0, max=1)
l7 = torch.max(torch.min(l7, l7*0.01+0.99), l7*0.01)
l8 = torch.max(torch.min(l8, l8*0+1), l8*0)
#for i in range(4096):
# #print ('l8',l8[0,i,:])
return l7
class generator_color(nn.Module):
def __init__(self, z_dim, point_dim, gf_dim):
super(generator_color, self).__init__()
self.z_dim = z_dim
self.point_dim = point_dim
self.gf_dim = gf_dim
d_model=32
self.linear_1 = nn.Linear(self.z_dim+self.point_dim+d_model, self.gf_dim*8, bias=True)
self.linear_2 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_3 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_4 = nn.Linear(self.gf_dim*8, self.gf_dim*4, bias=True)
self.linear_5 = nn.Linear(self.gf_dim*4, self.gf_dim*2, bias=True)
self.linear_6 = nn.Linear(self.gf_dim*2, self.gf_dim*1, bias=True)
self.linear_7 = nn.Linear(self.gf_dim*1, 1, bias=True)
self.linear_8 = nn.Linear(self.gf_dim*1, 3, bias=True)
nn.init.normal_(self.linear_1.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_1.bias,0)
nn.init.normal_(self.linear_2.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_2.bias,0)
nn.init.normal_(self.linear_3.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_3.bias,0)
nn.init.normal_(self.linear_4.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_4.bias,0)
nn.init.normal_(self.linear_5.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_5.bias,0)
nn.init.normal_(self.linear_6.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_6.bias,0)
nn.init.normal_(self.linear_7.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_7.bias,0)
nn.init.normal_(self.linear_8.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_8.bias,0)
self.linear_text_k = nn.Linear(768, d_model, bias=True)
#self.linear_text_v = nn.Linear(768, d_model, bias=True)
self.linear_shape_q = nn.Linear(259, d_model, bias=True)
self.linear_final = nn.Linear(d_model, d_model, bias=True)
nn.init.normal_(self.linear_text_k.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_k.bias,0)
#nn.init.normal_(self.linear_text_v.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_v.bias,0)
nn.init.normal_(self.linear_shape_q.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_shape_q.bias,0)
self.N=4
self.layers = get_clones(DecoderLayer(d_model, 4), self.N)
self.pe = PositionalEncoder(d_model)
#multihead_attn = nn.MultiheadAttention(embed_dim=16, num_heads=4)
#self.transformer_model = nn.Transformer(d_model=16, nhead=4, num_encoder_layers=0, num_decoder_layers=1, dim_feedforward=16)
'''self.softmax=torch.nn.Softmax(1)
dropout=0.1
self.softmax=torch.nn.Softmax(1)
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
#self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(4, d_model)
self.ff = FeedForward(d_model).cuda()'''
def forward(self, points, z, texts, masks, is_training=False):
zs = z.view(-1,1,self.z_dim).repeat(1,points.size()[1],1)
pointz = torch.cat([points,zs],2)
#print (texts.shape, pointz.shape)
#print (torch.unique(points),torch.unique(zs))
linear_text_k = self.linear_text_k(texts)
#linear_text_v = self.linear_text_v(texts)
linear_shape_q = self.linear_shape_q(pointz.detach())
#print (linear_text_k.shape, linear_shape_q.shape)
'''att1=torch.einsum('btd,bsd->bts', linear_text_k, linear_shape_q) #b, t, s
att1=self.softmax(att1)
position_sense_feat=torch.einsum('bts,btd->bsd', att1, linear_text_v ) '''
x=linear_shape_q
#linear_text_k = self.pe(linear_text_k)
#print ('generator color',torch.unique(x))
src_mask=masks
for i in range(self.N):
x = self.layers[i](x, linear_text_k, src_mask)
x=self.linear_final(x)/5.0
#print ('pointz',torch.unique(pointz))
#print ('x2',torch.unique(x))
#print (torch.unique(pointz) ,torch.unique(x))
#torch.nn.Transformer(d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', custom_encoder=None, custom_decoder=None)
#attn_output, attn_output_weights = multihead_attn(x, key, value)
#print (x.shape,linear_text_k.shape)
#x = self.transformer_model(torch.transpose(linear_text_k,0,1), torch.transpose(x,0,1) )
#print (x.shape)
#x=torch.transpose(x,0,1)
#print (torch.unique(pointz),torch.unique(x))
#print (masks.shape)
'''x =self.dropout_2(self.attn_2(linear_shape_q, linear_text_k, linear_text_v, src_mask))
x2 = self.norm_3(x)
x = self.dropout_3(self.ff(x2))'''
'''linear_text_k = self.linear_text_k(texts)
linear_text_v = self.linear_text_v(texts)
linear_shape_q = self.linear_shape_q(pointz)
att1=torch.einsum('btd,bsd->bts', linear_text_k, linear_shape_q) #b, t, s
att1=self.softmax(att1)
position_sense_feat=torch.einsum('bts,btd->bsd', att1, linear_text_v ) '''
pointz = torch.cat([pointz, x],2)
l1 = self.linear_1(pointz)
l1 = F.leaky_relu(l1, negative_slope=0.02, inplace=True)
l2 = self.linear_2(l1)
l2 = F.leaky_relu(l2, negative_slope=0.02, inplace=True)
l3 = self.linear_3(l2)
l3 = F.leaky_relu(l3, negative_slope=0.02, inplace=True)
l4 = self.linear_4(l3)
l4 = F.leaky_relu(l4, negative_slope=0.02, inplace=True)
l5 = self.linear_5(l4)
l5 = F.leaky_relu(l5, negative_slope=0.02, inplace=True)
l6 = self.linear_6(l5)
l6 = F.leaky_relu(l6, negative_slope=0.02, inplace=True)
#l7 = self.linear_7(l6)
l8 = self.linear_8(l6)
#l7 = torch.clamp(l7, min=0, max=1)
#l7 = torch.max(torch.min(l7, l7*0.01+0.99), l7*0.01)
l8 = torch.max(torch.min(l8, l8*0+1), l8*0)
#for i in range(4096):
# #print ('l8',l8[0,i,:])
return l8
class encoder(nn.Module):
def __init__(self, ef_dim, z_dim):
super(encoder, self).__init__()
self.ef_dim = ef_dim
self.z_dim = z_dim
self.conv_1 = nn.Conv3d(1+3, self.ef_dim, 4, stride=2, padding=1, bias=False)
self.in_1 = nn.InstanceNorm3d(self.ef_dim)
self.conv_2 = nn.Conv3d(self.ef_dim, self.ef_dim*2, 4, stride=2, padding=1, bias=False)
self.in_2 = nn.InstanceNorm3d(self.ef_dim*2)
self.conv_3 = nn.Conv3d(self.ef_dim*2, self.ef_dim*4, 4, stride=2, padding=1, bias=False)
self.in_3 = nn.InstanceNorm3d(self.ef_dim*4)
self.conv_4 = nn.Conv3d(self.ef_dim*4, self.ef_dim*8, 4, stride=2, padding=1, bias=False)
self.in_4 = nn.InstanceNorm3d(self.ef_dim*8)
self.conv_5 = nn.Conv3d(self.ef_dim*8, self.z_dim, 4, stride=1, padding=0, bias=True)
self.conv_6 = nn.Conv3d(self.ef_dim*8, self.z_dim, 4, stride=1, padding=0, bias=True)
nn.init.xavier_uniform_(self.conv_1.weight)
nn.init.xavier_uniform_(self.conv_2.weight)
nn.init.xavier_uniform_(self.conv_3.weight)
nn.init.xavier_uniform_(self.conv_4.weight)
nn.init.xavier_uniform_(self.conv_5.weight)
nn.init.constant_(self.conv_5.bias,0)
nn.init.xavier_uniform_(self.conv_6.weight)
nn.init.constant_(self.conv_6.bias,0)
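    # Shape sketch: for an occupancy + RGB voxel input of shape (B, 4, 64, 64, 64), the four stride-2 convolutions
    # reduce 64 -> 32 -> 16 -> 8 -> 4, and the final kernel-4, stride-1 convolutions bring it to 1, so conv_5 and
    # conv_6 each yield a (B, z_dim, 1, 1, 1) tensor that forward() flattens and squashes with a sigmoid
    # (shape latent and colour latent respectively).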
def forward(self, inputs, is_training=False):
#print ('input',inputs.shape)
d_1 = self.in_1(self.conv_1(inputs))
d_1 = F.leaky_relu(d_1, negative_slope=0.02, inplace=True)
d_2 = self.in_2(self.conv_2(d_1))
d_2 = F.leaky_relu(d_2, negative_slope=0.02, inplace=True)
d_3 = self.in_3(self.conv_3(d_2))
d_3 = F.leaky_relu(d_3, negative_slope=0.02, inplace=True)
d_4 = self.in_4(self.conv_4(d_3))
d_4 = F.leaky_relu(d_4, negative_slope=0.02, inplace=True)
d_5 = self.conv_5(d_4)
d_5 = d_5.view(-1, self.z_dim)
d_5 = torch.sigmoid(d_5)
d_6 = self.conv_6(d_4)
d_6 = d_6.view(-1, self.z_dim)
d_6 = torch.sigmoid(d_6)
return d_5, d_6
class im_network(nn.Module):
def __init__(self, ef_dim, gf_dim, z_dim, point_dim):
super(im_network, self).__init__()
self.ef_dim = ef_dim
self.gf_dim = gf_dim
self.z_dim = z_dim
self.point_dim = point_dim
self.encoder = encoder(self.ef_dim, self.z_dim)
pretrained_path='bert-base-uncased'
config = AutoConfig.from_pretrained(
str(pretrained_path), #num_labels=len(dataBunch.labels)
)
self.model = AutoModelForSequenceClassification.from_pretrained(
str(pretrained_path), config=config, state_dict=None
)
self.encoder = encoder(self.ef_dim, self.z_dim)
self.generator = generator(self.z_dim, self.point_dim, self.gf_dim)
self.generator_color = generator_color(self.z_dim, self.point_dim, self.gf_dim)
def forward(self, texts, masks, inputs, z_vector, z_vector_color, z_vector_c2,out_all,point_coord, words, is_training=False):
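        # Dispatch overview: this forward covers several call patterns.
        #   training:                  run BERT on the text, encode the voxel input, and decode occupancy/colour at point_coord from the voxel latents;
        #   texts only (inference):    run the BERT model to get shape/colour latents and word features;
        #   z_vector + point_coord:    decode occupancy (generator) and colour (generator_color) at the given points;
        #   4-channel voxel input:     encode it to the shape and colour latents.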
if texts is not None:
text_inputs = {
"input_ids": texts,
"attention_mask": masks,
}
if is_training:
#print ('traiing')
z_vector_std, z_vector_c_std = self.encoder(inputs, is_training=is_training)
#net_out_color = self.generator_color(point_coord, z_vector_c2, z_vector, is_training=is_training)
z_vector,z_vector_color, z_vector_c2, words = self.model(**text_inputs)
#net_out = self.generator(point_coord, z_vector, words, masks.detach(), is_training=is_training)
#residue_color = self.generator_color(point_coord, z_vector_c2, words, masks.detach(), is_training=1)
net_out_std = self.generator(point_coord, z_vector_std, words, masks.detach(), is_training=is_training)
residue_color_std= self.generator_color(point_coord, z_vector_c_std,words, masks.detach(), is_training=1)
return z_vector,z_vector_color, z_vector_c2, z_vector_std,None, z_vector_c_std,net_out_std, residue_color_std, net_out_std, residue_color_std, words
else:
if texts is not None:
z_vector,z_vector_color, z_vector_c2, words = self.model(**text_inputs)
return z_vector, None, z_vector_c2,None, None,None,None, words
if z_vector is not None and point_coord is not None:
#print (point_coord.shape, z_vector.shape)
net_out = self.generator(point_coord, z_vector, words, masks, is_training=is_training)
net_out_color = self.generator_color(point_coord, z_vector_c2, words, masks, is_training=is_training)
#print ('net out unique', torch.unique(net_out))
return None,None,None, net_out, net_out_color, None, None #, residue_color+s1_color, s1_color
#elif z_vector is not None and point_coord is not None:
# net_out = self.generator(point_coord, z_vector, is_training=is_training)
# return None,None,None, net_out, None,None,None,
elif (inputs is not None) and (inputs.shape[1]==4):
#z_vector_std, z_vector_color_std, z_vector_c2_std = self.encoder(inputs, is_training=is_training)
z_vector_std, z_vector_c_std = self.encoder(inputs, is_training=is_training)
return z_vector_std,None, z_vector_c_std,None, None,None,None #, net_out, None,None,None,
class IM_res64(object):
def __init__(self, config):
#progressive training
#1-- (16, 16*16*16)
#2-- (32, 16*16*16)
#3-- (64, 16*16*16*4)
self.sample_vox_size = config.sample_vox_size
print (self.sample_vox_size)
if self.sample_vox_size==16:
self.load_point_batch_size = 16*16*16
self.point_batch_size = 16*16*16
self.shape_batch_size = 32
elif self.sample_vox_size==32:
self.load_point_batch_size = 16*16*16
self.point_batch_size = 16*16*16
self.shape_batch_size = 40
elif self.sample_vox_size==64:
self.load_point_batch_size = 16*16*16*4
self.point_batch_size = 16*16*16
self.shape_batch_size = 12
self.input_size = 64 #input voxel grid size
self.ef_dim = 32
self.gf_dim = 128
self.z_dim = 256
self.point_dim = 3
self.dataset_name = config.dataset
#self.dataset_load = self.dataset_name + '_train'
#self.data_paths=glob.glob('hdf5/*.hdf5') #/ccd5e*.hdf5')
self.datas=[]
#start=1
with open('train_official.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
#if start==1:
# start=0
# continue
text=row[2]
name=row[1]
self.datas.append((text,name))
#break
#for i in range(32):
# self.datas.append(self.datas[0])
if not (config.train):# or config.getz):
#self.data_paths=glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5/*.hdf5')
self.datas=[]
with open('test_official.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
text=row[2]
name=row[1]
text_str=row[0]
self.datas.append((text,name,text_str))
#self.data_paths.sort()
#self.dataset_load = self.dataset_name + '_test'
self.checkpoint_dir = config.checkpoint_dir
self.data_dir = config.data_dir
#data_hdf5_name = self.data_dir+'/'+self.dataset_load+'.hdf5'
#self.data_paths=glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5/*.hdf5')
#print ('data name lzz',data_hdf5_name)
'''if not (config.train or config.getz):
self.dataset_load = self.dataset_name + '_test'
data_hdf5_name = self.data_dir+'/'+self.dataset_load+'.hdf5'
data_dict = h5py.File(data_hdf5_name, 'r')
print ('load')
self.data_points = (data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5
self.data_values = data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32)
self.data_colors = data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0
self.data_voxels = data_dict['voxels'][:]
self.data_voxels_colors = data_dict['voxels_colors'][:]/255.0
self.data_voxels_colors = np.transpose(self.data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors = np.reshape(self.data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size])
#reshape to NCHW
self.data_voxels = np.reshape(self.data_voxels, [-1,1,self.input_size,self.input_size,self.input_size])
#else:
# print("error: cannot load "+data_hdf5_name)
# exit(0)'''
#print ('loaded')
if torch.cuda.is_available():
self.device = torch.device('cuda')
torch.backends.cudnn.benchmark = True
else:
self.device = torch.device('cpu')
#build model
self.im_network = im_network(self.ef_dim, self.gf_dim, self.z_dim, self.point_dim)
self.im_network.to(self.device)
#print params
#for param_tensor in self.im_network.encoder.parameters():
# param_tensor.requires_grad=False #print(param_tensor, "\t", self.im_network.state_dict()[param_tensor].size())
'''for param_tensor in self.im_network.generator.parameters():
param_tensor.requires_grad=False #print(param_tensor, "\t", self.im_network.state_dict()[param_tensor].size())
for param_tensor in self.im_network.generator_color.parameters():
param_tensor.requires_grad=False #print(param_tensor, "\t", self.im_network.state_dict()[param_tensor].size())
#for param_tensor in self.im_network.encoder_color.parameters():
# param_tensor.requires_grad=False '''
params = list(self.im_network.generator.parameters())
#params.extend(list(self.im_network.generator_color.parameters()))
#params.extend(list(self.im_network.encoder.parameters()))
self.optimizer2 = torch.optim.Adam([{"params": self.im_network.generator.parameters()}, {"params": self.im_network.generator_color.parameters()}, {"params": self.im_network.encoder.parameters()}], lr=config.learning_rate*0.1, betas=(config.beta1, 0.999))
self.optimizer = self.get_optimizer(0.0001, optimizer_type="lamb")
base_params=[]
#for param_tensor in self.im_network.encoder.parameters():
# base_params.append(param_tensor)
for param_tensor in self.im_network.generator.parameters():
base_params.append(param_tensor)
for param_tensor in self.im_network.generator_color.parameters():
base_params.append(param_tensor)
#self.optimizer = torch.optim.Adam([{'params': base_params}, {'params': self.im_network.model.parameters(), 'lr': 0.001}], lr=config.learning_rate*1, betas=(config.beta1, 0.999))
#self.scheduler = self.get_scheduler(
# self.optimizer, t_total=int(60470*config.epoch), schedule_type="warmup_cosine"
#)
#pytorch does not have a checkpoint manager
#have to define it myself to manage max num of checkpoints to keep
self.max_to_keep = 2
self.checkpoint_path = os.path.join(self.checkpoint_dir, self.model_dir)
self.checkpoint_name='res64.model'
self.checkpoint_manager_list = [None] * self.max_to_keep
self.checkpoint_manager_pointer = 0
#loss
def network_loss(G,point_value):
return torch.mean((G-point_value)**2)
self.loss = network_loss
def color_loss(G,point_color,mask):
return torch.mean(((G-point_color)*mask)**2)
self.color_loss = color_loss
#keep everything a power of 2
self.cell_grid_size = 4
self.frame_grid_size = 64
self.real_size = self.cell_grid_size*self.frame_grid_size #=256, output point-value voxel grid size in testing
self.test_size = 32 #related to testing batch_size, adjust according to gpu memory size
self.test_point_batch_size = self.test_size*self.test_size*self.test_size #do not change
self.test_point_batch_size_in_training=4096
#get coords for training
dima = self.test_size
dim = self.frame_grid_size
self.aux_x = np.zeros([dima,dima,dima],np.uint8)
self.aux_y = np.zeros([dima,dima,dima],np.uint8)
self.aux_z = np.zeros([dima,dima,dima],np.uint8)
multiplier = int(dim/dima)
multiplier2 = multiplier*multiplier
multiplier3 = multiplier*multiplier*multiplier
for i in range(dima):
for j in range(dima):
for k in range(dima):
self.aux_x[i,j,k] = i*multiplier
self.aux_y[i,j,k] = j*multiplier
self.aux_z[i,j,k] = k*multiplier
self.coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
self.coords[i*multiplier2+j*multiplier+k,:,:,:,0] = self.aux_x+i
self.coords[i*multiplier2+j*multiplier+k,:,:,:,1] = self.aux_y+j
self.coords[i*multiplier2+j*multiplier+k,:,:,:,2] = self.aux_z+k
self.coords = (self.coords.astype(np.float32)+0.5)/dim-0.5
self.coords = np.reshape(self.coords,[multiplier3,self.test_point_batch_size,3])
self.coords = torch.from_numpy(self.coords)
self.coords = self.coords.to(self.device)
#get coords for testing
dimc = self.cell_grid_size
dimf = self.frame_grid_size
self.cell_x = np.zeros([dimc,dimc,dimc],np.int32)
self.cell_y = np.zeros([dimc,dimc,dimc],np.int32)
self.cell_z = np.zeros([dimc,dimc,dimc],np.int32)
self.cell_coords = np.zeros([dimf,dimf,dimf,dimc,dimc,dimc,3],np.float32)
self.frame_coords = np.zeros([dimf,dimf,dimf,3],np.float32)
self.frame_coords_train = torch.zeros([16,16,16,3]).cuda()
self.frame_coords_train32 = torch.zeros([32,32,32,3]).cuda()
self.frame_x = np.zeros([dimf,dimf,dimf],np.int32) #.long()
self.frame_y = np.zeros([dimf,dimf,dimf],np.int32) #.long()
self.frame_z = np.zeros([dimf,dimf,dimf],np.int32) #.long()
for i in range(dimc):
for j in range(dimc):
for k in range(dimc):
self.cell_x[i,j,k] = i
self.cell_y[i,j,k] = j
self.cell_z[i,j,k] = k
for i in range(dimf):
for j in range(dimf):
for k in range(dimf):
self.cell_coords[i,j,k,:,:,:,0] = self.cell_x+i*dimc
self.cell_coords[i,j,k,:,:,:,1] = self.cell_y+j*dimc
self.cell_coords[i,j,k,:,:,:,2] = self.cell_z+k*dimc
self.frame_coords[i,j,k,0] = i
self.frame_coords[i,j,k,1] = j
self.frame_coords[i,j,k,2] = k
self.frame_x[i,j,k] = i
self.frame_y[i,j,k] = j
self.frame_z[i,j,k] = k
for i in range(16):
for j in range(16):
for k in range(16):
self.frame_coords_train[i,j,k,0] = i
self.frame_coords_train[i,j,k,1] = j
self.frame_coords_train[i,j,k,2] = k
for i in range(32):
for j in range(32):
for k in range(32):
self.frame_coords_train32[i,j,k,0] = i
self.frame_coords_train32[i,j,k,1] = j
self.frame_coords_train32[i,j,k,2] = k
self.cell_coords = (self.cell_coords.astype(np.float32)+0.5)/self.real_size-0.5
self.cell_coords = np.reshape(self.cell_coords,[dimf,dimf,dimf,dimc*dimc*dimc,3])
self.cell_x = np.reshape(self.cell_x,[dimc*dimc*dimc])
self.cell_y = np.reshape(self.cell_y,[dimc*dimc*dimc])
self.cell_z = np.reshape(self.cell_z,[dimc*dimc*dimc])
self.frame_x = np.reshape(self.frame_x,[dimf*dimf*dimf])
self.frame_y = np.reshape(self.frame_y,[dimf*dimf*dimf])
self.frame_z = np.reshape(self.frame_z,[dimf*dimf*dimf])
self.frame_coords = (self.frame_coords+0.5)/dimf-0.5
self.frame_coords = np.reshape(self.frame_coords,[dimf*dimf*dimf,3])
self.frame_coords_train = (self.frame_coords_train+0.5)/16.0-0.5
self.frame_coords_train = torch.reshape(self.frame_coords_train,[16*16*16,3])
self.frame_coords_train32 = (self.frame_coords_train32+0.5)/32.0-0.5
self.frame_coords_train32 = torch.reshape(self.frame_coords_train32,[32*32*32,3])
#self.conv_edge = nn.Conv3d(3, 3, 3, stride=1, padding=1, groups=3, bias=False)
#self.conv_edge.to(self.device)
self.sampling_threshold = 0.5 #final marching cubes threshold
self.upsample=nn.Upsample(scale_factor=4,mode='trilinear').cuda()
self.upsample32=nn.Upsample(scale_factor=2,mode='trilinear').cuda()
def get_optimizer(self, lr, optimizer_type="lamb"):
# Prepare optimiser and schedule
no_decay = [] #"bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in self.im_network.model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.0, #self.weight_decay,
},
]
if optimizer_type == "lamb":
optimizer = Lamb(optimizer_grouped_parameters, lr=lr, eps=1e-8)
elif optimizer_type == "adamw":
optimizer = AdamW(
optimizer_grouped_parameters, lr=lr, eps=1e-8
)
return optimizer
def get_scheduler(self, optimizer, t_total, schedule_type="warmup_cosine"):
SCHEDULES = {
    None: get_constant_schedule,
    "none": get_constant_schedule,
    "warmup_constant": get_constant_schedule_with_warmup,
    "warmup_cosine": get_cosine_schedule_with_warmup,
}
if schedule_type is None or schedule_type == "none":
    return SCHEDULES[schedule_type](optimizer)
elif schedule_type == "warmup_constant":
return SCHEDULES[schedule_type](
optimizer, num_warmup_steps=0 #self.warmup_steps
)
else:
return SCHEDULES[schedule_type](
optimizer,
num_warmup_steps=0, #self.warmup_steps,
num_training_steps=t_total,
)
def z2voxel(self, z, z_color, words, masks, config):
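        # Overview: evaluate the implicit functions on a coarse 64^3 frame grid to obtain occupancy flags and colours;
        # when config.high_resolution is set, boundary frame cells (where the occupancy flag changes within a 3x3x3
        # neighbourhood) are re-evaluated at the full 256^3 resolution via a flood-fill style queue.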
color_cube_float = np.zeros([3, self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
model_float = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
conf = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32)
#print (model_float.shape)
dimc = self.cell_grid_size #4
dimf = self.frame_grid_size #64
frame_flag = np.zeros([dimf+2,dimf+2,dimf+2],np.uint8)
color_cube = np.ones([3,dimf+2,dimf+2,dimf+2]).astype('float32')
queue = []
frame_batch_num = int(dimf**3/self.test_point_batch_size) #8
assert frame_batch_num>0
#print (dimf #64, dimf**3,262144, self.test_point_batch_size, 32768 , frame_batch_num 8)
#get frame grid values
for i in range(frame_batch_num):
point_coord = self.frame_coords[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
point_coord = np.expand_dims(point_coord, axis=0)
point_coord = torch.from_numpy(point_coord)
point_coord = point_coord.to(self.device)
_,_,_, model_out_, color_out_,_,_ = self.im_network(None,masks,None, z,None, z_color,None, point_coord, words, is_training=False)
#print ('cube 0',torch.unique(color_out_.detach()))
#print ('model out', model_out_.shape, color_out_.shape) torch.Size([1, 32768, 1]) torch.Size([1, 32768, 3])
model_out = model_out_.detach().cpu().numpy()[0]
color_out_ = color_out_.detach().cpu().numpy()[0]
#print (color_out_.shape)
color_out = np.transpose(color_out_,(1,0))
x_coords = self.frame_x[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
y_coords = self.frame_y[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
z_coords = self.frame_z[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
#print (frame_flag.shape, x_coords,y_coords,z_coords, x_coords+1, y_coords+1,z_coords+1)
#print (model_out.shape, color_out.shape, self.test_point_batch_size, color_flag[:,x_coords,y_coords,z_coords].shape) (32768, 1) (32768, 3) 32768 (3, 32768)
frame_flag[x_coords+1,y_coords+1,z_coords+1] = np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]) #66,66,66
conf[x_coords+1,y_coords+1,z_coords+1] = np.reshape(model_out.astype(float), [self.test_point_batch_size])
color_cube[:,x_coords+1,y_coords+1,z_coords+1] = np.reshape(color_out, [3, self.test_point_batch_size]) #66,66,66
#print (x_coords,y_coords,z_coords,x_coords.shape,y_coords.shape,z_coords.shape)
#print ('cube 1',color_out.shape, np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]).shape, np.reshape(color_out, [3, self.test_point_batch_size]).shape, np.unique(color_cube), color_cube[:,x_coords,y_coords,z_coords].shape, frame_flag[x_coords+1,y_coords+1,z_coords+1].shape)
if config.high_resolution:
for i in range(1,dimf+1):
for j in range(1,dimf+1):
for k in range(1,dimf+1):
x_coords = self.cell_x+(i-1)*dimc
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
color_cube_float[0,x_coords+1,y_coords+1,z_coords+1] = color_cube[0,i,j,k]
color_cube_float[1,x_coords+1,y_coords+1,z_coords+1] = color_cube[1,i,j,k]
color_cube_float[2,x_coords+1,y_coords+1,z_coords+1] = color_cube[2,i,j,k]
maxv = np.max(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
minv = np.min(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
if maxv!=minv:
queue.append((i,j,k))
elif maxv==1:
x_coords = self.cell_x+(i-1)*dimc
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = 1.0
cell_batch_size = dimc**3
cell_batch_num = int(self.test_point_batch_size/cell_batch_size)
assert cell_batch_num>0
#run queue
while len(queue)>0:
batch_num = min(len(queue),cell_batch_num)
point_list = []
cell_coords = []
for i in range(batch_num):
point = queue.pop(0)
point_list.append(point)
cell_coords.append(self.cell_coords[point[0]-1,point[1]-1,point[2]-1])
cell_coords = np.concatenate(cell_coords, axis=0)
cell_coords = np.expand_dims(cell_coords, axis=0)
cell_coords = torch.from_numpy(cell_coords)
cell_coords = cell_coords.to(self.device)
_,_,_, model_out_batch_, color_out_batch_,_,_ = self.im_network(None, masks,None,z,None,z_color,None, cell_coords, words, is_training=False)
model_out_batch = model_out_batch_.detach().cpu().numpy()[0]
color_out_batch = color_out_batch_.detach().cpu().numpy()[0]
for i in range(batch_num):
point = point_list[i]
model_out = model_out_batch[i*cell_batch_size:(i+1)*cell_batch_size,0]
x_coords = self.cell_x+(point[0]-1)*dimc
y_coords = self.cell_y+(point[1]-1)*dimc
z_coords = self.cell_z+(point[2]-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = model_out
if np.max(model_out)>self.sampling_threshold:
for i in range(-1,2):
pi = point[0]+i
if pi<=0 or pi>dimf: continue
for j in range(-1,2):
pj = point[1]+j
if pj<=0 or pj>dimf: continue
for k in range(-1,2):
pk = point[2]+k
if pk<=0 or pk>dimf: continue
if (frame_flag[pi,pj,pk] == 0):
frame_flag[pi,pj,pk] = 1
queue.append((pi,pj,pk))
return model_float, color_cube_float, frame_flag, color_cube
@property
def model_dir(self):
return "{}_ae_{}".format(self.dataset_name, self.input_size)
def train(self, config):#64: 0-100 with cyc loss gpu0, 100-200 no cyc loss gpu1
#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
print (checkpoint_txt)
if 1: #os.path.exists(checkpoint_txt):
pass
'''model_dir='checkpoint/color_all_ae_64/IM_AE.model64-239_raw.pth' #'init_32_499.pth' #$'../im-color-base/checkpoint/color_all_ae_64/IM_AE.model32-499_raw.pth' #'init.pth'
self.im_network.load_state_dict(torch.load(model_dir),strict=False)'''
model_dir= config.initialize #/mnt/sda/lzz/merge-cyclic-mulit-att/checkpoint/color_all_ae_64/IM_AE.model32-199.pth' #'IM_AE.model32-189_raw.pth' #'/mnt/sda/lzz/merge-nocyclic-multi-att/checkpoint/color_all_ae_64_2/IM_AE.model32-189_raw.pth' #'checkpoint/color_all_ae_64/IM_AE.model32-199_raw.pth' #'../reg149-cg-1.pth' #'IM_AE.model32-169_raw.pth' #'/mnt/sda/lzz/merge-nocyclic-multi-att/checkpoint/color_all_ae_64_2/IM_AE.model32-189_raw.pth'
model=torch.load(model_dir)
'''model2={}
for k in model.keys():
if 'bert' in k: #or 'generator' in k:
model2[k]=model[k]'''
self.im_network.load_state_dict(model,strict=False)
print(" [*] Load SUCCESS",model_dir)
else:
print(" [!] Load failed...")
shape_num = len(self.datas)
batch_index_list = np.arange(shape_num)
print("\n\n----------net summary----------")
print("training samples ", shape_num)
print("-------------------------------\n\n")
start_time = time.time()
assert config.epoch==0 or config.iteration==0
training_epoch = config.epoch + int(config.iteration/shape_num)
batch_num = int(shape_num/self.shape_batch_size)
point_batch_num = int(self.load_point_batch_size/self.point_batch_size)
for epoch in range(0, training_epoch): #0-50 no l2 norm, 50-100 l2 norm, both 2dec #150-200 no l2 norm, and big loss #1dec 50-100 big cyc loss, 100-150
self.im_network.train()
np.random.shuffle(batch_index_list)
avg_loss_sp = 0
avg_loss_color = 0
avg_loss_color2 = 0
avg_loss_value = 0
avg_value_out =0
avg_color_out =0
avg_value_out_std =0
avg_color_out_std =0
avg_loss_value_rec =0
avg_loss_color_rec =0
avg_num = 0
self.data_points=np.zeros((self.shape_batch_size,self.load_point_batch_size,3))
self.data_values=np.zeros((self.shape_batch_size,self.load_point_batch_size,1))
self.data_colors=np.zeros((self.shape_batch_size,self.load_point_batch_size,3))
self.data_voxels=np.zeros((self.shape_batch_size,1,64,64,64))
self.data_voxels_colors=np.zeros((self.shape_batch_size,3,64,64,64))
#self.pred_voxels=torch.zeros((self.shape_batch_size,1,64,64,64)).to(self.device)
#self.pred_voxels_colors=torch.zeros((self.shape_batch_size,3,64,64,64)).to(self.device)
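#each iteration below fills the pre-allocated batch arrays from one HDF5 file per shape:
#sampled point coordinates (normalized to roughly [-0.5,0.5]), occupancy values, per-point colors,
#the 64^3 occupancy grid and the 3-channel 64^3 color grid, plus the tokenized caption and its mask.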
for idx in range(batch_num):
#print (idx)
dxb = batch_index_list[idx*self.shape_batch_size:(idx+1)*self.shape_batch_size]
#print (dxb)
self.data_points[:]=0
self.data_values[:]=0
self.data_colors[:]=0
self.data_voxels[:]=0
self.data_voxels_colors[:]=0
#self.pred_voxels[:]=0
#self.pred_voxels_colors[:]=0
batch_paths=np.asarray(self.datas)[dxb]
texts=np.zeros((batch_paths.shape[0], 64))
masks=np.zeros((batch_paths.shape[0], 64))
for b in range(batch_paths.shape[0]): #path in batch_paths:
text_list=batch_paths[b][0].split(' ')[:-1] #.astype('int')
text_array = np.asarray(list(map(int, text_list)))
path='../hdf5_train_new/'+batch_paths[b][1]+'.hdf5'
name=batch_paths[b][1]
data_dict = h5py.File(path, 'r')
self.data_points[b,:,:]=((data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5)
self.data_values[b,:,:]=(data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32))
self.data_colors[b,:,:]=(data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0)
texts[b,:min(64,len(text_list))]=text_array[:min(64,len(text_list))]
masks[b,:min(64,len(text_list))]=1
#print (self.data_points.shape,self.data_values.shape, self.data_colors.shape)
tmp_data_voxels_colors = data_dict['voxels_colors'][:]/255.0
tmp_data_voxels_colors = np.transpose(tmp_data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors[b,:,:,:,:]=(np.reshape(tmp_data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels[b,:,:,:,:]=(np.reshape(data_dict['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
#print ('datapoints', data_dict['points_'+str(self.sample_vox_size)].shape, self.data_points.shape)
batch_voxels = self.data_voxels.astype(np.float32) #[dxb].astype(np.float32)
batch_voxels_colors = self.data_voxels_colors.astype(np.float32) # [dxb].astype(np.float32)
if point_batch_num==1:
point_coord = self.data_points#[dxb]
point_value = self.data_values#[dxb]
point_color = self.data_colors#[dxb]
else:
which_batch = 0 #np.random.randint(point_batch_num)
#take one sub-batch of sampled points per shape (slice along the point axis, not the shape axis)
point_coord = self.data_points[:,which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]
point_value = self.data_values[:,which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]
point_color = self.data_colors[:,which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]
batch_voxels = torch.from_numpy(batch_voxels).float()
batch_voxels_colors = torch.from_numpy(batch_voxels_colors).float()
#step=1 #round(batch_voxels_colors.shape[-1]/self.sample_vox_size)
#print (step)
#batch_voxels_colors_16=batch_voxels_colors[:,:,0:64:step,0:64:step,0:64:step].to(self.device)
#print ('voxels color 16',batch_voxels_colors_16.shape)
point_coord = torch.from_numpy(point_coord).float()
point_value = torch.from_numpy(point_value).float()
point_color = torch.from_numpy(point_color).float()
batch_voxels = batch_voxels.to(self.device)
batch_voxels_colors = batch_voxels_colors.to(self.device)
point_coord = point_coord.to(self.device)
point_value = point_value.to(self.device)
point_color = point_color.to(self.device)
texts=torch.from_numpy(texts).to(self.device).long()
masks=torch.from_numpy(masks).to(self.device).bool()
self.im_network.zero_grad()
z_vector,z_vector_color, z_vector_c2, z_vector_std, z_vector_color_std, z_vector_color2_std, net_out, residue_color, net_out_std, residue_color_std, words = self.im_network(texts,masks, torch.cat((batch_voxels,batch_voxels_colors),1), None,None,None,None, point_coord, None, is_training=True)
with torch.no_grad():
frame_batch_num = 1
point_coord = self.frame_coords_train
point_coord = torch.unsqueeze(point_coord, 0)
point_coord = point_coord.repeat(z_vector.shape[0],1,1)
_,_,_,model_out,color_final,_,_ = self.im_network(None, masks, None, z_vector, z_vector_color, z_vector_c2, None, point_coord, words, is_training=False)
model_out[torch.where(model_out>self.sampling_threshold)]=1
model_out[torch.where(model_out<=self.sampling_threshold)]=0
model_out=torch.reshape(model_out, (-1,1,16,16,16))
pred_shape=self.upsample(model_out) #self.pred_voxels[:]=
color_final=torch.transpose(color_final,1,2)
color_final=torch.reshape(color_final, (-1,3,16,16,16))
pred_color=self.upsample(color_final)
#pred_color[:,0,:,:,:][torch.where(batch_voxels[:,0,:,:,:]==0)]=0
#pred_color[:,1,:,:,:][torch.where(batch_voxels[:,0,:,:,:]==0)]=0
#pred_color[:,2,:,:,:][torch.where(batch_voxels[:,0,:,:,:]==0)]=0 #gt mask 0 3, pred mask 50 7, no mask 100 5
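#cycle branch: decode a coarse 16^3 occupancy/color volume from the text latents, upsample it
#to 64^3 and re-encode it; the re-encoded latents are later pulled towards the voxel-encoder
#latents by the *_rec loss terms below.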
z_vector_rec, z_vector_c2_rec =self.im_network.encoder(torch.cat((pred_shape, pred_color),1), is_training=False)
'''batch_voxels_colors[:,2,:,:,:][torch.where(batch_voxels[:,0,:,:,:]==0)]=pred_color[:,2,:,:,:][torch.where(batch_voxels[:,0,:,:,:]==0)]
batch_voxels_colors[:,1,:,:,:][torch.where(batch_voxels[:,0,:,:,:]==0)]=pred_color[:,1,:,:,:][torch.where(batch_voxels[:,0,:,:,:]==0)]
batch_voxels_colors[:,0,:,:,:][torch.where(batch_voxels[:,0,:,:,:]==0)]=pred_color[:,0,:,:,:][torch.where(batch_voxels[:,0,:,:,:]==0)]
origin_shape=torch.cat((batch_voxels,batch_voxels_colors),1)
z_vector_std2, z_vector_color2_std2 =self.im_network.encoder(origin_shape, is_training=False)'''
#z_vector_rec=z_vector_rec.detach()
#z_vector_c2_rec=z_vector_c2_rec.detach()
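#total loss: text-to-shape latent matching (value/color2 terms), occupancy and masked color
#regression at the sampled points (the *_std terms), and small cycle-reconstruction terms on
#the re-encoded latents; the scalar weights are set inline below.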
errSP_value = self.loss(z_vector, z_vector_std)*1
errSP_color2 = self.loss(z_vector_c2, z_vector_color2_std)*1.0
#errSP_value_out = self.loss(net_out, point_value)*0.02
point_value3_2=point_value.repeat(1,1,3)
#errSP_color_out = self.color_loss(residue_color, point_color, point_value3_2)*1 #0.002, 0.1, gpu2, 100-150; 0.02,1, gpu0 0-50
errSP_value_out_std = self.loss(net_out_std, point_value)*1
errSP_color_out_std = self.color_loss(residue_color_std, point_color, point_value3_2)*10.0
errSP_value_rec = self.loss(z_vector_rec, z_vector_std)*0.001
errSP_color2_rec = self.loss(z_vector_c2_rec, z_vector_color2_std)*0.0005
errSP=errSP_value+ errSP_color2+ errSP_value_out_std+errSP_color_out_std +errSP_value_rec + errSP_color2_rec# +errSP_value_rec+errSP_color_rec+errSP_color2_rec +errSP_value_rec_text +errSP_color_rec_text +errSP_color2_rec_text
errSP.backward()
#nn.utils.clip_grad_norm(list(self.im_network.generator_color.parameters())+list(self.im_network.dalle.parameters()) , 0.05)
torch.nn.utils.clip_grad_norm_(
self.im_network.parameters(), 1
)
self.optimizer.step()
self.optimizer2.step()
avg_loss_value += errSP_value.item()
avg_loss_color2 += errSP_color2.item()
#avg_value_out += errSP_value_out.item()
#avg_color_out += errSP_color_out.item()
avg_value_out_std += errSP_value_out_std.item()
avg_color_out_std += errSP_color_out_std.item()
avg_loss_value_rec += errSP_value_rec.item()
avg_loss_color_rec += errSP_color2_rec.item()
#avg_loss_color2_rec += errSP_color2_rec.item()
'''avg_loss_value_rec += errSP_value_rec.item()
avg_loss_color_rec += errSP_color_rec.item()
avg_loss_color2_rec += errSP_color2_rec.item()
avg_loss_value_rec_text += errSP_value_rec_text.item()
avg_loss_color_rec_text += errSP_color_rec_text.item()
avg_loss_color2_rec_text += errSP_color2_rec_text.item()'''
avg_loss_sp += errSP.item()
avg_num += 1
#print(str(self.sample_vox_size)+" Epoch: [%2d/%2d] time: %4.4f,loss_value_sp: %.6f, loss_color_sp: %.6f, loss_value_out_std: %.6f, loss_color_out_std: %.6f, loss_value_sp_rec: %.6f, loss_color_2_rec: %.6f, loss_sp: %.6f" % (epoch, training_epoch, time.time() - start_time,avg_loss_value/avg_num, avg_loss_color2/avg_num, avg_value_out_std/avg_num, avg_color_out_std/avg_num, avg_loss_value_rec/avg_num, avg_loss_color2_rec/avg_num, avg_loss_sp/avg_num))
print(str(self.sample_vox_size)+" Epoch: [%2d/%2d] time: %4.4f,loss_value_sp: %.6f, loss_color_sp: %.6f, loss_value_out_std: %.6f, loss_color_out_std: %.6f, loss_value_rec: %.6f, loss_color_rec: %.6f, loss_sp: %.6f" % (epoch, training_epoch, time.time() - start_time,avg_loss_value/avg_num, avg_loss_color2/avg_num, avg_value_out_std/avg_num, avg_color_out_std/avg_num, avg_loss_value_rec/avg_num, avg_loss_color_rec/avg_num, avg_loss_sp/avg_num))
if epoch%10==9:
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
save_dir = os.path.join(self.checkpoint_path,self.checkpoint_name+str(self.sample_vox_size)+"-"+str(epoch)+"_raw.pth")
self.checkpoint_manager_pointer = (self.checkpoint_manager_pointer+1)%self.max_to_keep
#delete checkpoint
if self.checkpoint_manager_list[self.checkpoint_manager_pointer] is not None:
if os.path.exists(self.checkpoint_manager_list[self.checkpoint_manager_pointer]):
os.remove(self.checkpoint_manager_list[self.checkpoint_manager_pointer])
#save checkpoint
torch.save(self.im_network.state_dict(), save_dir)
#update checkpoint manager
self.checkpoint_manager_list[self.checkpoint_manager_pointer] = save_dir
#write file
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
fout = open(checkpoint_txt, 'w')
for i in range(self.max_to_keep):
pointer = (self.checkpoint_manager_pointer+self.max_to_keep-i)%self.max_to_keep
if self.checkpoint_manager_list[pointer] is not None:
fout.write(self.checkpoint_manager_list[pointer]+"\n")
fout.close()
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
save_dir = os.path.join(self.checkpoint_path,self.checkpoint_name+str(self.sample_vox_size)+"-"+str(epoch)+".pth")
self.checkpoint_manager_pointer = (self.checkpoint_manager_pointer+1)%self.max_to_keep
#delete checkpoint
if self.checkpoint_manager_list[self.checkpoint_manager_pointer] is not None:
if os.path.exists(self.checkpoint_manager_list[self.checkpoint_manager_pointer]):
os.remove(self.checkpoint_manager_list[self.checkpoint_manager_pointer])
#save checkpoint
torch.save(self.im_network.state_dict(), save_dir)
#update checkpoint manager
self.checkpoint_manager_list[self.checkpoint_manager_pointer] = save_dir
#write file
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
fout = open(checkpoint_txt, 'w')
for i in range(self.max_to_keep):
pointer = (self.checkpoint_manager_pointer+self.max_to_keep-i)%self.max_to_keep
if self.checkpoint_manager_list[pointer] is not None:
fout.write(self.checkpoint_manager_list[pointer]+"\n")
fout.close()
color_cube_float = np.zeros([3, self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
model_float = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
conf = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32)
#print (model_float.shape)
dimc = self.cell_grid_size #4
dimf = self.frame_grid_size #64
frame_flag = np.zeros([dimf+2,dimf+2,dimf+2],np.uint8)
color_cube = np.ones([3,dimf+2,dimf+2,dimf+2]).astype('float32')
queue = []
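#coarse-to-fine extraction: first evaluate occupancy/color on the dimf^3 frame grid, then push
#boundary cells (where the 3x3x3 neighborhood of frame flags disagrees) onto a queue and
#re-evaluate them at full cell resolution, flood-filling outwards across the surface;
#cells that are fully inside are filled with occupancy 1 directly.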
frame_batch_num = int(dimf**3/self.test_point_batch_size) #8
assert frame_batch_num>0
#print (dimf #64, dimf**3,262144, self.test_point_batch_size, 32768 , frame_batch_num 8)
#get frame grid values
for i in range(frame_batch_num):
point_coord = self.frame_coords[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
point_coord = np.expand_dims(point_coord, axis=0)
point_coord = torch.from_numpy(point_coord)
point_coord = point_coord.to(self.device)
_,_, model_out_, color_out_ = self.im_network(None, z, z_color, point_coord, is_training=False)
#print ('cube 0',torch.unique(color_out_.detach()))
#print ('model out', model_out_.shape, color_out_.shape) torch.Size([1, 32768, 1]) torch.Size([1, 32768, 3])
model_out = model_out_.detach().cpu().numpy()[0]
color_out_ = color_out_.detach().cpu().numpy()[0]
#print (color_out_.shape)
color_out = np.transpose(color_out_,(1,0))
x_coords = self.frame_x[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
y_coords = self.frame_y[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
z_coords = self.frame_z[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
#print (frame_flag.shape, x_coords,y_coords,z_coords, x_coords+1, y_coords+1,z_coords+1)
#print (model_out.shape, color_out.shape, self.test_point_batch_size, color_flag[:,x_coords,y_coords,z_coords].shape) (32768, 1) (32768, 3) 32768 (3, 32768)
frame_flag[x_coords+1,y_coords+1,z_coords+1] = np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]) #66,66,66
conf[x_coords+1,y_coords+1,z_coords+1] = np.reshape(model_out.astype(float), [self.test_point_batch_size])
color_cube[:,x_coords+1,y_coords+1,z_coords+1] = np.reshape(color_out, [3, self.test_point_batch_size]) #66,66,66
#print (x_coords,y_coords,z_coords,x_coords.shape,y_coords.shape,z_coords.shape)
#print ('cube 1',color_out.shape, np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]).shape, np.reshape(color_out, [3, self.test_point_batch_size]).shape, np.unique(color_cube), color_cube[:,x_coords,y_coords,z_coords].shape, frame_flag[x_coords+1,y_coords+1,z_coords+1].shape)
#get queue and fill up ones
for i in range(1,dimf+1):
for j in range(1,dimf+1):
for k in range(1,dimf+1):
x_coords = self.cell_x+(i-1)*dimc
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
#model_float[x_coords+1,y_coords+1,z_coords+1] = 1.0
#print (color_cube[:,i,j,k].shape, color_cube_float[:,x_coords+1,y_coords+1,z_coords+1])
color_cube_float[0,x_coords+1,y_coords+1,z_coords+1] = color_cube[0,i,j,k]
color_cube_float[1,x_coords+1,y_coords+1,z_coords+1] = color_cube[1,i,j,k]
color_cube_float[2,x_coords+1,y_coords+1,z_coords+1] = color_cube[2,i,j,k]
#print (i,j,k,color_cube[0,i,j,k]*255,color_cube[1,i,j,k]*255,color_cube[2,i,j,k]*255)
maxv = np.max(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
minv = np.min(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
if maxv!=minv:
queue.append((i,j,k))
elif maxv==1:
x_coords = self.cell_x+(i-1)*dimc
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = 1.0
#print (color_cube[:,i,j,k].shape, color_cube_float[:,x_coords+1,y_coords+1,z_coords+1])
#color_cube_float[0,x_coords+1,y_coords+1,z_coords+1] = color_cube[0,i,j,k]
#color_cube_float[1,x_coords+1,y_coords+1,z_coords+1] = color_cube[1,i,j,k]
#color_cube_float[2,x_coords+1,y_coords+1,z_coords+1] = color_cube[2,i,j,k]
#print ('c',color_cube[:,i,j,k], color_cube[:,i,j,k].shape)
cell_batch_size = dimc**3
cell_batch_num = int(self.test_point_batch_size/cell_batch_size)
assert cell_batch_num>0
#run queue
while len(queue)>0:
batch_num = min(len(queue),cell_batch_num)
point_list = []
cell_coords = []
for i in range(batch_num):
point = queue.pop(0)
point_list.append(point)
cell_coords.append(self.cell_coords[point[0]-1,point[1]-1,point[2]-1])
cell_coords = np.concatenate(cell_coords, axis=0)
cell_coords = np.expand_dims(cell_coords, axis=0)
cell_coords = torch.from_numpy(cell_coords)
cell_coords = cell_coords.to(self.device)
_,_, model_out_batch_, color_out_batch_ = self.im_network(None, z,z_color, cell_coords, is_training=False)
model_out_batch = model_out_batch_.detach().cpu().numpy()[0]
color_out_batch = color_out_batch_.detach().cpu().numpy()[0]
for i in range(batch_num):
point = point_list[i]
#print (model_out_batch.shape, color_out_batch.shape)
model_out = model_out_batch[i*cell_batch_size:(i+1)*cell_batch_size,0]
#color_out = color_out_batch[i*cell_batch_size:(i+1)*cell_batch_size,:]
#print ('color out',color_out.shape)
x_coords = self.cell_x+(point[0]-1)*dimc
y_coords = self.cell_y+(point[1]-1)*dimc
z_coords = self.cell_z+(point[2]-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = model_out
#for c in range(3):
# color_cube_float[c,x_coords+1,y_coords+1,z_coords+1] = color_out[:,c]
if np.max(model_out)>self.sampling_threshold:
for i in range(-1,2):
pi = point[0]+i
if pi<=0 or pi>dimf: continue
for j in range(-1,2):
pj = point[1]+j
if pj<=0 or pj>dimf: continue
for k in range(-1,2):
pk = point[2]+k
if pk<=0 or pk>dimf: continue
if (frame_flag[pi,pj,pk] == 0):
frame_flag[pi,pj,pk] = 1
queue.append((pi,pj,pk))
return model_float, color_cube_float, color_cube
#output shape as ply and point cloud as ply
def test_mesh_point(self, config):
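#evaluation: load a fixed checkpoint, generate a colored volume from each test caption with
#z2voxel, and export (a) the predicted occupied voxels and ground-truth voxels as colored point
#PLYs and (b) a colored mesh obtained by marching cubes plus nearest-neighbor color lookup
#(optionally also a high-resolution 258^3 mesh and a sampled colored point cloud).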
#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
if 1:
model_dir='checkpoint/color_all_ae_64/res64.model64-199.pth'
self.im_network.load_state_dict(torch.load(model_dir),strict=True)
print(" [*] Load SUCCESS", model_dir)
else:
print(" [!] Load failed...")
return
self.im_network.eval()
#print (self.im_network)
#self.im_network.model.dropout.train()
#for t in range(config.start, min(len(self.data_voxels),config.end)):
idx=0
for data in self.datas[config.start:config.end]:
text_list=data[0].split(' ')[:-1] #.astype('int')
text_array = np.asarray(list(map(int, text_list)))
#print (data[1])
#if 'b0531a' not in data[1]: #c3b6c, ad174, b4ee137
# continue
path='../hdf5_test_new/'+data[1]+'.hdf5'
data_dict = h5py.File(path, 'r')
#path=glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5_test/cd942*')[0]
name=path.split('/')[-1]
#if os.path.exists("val/"+str(name)+str(data[2][:50])+"_mesh_pred.ply"):
# continue
data_dict = h5py.File(path, 'r')
self.data_points=((data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5)
self.data_values=(data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32))
self.data_colors=(data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0)
tmp_data_voxels_colors = data_dict['voxels_colors'][:]/255.0
tmp_data_voxels_colors = np.transpose(tmp_data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors=(np.reshape(tmp_data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels=(np.reshape(data_dict['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
t=0
batch_voxels_ = self.data_voxels[t:t+1].astype(np.float32)
batch_voxels = torch.from_numpy(batch_voxels_)
batch_voxels = batch_voxels.to(self.device)
batch_voxels_colors = self.data_voxels_colors[t:t+1].astype(np.float32)
batch_voxels_colors = torch.from_numpy(batch_voxels_colors)
batch_voxels_colors = batch_voxels_colors.to(self.device)
#print (torch.unique(batch_voxels_colors))
texts=np.zeros((1, 32))
masks=np.zeros((1, 32))
texts[0,:min(32,len(text_list))]=text_array[:min(32,len(text_list))]
masks[0,:min(32,len(text_list))]=1
texts=torch.from_numpy(texts).to(self.device).long()
masks=torch.from_numpy(masks).to(self.device).bool()
#z_vector, _, _ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None, None, is_training=False)
#model_z,z_vector_color,out_all,_,_,_,_ = self.im_network(texts, masks, None, None, None,None, None, is_training=False)
#model_z, _, z_vector_c2,_,_,_,_ = self.im_network(texts, masks, None, None,None, None,None, None, is_training=False)
model_z,_, z_vector_c2, _,_,_,_, words = self.im_network(texts,masks, None, None,None,None,None, None, None)
#print (model_z.shape,z_vector_color.shape,'modelz')
#z_path=glob.glob('../fast-bert-color/result_test/'+name.split('.')[0]+'*')[0]
'''z_path=glob.glob('../fast-bert-color/result_train/'+'*')[idx]
name=z_path.split('/')[-1]
z_cct=np.load(z_path)
text=z_path.split('/')[-1]
z_cct=torch.from_numpy(z_cct).cuda()
print (idx,z_cct.shape)
model_z=torch.unsqueeze(z_cct[:256],0)
z_vector_color=torch.unsqueeze(z_cct[256:],0)
idx+=1'''
model_float, color_cube_float, frame_flag, color_cube = self.z2voxel(model_z, z_vector_c2, words, masks, config)
from plyfile import PlyData,PlyElement
#print (color_cube.shape,'color cube',model_float.shape,np.unique(color_cube))
some_array=[]
size=258
for i in range(1,64):
for j in range(1,64):
for k in range(1,64):
if frame_flag[1:-1,1:-1,1:-1][int(i),int(j),int(k)]>0.5:
some_array.append((i,j,k,color_cube[2,int(i),int(j),int(k)]*255,color_cube[1,int(i),int(j),int(k)]*255,color_cube[0,int(i),int(j),int(k)]*255))
some_array = np.array(some_array, dtype=[('x', 'float32'), ('y', 'float32'), ('z', 'float32'), ('red', 'uint8'), ('green', 'uint8'), ('blue', 'uint8')])
el = PlyElement.describe(some_array, 'vertex')
PlyData([el]).write('result/res64/'+name+str(data[2][:50].replace('/',' '))+'test_new_input.ply')
some_array=[]
size=258
#print (self.data_voxels.shape, self.data_voxels_colors.shape)
for i in range(0,64):
for j in range(0,64):
for k in range(0,64):
if self.data_voxels[0,0,i,j,k]>0.5:
some_array.append((i,j,k,self.data_voxels_colors[0,2,int(i),int(j),int(k)]*255,self.data_voxels_colors[0,1,int(i),int(j),int(k)]*255,self.data_voxels_colors[0,0,int(i),int(j),int(k)]*255)) #255,255,255))
some_array = np.array(some_array, dtype=[('x', 'float32'), ('y', 'float32'), ('z', 'float32'), ('red', 'uint8'), ('green', 'uint8'), ('blue', 'uint8')])
el = PlyElement.describe(some_array, 'vertex')
PlyData([el]).write('result/res64/'+name+str(data[2][:50].replace('/',' '))+'_gt.ply')
model_pad=np.zeros((66,66,66))
model_pad[1:-1,1:-1,1:-1]=frame_flag[1:-1,1:-1,1:-1] #model_float[1:-1:4,1:-1:4,1:-1:4]
vertices, triangles = mcubes.marching_cubes(model_pad, self.sampling_threshold)
x = np.linspace(0, 66, 66)
y = np.linspace(0, 66, 66)
z = np.linspace(0, 66, 66)
#color_cube[:,1:-1,1:-1,1:-1]=color_cube
color_cube[:,0,:,:]=color_cube[:,1,:,:]
color_cube[:,:,0,:]=color_cube[:,:,1,:]
color_cube[:,:,:,0]=color_cube[:,:,:,1]
color_cube[:,-1,:,:]=color_cube[:,-2,:,:]
color_cube[:,:,-1,:]=color_cube[:,:,-2,:]
color_cube[:,:,:,-1]=color_cube[:,:,:,-2]
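#colors live on the (padded) voxel grid, so build nearest-neighbor interpolators over the grid
#and sample them at the marching-cubes vertex positions to color the mesh.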
#color_cube[:,1:-1,1:-1,1:-1]=self.data_voxels_colors[0,:,:,:,:]
my_interpolating_function0 = RegularGridInterpolator((x, y, z), color_cube[0,:,:,:],method='nearest') #_float[0,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function1 = RegularGridInterpolator((x, y, z), color_cube[1,:,:,:],method='nearest') #_float[1,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function2 = RegularGridInterpolator((x, y, z), color_cube[2,:,:,:],method='nearest') #_float[2,1:-1:4,1:-1:4,1:-1:4])
color0=my_interpolating_function0(vertices)
color1=my_interpolating_function1(vertices)
color2=my_interpolating_function2(vertices)
colors=np.zeros((color0.shape[0],3))
colors[:,0]=color0
colors[:,1]=color1
colors[:,2]=color2
write_ply_triangle("result/res64/"+str(name)+str(data[2][:50].replace('/',' '))+"_mesh_pred.ply", vertices, triangles, colors)
if config.high_resolution:
model_pad=np.zeros((258,258,258))
model_pad[1:-1,1:-1,1:-1]= model_float[1:-1,1:-1,1:-1] #model_float[1:-1:4,1:-1:4,1:-1:4]
vertices, triangles = mcubes.marching_cubes(model_float, self.sampling_threshold)
x = np.linspace(0, 258,258)
y = np.linspace(0, 258,258)
z = np.linspace(0, 258,258)
color_cube=color_cube_float
#color_cube[:,1:-1,1:-1,1:-1]=color_cube
color_cube[:,0,:,:]=color_cube[:,1,:,:]
color_cube[:,:,0,:]=color_cube[:,:,1,:]
color_cube[:,:,:,0]=color_cube[:,:,:,1]
color_cube[:,-1,:,:]=color_cube[:,-2,:,:]
color_cube[:,:,-1,:]=color_cube[:,:,-2,:]
color_cube[:,:,:,-1]=color_cube[:,:,:,-2]
#color_cube[:,1:-1,1:-1,1:-1]=self.data_voxels_colors[0,:,:,:,:]
my_interpolating_function0 = RegularGridInterpolator((x, y, z), color_cube[0,:,:,:],method='nearest') #_float[0,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function1 = RegularGridInterpolator((x, y, z), color_cube[1,:,:,:],method='nearest') #_float[1,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function2 = RegularGridInterpolator((x, y, z), color_cube[2,:,:,:],method='nearest') #_float[2,1:-1:4,1:-1:4,1:-1:4])
color0=my_interpolating_function0(vertices)
color1=my_interpolating_function1(vertices)
color2=my_interpolating_function2(vertices)
colors=np.zeros((color0.shape[0],3))
colors[:,0]=color0
colors[:,1]=color1
colors[:,2]=color2
write_ply_triangle("result/res64/"+str(name)+str(data[2][:50].replace('/',' '))+"_mesh_258_"+str(idx)+".ply", vertices, triangles, colors)
sampled_points_normals = sample_points_triangle(vertices, triangles, 2048)
vertices_tensor=torch.from_numpy(vertices.astype(np.float32)).cuda()
sampled_points_normals_int=sampled_points_normals.astype('int')
#print (sampled_points_normals.shape, np.unique(sampled_points_normals_int[:,:3]), np.unique(sampled_points_normals[:,3:] ) )
colors=color_cube[:,sampled_points_normals_int[:,0],sampled_points_normals_int[:,1],sampled_points_normals_int[:,2]]
write_ply_point_normal("result/res64/"+str(name)+str(data[2][:50].replace('/',' '))+"_pc.ply", sampled_points_normals, colors) #, colrs)
#print("[sample]")
def get_z(self, config):
#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
if os.path.exists(checkpoint_txt):
fin = open(checkpoint_txt)
model_dir = fin.readline().strip()
fin.close()
model_dir='checkpoint/color_all_ae_64/IM_AE.model16-199_raw.pth'
self.im_network.load_state_dict(torch.load(model_dir))
print(" [*] Load SUCCESS", model_dir)
else:
print(" [!] Load failed...")
return
self.im_network.eval()
#for t in range(config.start, min(len(self.data_voxels),config.end)):
for path in glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5_train/*.hdf5'): #self.data_paths: #[config.start:config.end]:
print (path)
name=path.split('/')[-1]
data_dict = h5py.File(path, 'r')
self.data_points=((data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5)
self.data_values=(data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32))
self.data_colors=(data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0)
tmp_data_voxels_colors = data_dict['voxels_colors'][:]/255.0
tmp_data_voxels_colors = np.transpose(tmp_data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors=(np.reshape(tmp_data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels=(np.reshape(data_dict['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
t=0
batch_voxels_ = self.data_voxels[t:t+1].astype(np.float32)
batch_voxels = torch.from_numpy(batch_voxels_)
batch_voxels = batch_voxels.to(self.device)
batch_voxels_colors = self.data_voxels_colors[t:t+1].astype(np.float32)
batch_voxels_colors = torch.from_numpy(batch_voxels_colors)
batch_voxels_colors = batch_voxels_colors.to(self.device)
#print (torch.unique(batch_voxels_colors))
#z_vector, _, _ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None, None, is_training=False)
#model_z,_,_ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None,None, None, is_training=False)
model_z,z_vector_color,_,_ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None,None, None, is_training=False)
z=model_z.detach().cpu().numpy()
z_vector_color=z_vector_color.detach().cpu().numpy()
#print (z.shape, z_vector_color.shape)
z=np.concatenate((z,z_vector_color),1)
print (z.shape)
np.save('../feat32_color_train/'+name+'.npy',z)
'''#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
if os.path.exists(checkpoint_txt):
fin = open(checkpoint_txt)
model_dir = fin.readline().strip()
fin.close()
self.im_network.load_state_dict(torch.load(model_dir))
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
hdf5_path = self.checkpoint_dir+'/'+self.model_dir+'/'+self.dataset_name+'_train_z.hdf5'
shape_num = len(self.data_voxels)
hdf5_file = h5py.File(hdf5_path, mode='w')
hdf5_file.create_dataset("zs", [shape_num,self.z_dim], np.float32)
self.im_network.eval()
#print(shape_num)
for t in range(shape_num):
batch_voxels = self.data_voxels[t:t+1].astype(np.float32)
batch_voxels = torch.from_numpy(batch_voxels)
batch_voxels = batch_voxels.to(self.device)
out_z,_ ,_= self.im_network(batch_voxels, None, None, is_training=False)
hdf5_file["zs"][t:t+1,:] = out_z.detach().cpu().numpy()
hdf5_file.close()
print("[z]")'''
def test_z(self, config, batch_z, dim):
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
for t in range(batch_z.shape[0]):
model_z = batch_z[t:t+1]
model_z = torch.from_numpy(model_z)
model_z = model_z.to(self.device)
model_float = self.z2voxel(model_z)
#img1 = np.clip(np.amax(model_float, axis=0)*256, 0,255).astype(np.uint8)
#img2 = np.clip(np.amax(model_float, axis=1)*256, 0,255).astype(np.uint8)
#img3 = np.clip(np.amax(model_float, axis=2)*256, 0,255).astype(np.uint8)
#cv2.imwrite(config.sample_dir+"/"+str(t)+"_1t.png",img1)
#cv2.imwrite(config.sample_dir+"/"+str(t)+"_2t.png",img2)
#cv2.imwrite(config.sample_dir+"/"+str(t)+"_3t.png",img3)
#print (model_float)
vertices, triangles = mcubes.marching_cubes(model_float, self.sampling_threshold)
vertices = (vertices.astype(np.float32)-0.5)/self.real_size-0.5
#vertices = self.optimize_mesh(vertices,model_z)
write_ply(config.sample_dir+"/"+"out"+str(t)+".ply", vertices, triangles)
print("[sample Z]")
| [] |
2024-01-10 | liuzhengzhe/Towards-Implicit-Text-Guided-Shape-Generation | manipulation~model_color_table.py | import os,csv
import time
import math
import random
import numpy as np
import h5py
import glob
import scipy.interpolate
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
from scipy.interpolate import RegularGridInterpolator
import mcubes
import mcubes as mc
from utils import *
import copy
from mcubes import marching_cubes #, grid_interp
#pytorch 1.2.0 implementation
#from dalle_pytorch import OpenAIDiscreteVAE, DALLE
#from dalle_pytorch.transformer import Transformer,Transformer_mutual
from transformers import AutoModelForSequenceClassification, AutoConfig
from torch.nn.utils import spectral_norm
from pytorch_lamb import Lamb
from transformers import (
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
)
def iou(s1,s2):
pred=torch.flatten(s1)
gt=torch.flatten(s2)
inter1=torch.where((pred==1) & (gt==1))[0].shape[0]
union1=torch.where((pred==1) | (gt==1))[0].shape[0]
iou=inter1/union1
return iou
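#example (hypothetical tensors): iou(torch.tensor([1,0,1]), torch.tensor([1,1,0])) == 1/3,
#since the binary masks share one occupied voxel out of three occupied in the union.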
def grid_interp(vol, points):
"""
Interpolate volume data at given points
Inputs:
vol: 4D torch tensor (C, Nz, Ny, Nx)
points: point locations (Np, 3)
Outputs:
output: interpolated data (Np, C)
"""
#vol=torch.from_numpy(vol)#.cuda()
if vol.is_cuda:
return mc.grid_interp_cuda(vol, points)
else:
return mc.grid_interp_cpu(vol, points) #'''===
class PositionalEncoder(nn.Module):
def __init__(self, d_model, max_seq_len = 80):
super().__init__()
self.d_model = d_model
# create constant 'pe' matrix with values dependent on
# pos and i
pe = torch.zeros(max_seq_len, d_model)
for pos in range(max_seq_len):
for i in range(0, d_model, 2):
pe[pos, i] = \
math.sin(pos / (10000 ** ((2 * i)/d_model)))
pe[pos, i + 1] = \
math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
# make embeddings relatively larger
x = x * math.sqrt(self.d_model)
#add constant to embedding
seq_len = x.size(1)
#print ('xshape', x.shape, seq_len)
x = x + Variable(self.pe[:,:seq_len], requires_grad=False).cuda()
return x
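#scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V, with an optional padding mask
#(masked positions are set to -1e9 before the softmax) and dropout on the attention weights.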
def attention(q, k, v, d_k, mask=None, dropout=None):
#print ('qkv',q.shape, k.shape, v.shape)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
#print ('mask score ', mask.shape, scores.shape)
#print ('s1',scores.shape)
if mask is not None:
mask = mask.unsqueeze(1).unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1e9)
#print ('s2',scores.shape)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
#print (scores.shape, v.shape)
output = torch.matmul(scores, v)
#print ('output',output.shape)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout = 0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model, bias=True)
self.v_linear = nn.Linear(d_model, d_model, bias=True)
self.k_linear = nn.Linear(d_model, d_model, bias=True)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model, bias=True)
def forward(self, q, k, v, mask=None):
#print ('qkv',q.shape, k.shape, v.shape)
bs = q.size(0)
# perform linear operation and split into h heads
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
# transpose to get dimensions bs * h * sl * d_model
k = k.transpose(1,2)
q = q.transpose(1,2)
v = v.transpose(1,2)
#print (k.shape, q.shape, v.shape, self.d_k, mask.shape)
# calculate attention using function we will define next
scores = attention(q, k, v, self.d_k, mask, self.dropout)
#print ('score',scores.shape)
# concatenate heads and put through final linear layer
concat = scores.transpose(1,2).contiguous()\
.view(bs, -1, self.d_model)
#print ('cct',concat.shape)
output = self.out(concat)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=16, dropout = 0.1):
super().__init__()
# d_ff defaults to 16 here (much smaller than the usual transformer value of 2048)
self.linear_1 = nn.Linear(d_model, d_ff, bias=True)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model, bias=True)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class Norm(nn.Module):
def __init__(self, d_model, eps = 1e-5):
super().__init__()
self.size = d_model
# create two learnable parameters to calibrate normalisation
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \
/ (x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
class DecoderLayer(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
#self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
#self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
#self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(heads, d_model) #nn.MultiheadAttention(embed_dim=16, num_heads=4)
self.ff = FeedForward(d_model).cuda()
def forward(self, x, e_outputs, src_mask):
#print ('1',self.norm_2.bias)
#x2 = self.norm_1(x)
#x = x + self.dropout_1(self.attn_1(x2, x2, x2)) # trg_mask
x = self.norm_2(x)
#print ('2',torch.unique(x))
#x=torch.transpose(x,0,1)
#e_outputs=torch.transpose(e_outputs,0,1)
#print ('x,e',x.shape, e_outputs.shape)
#print (self.attn_2(x, e_outputs, e_outputs)[0].shape, x.shape)
x = x +self.dropout_2(self.attn_2(x, e_outputs, e_outputs.clone(), src_mask))
# x=torch.transpose(x,0,1)
#print ('3',torch.unique(x))
x = self.norm_3(x)
#print ('4',torch.unique(x))
x = x+self.dropout_3(self.ff(x))
#print ('5',torch.unique(x))
return x
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
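#occupancy decoder: point coordinates concatenated with the shape latent form the queries,
#which cross-attend to the BERT word features through N=4 DecoderLayers; the attended feature
#is concatenated back onto (point, latent) and an MLP predicts per-point occupancy.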
class generator(nn.Module):
def __init__(self, z_dim, point_dim, gf_dim):
super(generator, self).__init__()
self.z_dim = z_dim
self.point_dim = point_dim
self.gf_dim = gf_dim
d_model=32
self.linear_1 = nn.Linear(self.z_dim+self.point_dim+d_model, self.gf_dim*8, bias=True)
self.linear_2 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_3 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_4 = nn.Linear(self.gf_dim*8, self.gf_dim*4, bias=True)
self.linear_5 = nn.Linear(self.gf_dim*4, self.gf_dim*2, bias=True)
self.linear_6 = nn.Linear(self.gf_dim*2, self.gf_dim*1, bias=True)
self.linear_7 = nn.Linear(self.gf_dim*1, 1, bias=True)
self.linear_8 = nn.Linear(self.gf_dim*1, 3, bias=True)
nn.init.normal_(self.linear_1.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_1.bias,0)
nn.init.normal_(self.linear_2.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_2.bias,0)
nn.init.normal_(self.linear_3.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_3.bias,0)
nn.init.normal_(self.linear_4.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_4.bias,0)
nn.init.normal_(self.linear_5.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_5.bias,0)
nn.init.normal_(self.linear_6.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_6.bias,0)
nn.init.normal_(self.linear_7.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_7.bias,0)
nn.init.normal_(self.linear_8.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_8.bias,0)
self.linear_text_k = nn.Linear(768, d_model, bias=True)
#self.linear_text_v = nn.Linear(768, d_model, bias=True)
self.linear_shape_q = nn.Linear(259, d_model, bias=True)
self.linear_final = nn.Linear(d_model, d_model, bias=True)
nn.init.normal_(self.linear_text_k.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_k.bias,0)
#nn.init.normal_(self.linear_text_v.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_v.bias,0)
nn.init.normal_(self.linear_shape_q.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_shape_q.bias,0)
self.N=4
self.layers = get_clones(DecoderLayer(d_model, 4), self.N)
self.pe = PositionalEncoder(d_model)
'''dropout=0.1
self.softmax=torch.nn.Softmax(1)
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
#self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(4, d_model)
self.ff = FeedForward(d_model).cuda()'''
def forward(self, points, z, texts, masks, is_training=False):
zs = z.view(-1,1,self.z_dim).repeat(1,points.size()[1],1)
#print (points.shape, z.shape)
pointz = torch.cat([points,zs],2)
#print (texts.shape, pointz.shape)
#print (torch.unique(points),torch.unique(zs))
linear_text_k = self.linear_text_k(texts)
#linear_text_v = self.linear_text_v(texts)
linear_shape_q = self.linear_shape_q(pointz.detach())
#print (linear_text_k.shape, linear_shape_q.shape)
'''att1=torch.einsum('btd,bsd->bts', linear_text_k, linear_shape_q) #b, t, s
att1=self.softmax(att1)
position_sense_feat=torch.einsum('bts,btd->bsd', att1, linear_text_v ) '''
#print ('pointz',torch.unique(pointz), torch.unique(texts))
#print ('weight', torch.unique(self.linear_text_k.weight), torch.unique(self.linear_shape_q.weight))
#print ('bias', torch.unique(self.linear_text_k.bias), torch.unique(self.linear_shape_q.bias))
x=linear_shape_q
src_mask=masks
#print (masks.shape)
'''x = self.dropout_2(self.attn_2(linear_shape_q, linear_text_k, linear_text_v, src_mask))
x2 = self.norm_3(x)
x = self.dropout_3(self.ff(x2))'''
#print ('b',linear_text_k.shape, texts.shape)
linear_text_k = self.pe(linear_text_k)
#print ('a',linear_text_k.shape)
#print ('x1',torch.unique(x),self.linear_text_k.)
#print ('linear_text_k',torch.unique(linear_text_k))
for i in range(self.N):
#print ('i',i,x.shape, linear_text_k.shape, src_mask.shape)
x = self.layers[i](x, linear_text_k, src_mask)
x=self.linear_final(x)/5.0
#print ('pointz',torch.unique(pointz))
#print ('x2',torch.unique(x))
#print (torch.unique(pointz) ,torch.unique(x))
#print (torch.unique(pointz),torch.unique(x))
pointz = torch.cat([pointz, x],2)
#print (torch.unique(position_sense_feat))
l1 = self.linear_1(pointz)
l1 = F.leaky_relu(l1, negative_slope=0.02, inplace=True)
l2 = self.linear_2(l1)
l2 = F.leaky_relu(l2, negative_slope=0.02, inplace=True)
l3 = self.linear_3(l2)
l3 = F.leaky_relu(l3, negative_slope=0.02, inplace=True)
l4 = self.linear_4(l3)
l4 = F.leaky_relu(l4, negative_slope=0.02, inplace=True)
l5 = self.linear_5(l4)
l5 = F.leaky_relu(l5, negative_slope=0.02, inplace=True)
l6 = self.linear_6(l5)
l6 = F.leaky_relu(l6, negative_slope=0.02, inplace=True)
l7 = self.linear_7(l6)
l8 = self.linear_8(l6)
#l7 = torch.clamp(l7, min=0, max=1)
l7 = torch.max(torch.min(l7, l7*0.01+0.99), l7*0.01)
l8 = torch.max(torch.min(l8, l8*0+1), l8*0)
#for i in range(4096):
# #print ('l8',l8[0,i,:])
return l7
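#generator_color below mirrors the occupancy decoder above but conditions on the color latent
#and returns only the 3-channel color branch (l8), clamped to [0,1].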
class generator_color(nn.Module):
def __init__(self, z_dim, point_dim, gf_dim):
super(generator_color, self).__init__()
self.z_dim = z_dim
self.point_dim = point_dim
self.gf_dim = gf_dim
d_model=32
self.linear_1 = nn.Linear(self.z_dim+self.point_dim+d_model, self.gf_dim*8, bias=True)
self.linear_2 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_3 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_4 = nn.Linear(self.gf_dim*8, self.gf_dim*4, bias=True)
self.linear_5 = nn.Linear(self.gf_dim*4, self.gf_dim*2, bias=True)
self.linear_6 = nn.Linear(self.gf_dim*2, self.gf_dim*1, bias=True)
self.linear_7 = nn.Linear(self.gf_dim*1, 1, bias=True)
self.linear_8 = nn.Linear(self.gf_dim*1, 3, bias=True)
nn.init.normal_(self.linear_1.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_1.bias,0)
nn.init.normal_(self.linear_2.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_2.bias,0)
nn.init.normal_(self.linear_3.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_3.bias,0)
nn.init.normal_(self.linear_4.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_4.bias,0)
nn.init.normal_(self.linear_5.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_5.bias,0)
nn.init.normal_(self.linear_6.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_6.bias,0)
nn.init.normal_(self.linear_7.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_7.bias,0)
nn.init.normal_(self.linear_8.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_8.bias,0)
self.linear_text_k = nn.Linear(768, d_model, bias=True)
#self.linear_text_v = nn.Linear(768, d_model, bias=True)
self.linear_shape_q = nn.Linear(259, d_model, bias=True)
self.linear_final = nn.Linear(d_model, d_model, bias=True)
nn.init.normal_(self.linear_text_k.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_k.bias,0)
#nn.init.normal_(self.linear_text_v.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_v.bias,0)
nn.init.normal_(self.linear_shape_q.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_shape_q.bias,0)
self.N=4
self.layers = get_clones(DecoderLayer(d_model, 4), self.N)
self.pe = PositionalEncoder(d_model)
#multihead_attn = nn.MultiheadAttention(embed_dim=16, num_heads=4)
#self.transformer_model = nn.Transformer(d_model=16, nhead=4, num_encoder_layers=0, num_decoder_layers=1, dim_feedforward=16)
'''self.softmax=torch.nn.Softmax(1)
dropout=0.1
self.softmax=torch.nn.Softmax(1)
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
#self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(4, d_model)
self.ff = FeedForward(d_model).cuda()'''
def forward(self, points, z, texts, masks, is_training=False):
zs = z.view(-1,1,self.z_dim).repeat(1,points.size()[1],1)
pointz = torch.cat([points,zs],2)
#print (texts.shape, pointz.shape)
#print (torch.unique(points),torch.unique(zs))
linear_text_k = self.linear_text_k(texts)
#linear_text_v = self.linear_text_v(texts)
linear_shape_q = self.linear_shape_q(pointz.detach())
#print (linear_text_k.shape, linear_shape_q.shape)
'''att1=torch.einsum('btd,bsd->bts', linear_text_k, linear_shape_q) #b, t, s
att1=self.softmax(att1)
position_sense_feat=torch.einsum('bts,btd->bsd', att1, linear_text_v ) '''
x=linear_shape_q
#linear_text_k = self.pe(linear_text_k)
#print ('generator color',torch.unique(x))
src_mask=masks
for i in range(self.N):
x = self.layers[i](x, linear_text_k, src_mask)
x=self.linear_final(x)/5.0
#print ('pointz',torch.unique(pointz))
#print ('x2',torch.unique(x))
#print (torch.unique(pointz) ,torch.unique(x))
#torch.nn.Transformer(d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', custom_encoder=None, custom_decoder=None)
#attn_output, attn_output_weights = multihead_attn(x, key, value)
#print (x.shape,linear_text_k.shape)
#x = self.transformer_model(torch.transpose(linear_text_k,0,1), torch.transpose(x,0,1) )
#print (x.shape)
#x=torch.transpose(x,0,1)
#print (torch.unique(pointz),torch.unique(x))
#print (masks.shape)
'''x =self.dropout_2(self.attn_2(linear_shape_q, linear_text_k, linear_text_v, src_mask))
x2 = self.norm_3(x)
x = self.dropout_3(self.ff(x2))'''
'''linear_text_k = self.linear_text_k(texts)
linear_text_v = self.linear_text_v(texts)
linear_shape_q = self.linear_shape_q(pointz)
att1=torch.einsum('btd,bsd->bts', linear_text_k, linear_shape_q) #b, t, s
att1=self.softmax(att1)
position_sense_feat=torch.einsum('bts,btd->bsd', att1, linear_text_v ) '''
pointz = torch.cat([pointz, x],2)
l1 = self.linear_1(pointz)
l1 = F.leaky_relu(l1, negative_slope=0.02, inplace=True)
l2 = self.linear_2(l1)
l2 = F.leaky_relu(l2, negative_slope=0.02, inplace=True)
l3 = self.linear_3(l2)
l3 = F.leaky_relu(l3, negative_slope=0.02, inplace=True)
l4 = self.linear_4(l3)
l4 = F.leaky_relu(l4, negative_slope=0.02, inplace=True)
l5 = self.linear_5(l4)
l5 = F.leaky_relu(l5, negative_slope=0.02, inplace=True)
l6 = self.linear_6(l5)
l6 = F.leaky_relu(l6, negative_slope=0.02, inplace=True)
#l7 = self.linear_7(l6)
l8 = self.linear_8(l6)
#l7 = torch.clamp(l7, min=0, max=1)
#l7 = torch.max(torch.min(l7, l7*0.01+0.99), l7*0.01)
l8 = torch.max(torch.min(l8, l8*0+1), l8*0)
#for i in range(4096):
# #print ('l8',l8[0,i,:])
return l8
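#the voxel encoder below maps the 4-channel (occupancy + RGB) 64^3 grid to two z_dim-dimensional
#codes, one for shape and one for color, each squashed with a sigmoid.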
class encoder(nn.Module):
def __init__(self, ef_dim, z_dim):
super(encoder, self).__init__()
self.ef_dim = ef_dim
self.z_dim = z_dim
self.conv_1 = nn.Conv3d(1+3, self.ef_dim, 4, stride=2, padding=1, bias=False)
self.in_1 = nn.InstanceNorm3d(self.ef_dim)
self.conv_2 = nn.Conv3d(self.ef_dim, self.ef_dim*2, 4, stride=2, padding=1, bias=False)
self.in_2 = nn.InstanceNorm3d(self.ef_dim*2)
self.conv_3 = nn.Conv3d(self.ef_dim*2, self.ef_dim*4, 4, stride=2, padding=1, bias=False)
self.in_3 = nn.InstanceNorm3d(self.ef_dim*4)
self.conv_4 = nn.Conv3d(self.ef_dim*4, self.ef_dim*8, 4, stride=2, padding=1, bias=False)
self.in_4 = nn.InstanceNorm3d(self.ef_dim*8)
self.conv_5 = nn.Conv3d(self.ef_dim*8, self.z_dim, 4, stride=1, padding=0, bias=True)
self.conv_6 = nn.Conv3d(self.ef_dim*8, self.z_dim, 4, stride=1, padding=0, bias=True)
nn.init.xavier_uniform_(self.conv_1.weight)
nn.init.xavier_uniform_(self.conv_2.weight)
nn.init.xavier_uniform_(self.conv_3.weight)
nn.init.xavier_uniform_(self.conv_4.weight)
nn.init.xavier_uniform_(self.conv_5.weight)
nn.init.constant_(self.conv_5.bias,0)
nn.init.xavier_uniform_(self.conv_6.weight)
nn.init.constant_(self.conv_6.bias,0)
def forward(self, inputs, is_training=False):
#print ('input',inputs.shape)
d_1 = self.in_1(self.conv_1(inputs))
d_1 = F.leaky_relu(d_1, negative_slope=0.02, inplace=True)
d_2 = self.in_2(self.conv_2(d_1))
d_2 = F.leaky_relu(d_2, negative_slope=0.02, inplace=True)
d_3 = self.in_3(self.conv_3(d_2))
d_3 = F.leaky_relu(d_3, negative_slope=0.02, inplace=True)
d_4 = self.in_4(self.conv_4(d_3))
d_4 = F.leaky_relu(d_4, negative_slope=0.02, inplace=True)
d_5 = self.conv_5(d_4)
d_5 = d_5.view(-1, self.z_dim)
d_5 = torch.sigmoid(d_5)
d_6 = self.conv_6(d_4)
d_6 = d_6.view(-1, self.z_dim)
d_6 = torch.sigmoid(d_6)
return d_5, d_6
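#ConditionInjection is FiLM-style conditioning: a small spectral-normalized MLP maps the noise
#vector to per-feature (gamma, beta) which scale and shift the normalized activations.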
class ConditionInjection(nn.Module):
def __init__(self, num_features, condition_dim=64, norm=nn.LayerNorm):
super().__init__()
if norm is not None:
if norm is nn.LayerNorm:
self.norm = norm(num_features, elementwise_affine=False)
elif norm is nn.BatchNorm1d:
self.norm = norm(num_features, affine=False)
else:
raise ValueError('unknown norm type')
self.condition_dim = condition_dim
fc1 = nn.Linear(condition_dim, condition_dim)
fc1 = initLinear(fc1)
fc1 = spectral_norm(fc1)
fc2 = nn.Linear(condition_dim, num_features * 2)
fc2 = initLinear(fc2)
fc2 = spectral_norm(fc2)
self.projection = nn.Sequential(
fc1,
nn.LeakyReLU(.2),
fc2,
)
def forward(self, x, z=None): # x shape
if z is None:
z = torch.randn(x.size(0), self.condition_dim).float().cuda()
y = self.projection(z)
#print (x.shape, y.shape, z.shape)
for _ in range(x.dim() - y.dim()):
y = y.unsqueeze(-1)
gamma, beta = y.chunk(2, 1)
# print(gamma.shape, beta.shape)
out = self.norm(x) if self.norm is not None else x
out = out * (1+gamma) + beta
return out
def initLinear(block, type='xavier_uniform'):
assert type in ['xavier_uniform']
nn.init.xavier_uniform_(block.weight, gain=0.02)
nn.init.constant_(block.bias, 0)
return block
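#LatentGenerator maps a 512-d text latent, conditioned on a noise vector (64-d by default) via
#the two ConditionInjection layers, to a diversified 512-d latent through spectral-normalized FC layers.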
class LatentGenerator(nn.Module):
def __init__(self, num_features, hidden=64, z_dim=64):
super().__init__()
self.z_dim = z_dim
fc1 = nn.Linear(num_features, num_features)
fc2 = nn.Linear(num_features, num_features)
fc3 = nn.Linear(num_features, hidden)
fc4 = nn.Linear(hidden, hidden*2)
fc5 = nn.Linear(hidden*2, num_features)
fc6 = nn.Linear(num_features, num_features)
fc1 = initLinear(fc1)
fc2 = initLinear(fc2)
fc3 = initLinear(fc3)
fc4 = initLinear(fc4)
fc5 = initLinear(fc5)
fc6 = initLinear(fc6)
self.fc1 = spectral_norm(fc1)
self.fc2 = spectral_norm(fc2)
self.fc3 = spectral_norm(fc3)
self.fc4 = spectral_norm(fc4)
self.fc5 = spectral_norm(fc5)
self.fc6 = spectral_norm(fc6)
self.norm1 = ConditionInjection(num_features, z_dim)
self.norm2 = ConditionInjection(hidden, z_dim)
# self.norm3 = ConditionInjection(num_features, z_dim)
def forward(self, x, z=None):
if z is None:
#draw noise of the conditioning dimension if none is provided
z = torch.randn(x.size(0), self.z_dim).float().cuda()
out = self.fc1(x)
out = self.norm1(out, z)
out = F.leaky_relu(out, .2)
out = self.fc2(out)
out = F.leaky_relu(out, .2)
out = self.fc3(out)
out = self.norm2(out, z)
out = F.leaky_relu(out, .2)
out = self.fc4(out)
out = F.leaky_relu(out, .2)
out = self.fc5(out)
out = F.leaky_relu(out, .2)
out = self.fc6(out)
out = torch.sigmoid(out)
return out
class CA_NET(nn.Module):
# some code is modified from vae examples
# (https://github.com/pytorch/examples/blob/master/vae/main.py)
def __init__(self):
super(CA_NET, self).__init__()
self.t_dim = 512
self.c_dim = 512
self.fc = nn.Linear(self.t_dim, self.c_dim * 2, bias=True)
self.relu = nn.ReLU()
def encode(self, text_embedding):
x = self.relu(self.fc(text_embedding))
mu = x[:, :self.c_dim]
logvar = x[:, self.c_dim:]
return mu, logvar
def reparametrize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = torch.cuda.FloatTensor(std.size()).normal_()
eps = Variable(eps)
return eps.mul(std).add_(mu)
def forward(self, text_embedding):
mu, logvar = self.encode(text_embedding)
c_code = self.reparametrize(mu, logvar)
return c_code, mu, logvar
class im_network(nn.Module):
def __init__(self, ef_dim, gf_dim, z_dim, point_dim):
super(im_network, self).__init__()
self.ef_dim = ef_dim
self.gf_dim = gf_dim
self.z_dim = z_dim
self.point_dim = point_dim
self.encoder = encoder(self.ef_dim, self.z_dim)
pretrained_path='bert-base-uncased'
config = AutoConfig.from_pretrained(
str(pretrained_path), #num_labels=len(dataBunch.labels)
)
self.model = AutoModelForSequenceClassification.from_pretrained(
str(pretrained_path), config=config, state_dict=None
)
#self.ca=CA_NET()
self.net_g=LatentGenerator(512)
#self.net_g_color=LatentGenerator(256)
self.encoder = encoder(self.ef_dim, self.z_dim)
self.generator = generator(self.z_dim, self.point_dim, self.gf_dim)
self.generator_color = generator_color(self.z_dim, self.point_dim, self.gf_dim)
self.cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
def forward(self, texts, masks, inputs, z_vector, input_idxs, z_vector_c2,noise,point_coord, words, is_training=False):
if texts is not None:
text_inputs = {
"input_ids": texts,
"attention_mask": masks,
}
if is_training:
#print ('traiing')
z_vector_std, z_vector_c_std = self.encoder(inputs, is_training=is_training)
z_vector, _, z_vector_c2, words = self.model(**text_inputs)
#num_noise=10
#noise = torch.randn(z_vector.size(0)*num_noise, 64).float().to('cuda')
#print ('noise.shape',noise.shape)
num_noise=int(noise.shape[0]/z_vector.size(0))
#z_vector_ca, mu, var=self.ca(torch.cat((z_vector, z_vector_c2), 1))
#z_vector=z_vector_ca[:,:256]
#z_vector_c2=z_vector_ca[:,256:]
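#diversified latent matching: replicate the concatenated shape/color text latent num_noise times,
#push each copy through net_g with a different noise vector, and select the candidate closest in
#L1 distance to the latent of the ground-truth colored voxels; the selected candidate (or the one
#given by input_idxs, if provided) is what the downstream losses see.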
z_in=torch.reshape(torch.unsqueeze(torch.cat((z_vector, z_vector_c2), 1),1).repeat(1,num_noise,1), (-1, 512))
#print ('2', torch.unique(z_in))
#z_noise=torch.cat((z_in, noise), 2)
z_div=self.net_g(z_in, noise.detach())
#print ('3', torch.unique(z_div))
z_div=torch.reshape(z_div, (-1, num_noise, 512))
z_std=torch.unsqueeze(torch.cat((z_vector_std, z_vector_c_std), 1),1).repeat(1,num_noise,1)
#print ('4', torch.unique(z_std))
diff=torch.sum(torch.abs(z_div-z_std),2)
#print ('diff', diff.shape)
idx=torch.argmin(diff,1)
#print ('5', idx, idx.shape)
#z_best=z_div[:,idx,:]
#print (z_div.shape, z_best.shape, z_best)
idxs=torch.unsqueeze(torch.unsqueeze(idx,-1),-1)
if input_idxs is not None:
idxs=input_idxs
idxs_=idxs.repeat(1, num_noise, z_in.shape[-1])
#print ('idx, z div', idx.shape, z_div.shape, torch.unique(idx))
#print ('z_div', z_div.shape, idxs_.shape)
z_best=torch.gather(z_div, 1, idxs_)[:,0,:]
#print ('z_best', z_best.shape)
#z_best=torch.index_select(z_div, 0, idx)
#print ('z best', z_best.shape, z_best)
z_vector_div=z_best[:, :256]
z_vector_c2_div=z_best[:, 256:]
return z_vector_div,idxs, z_vector_c2_div, z_vector_std,None, z_vector_c_std, None, None, z_vector, z_vector_c2, words
else:
if texts is not None:
z_vector,z_vector_color, z_vector_c2, words = self.model(**text_inputs)
#z_vector_ca, mu, var=self.ca(torch.cat((z_vector, z_vector_c2), 1))
#z_vector=z_vector_ca[:,:256]
#z_vector_c2=z_vector_ca[:,256:]
'''num_noise=30
noise = torch.randn(z_vector.size(0)*num_noise, 64).float().to('cuda')
z_in=torch.reshape(torch.unsqueeze(torch.cat((z_vector, z_vector_c2), 1),1).repeat(1,num_noise,1), (-1, 512))
z_div=self.net_g(z_in, noise.detach()*25)
z_vector=z_div[:, :256]
z_vector_c2=z_div[:, 256:]'''
return z_vector, None, z_vector_c2,None, None,None,None, words
if z_vector is not None and point_coord is not None:
#print (point_coord.shape, z_vector.shape, words.shape, masks.shape)
net_out = self.generator(point_coord, z_vector, words, masks, is_training=is_training)
net_out_color = self.generator_color(point_coord, z_vector_c2, words, masks, is_training=is_training)
#print ('net out unique', torch.unique(net_out))
return None,None,None, net_out, net_out_color, None, None #, residue_color+s1_color, s1_color
#elif z_vector is not None and point_coord is not None:
# net_out = self.generator(point_coord, z_vector, is_training=is_training)
# return None,None,None, net_out, None,None,None,
elif (inputs is not None) and (inputs.shape[1]==4):
#z_vector_std, z_vector_color_std, z_vector_c2_std = self.encoder(inputs, is_training=is_training)
z_vector_std, z_vector_c_std = self.encoder(inputs, is_training=is_training)
return z_vector_std,None, z_vector_c_std,None, None,None,None #, net_out, None,None,None,
class IM_color_table(object):
def __init__(self, config):
#progressive training
#1-- (16, 16*16*16)
#2-- (32, 16*16*16)
#3-- (64, 16*16*16*4)
self.sample_vox_size = config.sample_vox_size
print (self.sample_vox_size)
if self.sample_vox_size==16:
self.load_point_batch_size = 16*16*16
self.point_batch_size = 16*16*16
self.shape_batch_size = 32
elif self.sample_vox_size==32:
self.load_point_batch_size = 16*16*16
self.point_batch_size = 16*16*16
self.shape_batch_size = 32
elif self.sample_vox_size==64:
self.load_point_batch_size = 16*16*16*4
self.point_batch_size = 16*16*16
self.shape_batch_size = 10
self.input_size = 64 #input voxel grid size
self.ef_dim = 32
self.gf_dim = 128
self.z_dim = 256
self.point_dim = 3
self.dataset_name = config.dataset
#self.dataset_load = self.dataset_name + '_train'
#self.data_paths=glob.glob('hdf5/*.hdf5') #/ccd5e*.hdf5')
self.datas=[]
#start=1
with open('table_train.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
#if start==1:
# start=0
# continue
text=row[2]
name=row[1]
self.datas.append((text,name))
#break
#for i in range(32):
# self.datas.append(self.datas[0])
if not (config.train):# or config.getz):
#self.data_paths=glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5/*.hdf5')
self.datas=[]
with open('color_table.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
text=row[2]
name=row[1]
text_str=row[0]
self.datas.append((text,name,text_str))
self.checkpoint_dir = config.checkpoint_dir
self.data_dir = config.data_dir
#data_hdf5_name = self.data_dir+'/'+self.dataset_load+'.hdf5'
#self.data_paths=glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5/*.hdf5')
#print ('data name lzz',data_hdf5_name)
'''if not (config.train or config.getz):
self.dataset_load = self.dataset_name + '_test'
data_hdf5_name = self.data_dir+'/'+self.dataset_load+'.hdf5'
data_dict = h5py.File(data_hdf5_name, 'r')
print ('load')
self.data_points = (data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5
self.data_values = data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32)
self.data_colors = data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0
self.data_voxels = data_dict['voxels'][:]
self.data_voxels_colors = data_dict['voxels_colors'][:]/255.0
self.data_voxels_colors = np.transpose(self.data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors = np.reshape(self.data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size])
#reshape to NCHW
self.data_voxels = np.reshape(self.data_voxels, [-1,1,self.input_size,self.input_size,self.input_size])
#else:
# print("error: cannot load "+data_hdf5_name)
# exit(0)'''
#print ('loaded')
if torch.cuda.is_available():
self.device = torch.device('cuda')
torch.backends.cudnn.benchmark = True
else:
self.device = torch.device('cpu')
#build model
self.im_network = im_network(self.ef_dim, self.gf_dim, self.z_dim, self.point_dim)
self.im_network.to(self.device)
		#freeze the pretrained text model, voxel encoder and both generators;
		#their parameters receive no gradients during this training stage
for param_tensor in self.im_network.model.parameters():
param_tensor.requires_grad=False
for param_tensor in self.im_network.encoder.parameters():
param_tensor.requires_grad=False #print(param_tensor, "\t", self.im_network.state_dict()[param_tensor].size())
for param_tensor in self.im_network.generator.parameters():
param_tensor.requires_grad=False #print(param_tensor, "\t", self.im_network.state_dict()[param_tensor].size())
for param_tensor in self.im_network.generator_color.parameters():
param_tensor.requires_grad=False #print(param_tensor, "\t", self.im_network.state_dict()[param_tensor].size())
self.optimizer = torch.optim.Adam(self.im_network.parameters(), lr=0.001, betas=(config.beta1, 0.999))
#self.optimizer = self.get_optimizer(0.001, optimizer_type="lamb")
#self.optimizer = torch.optim.Adam([{'params': base_params}, {'params': self.im_network.model.parameters(), 'lr': 0.001}], lr=config.learning_rate*1, betas=(config.beta1, 0.999))
#self.scheduler = self.get_scheduler(
# self.optimizer, t_total=int(60470*config.epoch), schedule_type="warmup_cosine"
#)
		#PyTorch has no built-in checkpoint manager, so keep at most
		#max_to_keep recent checkpoints manually
self.max_to_keep = 2
self.checkpoint_path = os.path.join(self.checkpoint_dir, self.model_dir)
self.checkpoint_name='IM_color_table.model'
self.checkpoint_manager_list = [None] * self.max_to_keep
self.checkpoint_manager_pointer = 0
#loss
def KL_loss(mu, logvar):
			# -0.5 * mean(1 + log(sigma^2) - mu^2 - sigma^2)
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.mean(KLD_element).mul_(-0.5)
return KLD
self.KL_loss=KL_loss
def network_loss(G,point_value):
return torch.mean((G-point_value)**2)
self.loss = network_loss
def network_loss_mask(G,point_value,mask):
mask=mask.unsqueeze(1).repeat(1,256)
#print (G.shape, point_value.shape, mask.shape)
return torch.mean(((G-point_value)**2)*mask)
self.loss_mask = network_loss_mask
def color_loss(G,point_color,mask):
return torch.mean(((G-point_color)*mask)**2)
self.color_loss = color_loss
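		# loss helpers defined above:
		#   KL_loss           - KL divergence of a diagonal Gaussian against N(0, I)
		#   network_loss      - plain MSE between prediction and target
		#   network_loss_mask - MSE with a per-sample 0/1 mask broadcast over the 256-d latent
		#   color_loss        - MSE on colors, with the mask applied before squaring
		# e.g. network_loss_mask(pred_z, target_z, mask) zeroes the contribution of
		# samples whose mask entry is 0 (the mean still runs over all entries)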
#keep everything a power of 2
self.cell_grid_size = 4
self.frame_grid_size = 64
self.real_size = self.cell_grid_size*self.frame_grid_size #=256, output point-value voxel grid size in testing
self.test_size = 32 #related to testing batch_size, adjust according to gpu memory size
self.test_point_batch_size = self.test_size*self.test_size*self.test_size #do not change
self.test_point_batch_size_in_training=4096
#get coords for training
dima = self.test_size
dim = self.frame_grid_size
self.aux_x = np.zeros([dima,dima,dima],np.uint8)
self.aux_y = np.zeros([dima,dima,dima],np.uint8)
self.aux_z = np.zeros([dima,dima,dima],np.uint8)
multiplier = int(dim/dima)
multiplier2 = multiplier*multiplier
multiplier3 = multiplier*multiplier*multiplier
for i in range(dima):
for j in range(dima):
for k in range(dima):
self.aux_x[i,j,k] = i*multiplier
self.aux_y[i,j,k] = j*multiplier
self.aux_z[i,j,k] = k*multiplier
self.coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
self.coords[i*multiplier2+j*multiplier+k,:,:,:,0] = self.aux_x+i
self.coords[i*multiplier2+j*multiplier+k,:,:,:,1] = self.aux_y+j
self.coords[i*multiplier2+j*multiplier+k,:,:,:,2] = self.aux_z+k
self.coords = (self.coords.astype(np.float32)+0.5)/dim-0.5
self.coords = np.reshape(self.coords,[multiplier3,self.test_point_batch_size,3])
self.coords = torch.from_numpy(self.coords)
self.coords = self.coords.to(self.device)
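		# self.coords now holds multiplier^3 interleaved query grids, each of shape
		# (test_point_batch_size, 3), which together cover the 64^3 frame grid with
		# coordinates normalised to [-0.5, 0.5)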
#get coords for testing
dimc = self.cell_grid_size
dimf = self.frame_grid_size
self.cell_x = np.zeros([dimc,dimc,dimc],np.int32)
self.cell_y = np.zeros([dimc,dimc,dimc],np.int32)
self.cell_z = np.zeros([dimc,dimc,dimc],np.int32)
self.cell_coords = np.zeros([dimf,dimf,dimf,dimc,dimc,dimc,3],np.float32)
self.frame_coords = np.zeros([dimf,dimf,dimf,3],np.float32)
s=16
self.frame_coords_train = torch.zeros([s,s,s,3]).cuda()
self.frame_x = np.zeros([dimf,dimf,dimf],np.int32) #.long()
self.frame_y = np.zeros([dimf,dimf,dimf],np.int32) #.long()
self.frame_z = np.zeros([dimf,dimf,dimf],np.int32) #.long()
for i in range(dimc):
for j in range(dimc):
for k in range(dimc):
self.cell_x[i,j,k] = i
self.cell_y[i,j,k] = j
self.cell_z[i,j,k] = k
for i in range(dimf):
for j in range(dimf):
for k in range(dimf):
self.cell_coords[i,j,k,:,:,:,0] = self.cell_x+i*dimc
self.cell_coords[i,j,k,:,:,:,1] = self.cell_y+j*dimc
self.cell_coords[i,j,k,:,:,:,2] = self.cell_z+k*dimc
self.frame_coords[i,j,k,0] = i
self.frame_coords[i,j,k,1] = j
self.frame_coords[i,j,k,2] = k
self.frame_x[i,j,k] = i
self.frame_y[i,j,k] = j
self.frame_z[i,j,k] = k
for i in range(s):
for j in range(s):
for k in range(s):
self.frame_coords_train[i,j,k,0] = i
self.frame_coords_train[i,j,k,1] = j
self.frame_coords_train[i,j,k,2] = k
self.cell_coords = (self.cell_coords.astype(np.float32)+0.5)/self.real_size-0.5
self.cell_coords = np.reshape(self.cell_coords,[dimf,dimf,dimf,dimc*dimc*dimc,3])
self.cell_x = np.reshape(self.cell_x,[dimc*dimc*dimc])
self.cell_y = np.reshape(self.cell_y,[dimc*dimc*dimc])
self.cell_z = np.reshape(self.cell_z,[dimc*dimc*dimc])
self.frame_x = np.reshape(self.frame_x,[dimf*dimf*dimf])
self.frame_y = np.reshape(self.frame_y,[dimf*dimf*dimf])
self.frame_z = np.reshape(self.frame_z,[dimf*dimf*dimf])
self.frame_coords = (self.frame_coords+0.5)/dimf-0.5
self.frame_coords = np.reshape(self.frame_coords,[dimf*dimf*dimf,3])
self.frame_coords_train = (self.frame_coords_train+0.5)/s-0.5
self.frame_coords_train = torch.reshape(self.frame_coords_train,[s*s*s,3])
#self.conv_edge = nn.Conv3d(3, 3, 3, stride=1, padding=1, groups=3, bias=False)
#self.conv_edge.to(self.device)
self.sampling_threshold = 0.5 #final marching cubes threshold
self.upsample=nn.Upsample(scale_factor=4,mode='trilinear').cuda()
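		# self.upsample lifts the 16^3 grids decoded during training back to 64^3
		# so they can be re-encoded by the frozen voxel encoder for the cycle losses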
def get_optimizer(self, lr, optimizer_type="lamb"):
# Prepare optimiser and schedule
		no_decay = []  # e.g. ["bias", "LayerNorm.weight"] to exempt from weight decay
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in self.im_network.model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.0, #self.weight_decay,
},
{
"params": [
p
for n, p in self.im_network.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
if optimizer_type == "lamb":
optimizer = Lamb(optimizer_grouped_parameters, lr=lr, eps=1e-8)
elif optimizer_type == "adamw":
optimizer = AdamW(
optimizer_grouped_parameters, lr=lr, eps=1e-8
)
return optimizer
def get_scheduler(self, optimizer, t_total, schedule_type="warmup_cosine"):
SCHEDULES = {
"warmup_cosine": get_cosine_schedule_with_warmup,
}
		if schedule_type is None or schedule_type == "none":
			# keep the learning rate constant; assumes get_constant_schedule is
			# imported from transformers, as in the companion training scripts
			return get_constant_schedule(optimizer)
		elif schedule_type == "warmup_constant":
			return get_constant_schedule_with_warmup(
				optimizer, num_warmup_steps=0  # self.warmup_steps
			)
else:
return SCHEDULES[schedule_type](
optimizer,
num_warmup_steps=0, #self.warmup_steps,
num_training_steps=t_total,
)
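	# z2voxel decodes a latent pair (z, z_color) into dense occupancy and color
	# volumes: it first evaluates the decoder on the coarse 64^3 frame grid and,
	# when config.high_resolution is set, flood-fills the occupancy boundary at
	# cell resolution (4x per axis) so that model_float/color_cube_float cover a
	# padded 258^3 grid; frame_flag/color_cube hold the coarse 66^3 result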
def z2voxel(self, z, z_color, words, masks,config):
color_cube_float = np.zeros([3, self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
model_float = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
conf = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32)
dimc = self.cell_grid_size #4
dimf = self.frame_grid_size #64
frame_flag = np.zeros([dimf+2,dimf+2,dimf+2],np.uint8)
color_cube = np.ones([3,dimf+2,dimf+2,dimf+2]).astype('float32')
queue = []
frame_batch_num = int(dimf**3/self.test_point_batch_size) #8
assert frame_batch_num>0
for i in range(frame_batch_num):
point_coord = self.frame_coords[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
point_coord = np.expand_dims(point_coord, axis=0)
point_coord = torch.from_numpy(point_coord)
point_coord = point_coord.to(self.device)
_,_,_, model_out_, color_out_,_,_ = self.im_network(None,masks,None, z,None, z_color,None, point_coord, words, is_training=False)
model_out = model_out_.detach().cpu().numpy()[0]
color_out_ = color_out_.detach().cpu().numpy()[0]
color_out = np.transpose(color_out_,(1,0))
x_coords = self.frame_x[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
y_coords = self.frame_y[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
z_coords = self.frame_z[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
frame_flag[x_coords+1,y_coords+1,z_coords+1] = np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]) #66,66,66
conf[x_coords+1,y_coords+1,z_coords+1] = np.reshape(model_out.astype(float), [self.test_point_batch_size])
color_cube[:,x_coords+1,y_coords+1,z_coords+1] = np.reshape(color_out, [3, self.test_point_batch_size]) #66,66,66
if config.high_resolution:
for i in range(1,dimf+1):
for j in range(1,dimf+1):
for k in range(1,dimf+1):
x_coords = self.cell_x+(i-1)*dimc
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
color_cube_float[0,x_coords+1,y_coords+1,z_coords+1] = color_cube[0,i,j,k]
color_cube_float[1,x_coords+1,y_coords+1,z_coords+1] = color_cube[1,i,j,k]
color_cube_float[2,x_coords+1,y_coords+1,z_coords+1] = color_cube[2,i,j,k]
maxv = np.max(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
minv = np.min(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
if maxv!=minv:
queue.append((i,j,k))
elif maxv==1:
x_coords = self.cell_x+(i-1)*dimc
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = 1.0
cell_batch_size = dimc**3
cell_batch_num = int(self.test_point_batch_size/cell_batch_size)
assert cell_batch_num>0
#run queue
while len(queue)>0:
batch_num = min(len(queue),cell_batch_num)
point_list = []
cell_coords = []
for i in range(batch_num):
point = queue.pop(0)
point_list.append(point)
cell_coords.append(self.cell_coords[point[0]-1,point[1]-1,point[2]-1])
cell_coords = np.concatenate(cell_coords, axis=0)
cell_coords = np.expand_dims(cell_coords, axis=0)
cell_coords = torch.from_numpy(cell_coords)
cell_coords = cell_coords.to(self.device)
_,_,_, model_out_batch_, color_out_batch_,_,_ = self.im_network(None, masks,None,z,None,z_color,None, cell_coords, words, is_training=False)
model_out_batch = model_out_batch_.detach().cpu().numpy()[0]
color_out_batch = color_out_batch_.detach().cpu().numpy()[0]
for i in range(batch_num):
point = point_list[i]
model_out = model_out_batch[i*cell_batch_size:(i+1)*cell_batch_size,0]
x_coords = self.cell_x+(point[0]-1)*dimc
y_coords = self.cell_y+(point[1]-1)*dimc
z_coords = self.cell_z+(point[2]-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = model_out
if np.max(model_out)>self.sampling_threshold:
for i in range(-1,2):
pi = point[0]+i
if pi<=0 or pi>dimf: continue
for j in range(-1,2):
pj = point[1]+j
if pj<=0 or pj>dimf: continue
for k in range(-1,2):
pk = point[2]+k
if pk<=0 or pk>dimf: continue
if (frame_flag[pi,pj,pk] == 0):
frame_flag[pi,pj,pk] = 1
queue.append((pi,pj,pk))
return model_float, color_cube_float, frame_flag, color_cube
@property
def model_dir(self):
return "{}_ae_{}".format(self.dataset_name, self.input_size)
def train(self, config):
#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
print (checkpoint_txt)
if 1: #os.path.exists(checkpoint_txt):
pass
#fin = open(checkpoint_txt)
#model_dir = fin.readline().strip()
'''model=torch.load('/mnt/sda/lzz/imle-cyclic-multi-att-5-noise/checkpoint/color_all_ae_64/IM_AE.model32-209_raw.pth')
model2={}
for k in model.keys():
if 'encoder' in k or 'generator' in k or 'bert' in k:
continue
model2[k]=model[k]
self.im_network.load_state_dict(model2,strict=False)'''
model_dir='checkpoint/color_all_ae_64/div.model64-149.pth'
#model_dir='/mnt/sda/lzz/imle-cyclic-multi-att-5-noise/IM_AE.model32-99_raw.pth' #'checkpoint/color_all_ae_64/IM_AE.model32-94_raw.pth'
self.im_network.load_state_dict(torch.load(model_dir),strict=False)
#model_dir='/mnt/sda/lzz/imle-cyclic-multi-att-5-noise/checkpoint/color_all_ae_64/IM_AE.model32-209_raw.pth'
#self.im_network.load_state_dict(torch.load(model_dir),strict=False)
#model_dir='init149.pth'
#self.im_network.load_state_dict(torch.load(model_dir),strict=False)
#print(" [*] Load SUCCESS",model_dir)
else:
print(" [!] Load failed...")
shape_num = len(self.datas)
batch_index_list = np.arange(shape_num)
batch_index_list2 = np.arange(shape_num)
print("\n\n----------net summary----------")
print("training samples ", shape_num)
print("-------------------------------\n\n")
start_time = time.time()
assert config.epoch==0 or config.iteration==0
training_epoch = config.epoch + int(config.iteration/shape_num)
batch_num = int(shape_num/self.shape_batch_size)
point_batch_num = int(self.load_point_batch_size/self.point_batch_size)
for epoch in range(0, training_epoch): #int(model_dir.split('/')[-1].split('-')[-1].split('_')[0])
self.im_network.train()
np.random.shuffle(batch_index_list)
np.random.shuffle(batch_index_list2)
avg_loss_sp = 0
avg_loss_kl = 0
avg_loss_color = 0
avg_loss_color2 = 0
avg_loss_value = 0
avg_loss_value_unpair =0
avg_loss_color2_unpair =0
avg_value_out =0
avg_color_out =0
avg_value_out_std =0
avg_color_out_std =0
avg_loss_value_rec =0
avg_loss_color2_rec =0
avg_num = 0
self.data_points=np.zeros((self.shape_batch_size,self.load_point_batch_size,3))
self.data_values=np.zeros((self.shape_batch_size,self.load_point_batch_size,1))
self.data_colors=np.zeros((self.shape_batch_size,self.load_point_batch_size,3))
self.data_voxels=np.zeros((self.shape_batch_size,1,64,64,64))
self.data_voxels_colors=np.zeros((self.shape_batch_size,3,64,64,64))
self.data_voxels2=np.zeros((self.shape_batch_size,1,64,64,64))
self.data_voxels_colors2=np.zeros((self.shape_batch_size,3,64,64,64))
#self.pred_voxels=torch.zeros((self.shape_batch_size,1,64,64,64)).to(self.device)
#self.pred_voxels_colors=torch.zeros((self.shape_batch_size,3,64,64,64)).to(self.device)
for idx in range(batch_num):
dxb = batch_index_list[idx*self.shape_batch_size:(idx+1)*self.shape_batch_size]
dxb_text = batch_index_list2[idx*self.shape_batch_size:(idx+1)*self.shape_batch_size]
self.data_points[:]=0
self.data_values[:]=0
self.data_colors[:]=0
self.data_voxels[:]=0
self.data_voxels_colors[:]=0
self.data_voxels2[:]=0
self.data_voxels_colors2[:]=0
batch_paths_shape=np.asarray(self.datas)[dxb]
batch_paths_text=np.asarray(self.datas)[dxb_text]
texts_unpair=np.zeros((batch_paths_text.shape[0], 64))
masks_unpair=np.zeros((batch_paths_text.shape[0], 64))
texts_pair=np.zeros((batch_paths_text.shape[0], 64))
masks_pair=np.zeros((batch_paths_text.shape[0], 64))
for b in range(batch_paths_shape.shape[0]): #path in batch_paths:
text_list_pair=batch_paths_shape[b][0].split(' ')[:-1] #.astype('int')
text_array_pair = np.asarray(list(map(int, text_list_pair)))
text_list_unpair=batch_paths_text[b][0].split(' ')[:-1] #.astype('int')
text_array_unpair = np.asarray(list(map(int, text_list_unpair)))
path='../hdf5_train_new/'+batch_paths_shape[b][1]+'.hdf5'
name=batch_paths_shape[b][1]
data_dict = h5py.File(path, 'r')
self.data_points[b,:,:]=((data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5)
self.data_values[b,:,:]=(data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32))
self.data_colors[b,:,:]=(data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0)
texts_unpair[b,:min(64,len(text_list_unpair))]=text_array_unpair[:min(64,len(text_list_unpair))]
masks_unpair[b,:min(64,len(text_list_unpair))]=1
texts_pair[b,:min(64,len(text_list_pair))]=text_array_pair[:min(64,len(text_list_pair))]
masks_pair[b,:min(64,len(text_list_pair))]=1
tmp_data_voxels_colors = data_dict['voxels_colors'][:]/255.0
tmp_data_voxels_colors = np.transpose(tmp_data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors[b,:,:,:,:]=(np.reshape(tmp_data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels[b,:,:,:,:]=(np.reshape(data_dict['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
path2='../hdf5_train_new/'+batch_paths_text[b][1]+'.hdf5'
name2=batch_paths_text[b][1]
data_dict2 = h5py.File(path2, 'r')
tmp_data_voxels_colors2 = data_dict2['voxels_colors'][:]/255.0
tmp_data_voxels_colors2 = np.transpose(tmp_data_voxels_colors2, (0,4,1,2,3))
self.data_voxels_colors2[b,:,:,:,:]=(np.reshape(tmp_data_voxels_colors2, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels2[b,:,:,:,:]=(np.reshape(data_dict2['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
#print ('datapoints', data_dict['points_'+str(self.sample_vox_size)].shape, self.data_points.shape)
batch_voxels = self.data_voxels.astype(np.float32) #[dxb].astype(np.float32)
batch_voxels_colors = self.data_voxels_colors.astype(np.float32) # [dxb].astype(np.float32)
batch_voxels2 = self.data_voxels2.astype(np.float32) #[dxb].astype(np.float32)
batch_voxels_colors2 = self.data_voxels_colors2.astype(np.float32) # [dxb].astype(np.float32)
if point_batch_num==1:
point_coord = self.data_points#[dxb]
point_value = self.data_values#[dxb]
point_color = self.data_colors#[dxb]
else:
which_batch = 0 #np.random.randint(point_batch_num)
point_coord = self.data_points[which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size] #[dxb][which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]
point_value = self.data_values[which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]#[dxb][which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]
point_color = self.data_colors[which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]#[dxb][which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]
batch_voxels = torch.from_numpy(batch_voxels).float()
batch_voxels_colors = torch.from_numpy(batch_voxels_colors).float()
batch_voxels2 = torch.from_numpy(batch_voxels2).float()
batch_voxels_colors2 = torch.from_numpy(batch_voxels_colors2).float()
ious=torch.zeros((batch_voxels.shape[0])).cuda()
for i in range(batch_voxels.shape[0]):
ious[i]=iou(batch_voxels[i,0,:,:,:],batch_voxels2[i,0,:,:,:])
iou_mask=torch.zeros((batch_voxels.shape[0])).cuda()
iou_mask[torch.where(ious>0.2)]=1
iou_mask=iou_mask.detach()
point_coord = torch.from_numpy(point_coord).float()
point_value = torch.from_numpy(point_value).float()
point_color = torch.from_numpy(point_color).float()
batch_voxels = batch_voxels.to(self.device)
batch_voxels_colors = batch_voxels_colors.to(self.device)
batch_voxels2 = batch_voxels2.to(self.device)
batch_voxels_colors2 = batch_voxels_colors2.to(self.device)
point_coord = point_coord.to(self.device)
point_value = point_value.to(self.device)
point_color = point_color.to(self.device)
texts_unpair=torch.from_numpy(texts_unpair).to(self.device).long()
masks_unpair=torch.from_numpy(masks_unpair).to(self.device).bool()
texts=torch.from_numpy(texts_pair).to(self.device).long()
masks=torch.from_numpy(masks_pair).to(self.device).bool()
num_noise=10
noise = torch.randn(texts.size(0)*num_noise, 64).float().to('cuda')
self.im_network.zero_grad()
z_vector,idxs, z_vector_c2, z_vector_std, z_vector_color_std, z_vector_color2_std, net_out, residue_color, z_vector_init, z_vector_c2_init, words = self.im_network(texts,masks, torch.cat((batch_voxels,batch_voxels_colors),1), None,None,None,noise, point_coord, None, is_training=True)
#z_best=torch.gather(z_div, 1, idxs)[:,0,:]
z_vector_unpair,_, z_vector_c2_unpair, z_vector_std_unpair, z_vector_color_std, z_vector_color2_std_unpair, net_out_unpair, residue_color_unpair, z_vector_unpair_init, z_vector_c2_unpair_init, words_unpair = self.im_network(texts_unpair,masks_unpair, torch.cat((batch_voxels2,batch_voxels_colors2),1), None,idxs,None,noise, point_coord, None, is_training=True) #torch.cat((batch_voxels,batch_voxels_colors),1)
z_in=torch.reshape(torch.unsqueeze(torch.cat((z_vector_init,z_vector_c2_unpair_init), 1),1).repeat(1,num_noise,1), (-1, 512))
z_div=self.im_network.net_g(z_in, noise.detach())
z_div=torch.reshape(z_div, (-1, num_noise, 512))
idxs_=idxs.repeat(1, num_noise, z_in.shape[-1])
#print (z_div.shape, idxs.shape)
z_best=torch.gather(z_div, 1, idxs_)[:,0,:]
z_vector_div=z_best[:, :256]
z_vector_c2_div=z_best[:, 256:]
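				# IMLE-style selection: net_g maps the (paired shape, unpaired color)
				# text latent plus Gaussian noise to num_noise candidate 512-d latents;
				# the index chosen in the paired forward pass (idxs) picks one candidate
				# per sample, split above into 256-d shape and color parts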
#print (z_best.shape, 'best')
#sim_shape=self.im_network.cos(z_vector_std,z_vector_std_unpair).unsqueeze(1).repeat(1,256)
#sim_color=self.im_network.cos(z_vector_color2_std,z_vector_color2_std_unpair).unsqueeze(1).repeat(1,256)
#print (z_vector.shape, sim_shape.shape)
#mix_shape=z_vector*(torch.ones(z_vector.shape).cuda()-sim_shape)+z_vector_unpair*sim_shape
#mix_color=z_vector_c2*(torch.ones(z_vector_c2.shape).cuda()-sim_color)+z_vector_c2_unpair*sim_color
#mix_shape_std=z_vector_std*(torch.ones(z_vector_std.shape).cuda()-sim_shape)+z_vector_std_unpair*sim_shape
#mix_color_std=z_vector_color2_std*(torch.ones(z_vector_color2_std.shape).cuda()-sim_color)+z_vector_color2_std_unpair*sim_color
frame_batch_num = 1
point_coord = self.frame_coords_train
point_coord = torch.unsqueeze(point_coord, 0)
point_coord = point_coord.repeat(z_vector.shape[0],1,1)
#print (masks.shape, z_vector_div.shape, z_vector_c2_div.shape, point_coord.shape, words_unpair.shape)
_,_,_,model_out,color_final,_,_ = self.im_network(None, masks, None,z_vector_div, None,z_vector_c2_div, None, point_coord, words_unpair, is_training=False)
#model_out[torch.where(model_out>self.sampling_threshold)]=1
#model_out[torch.where(model_out<=self.sampling_threshold)]=0
model_out_pad=torch.ones(model_out.shape).cuda()-model_out
model_out_pad=torch.cat((model_out, model_out_pad),-1)
model_out_pad=F.gumbel_softmax(model_out_pad, tau=1, hard=False)
#print (model_out)
model_out= model_out_pad[:,:,0]
#print (model_out, 'model out')
s=16
model_out=torch.reshape(model_out, (z_vector.shape[0],1,s,s,s))
pred_shape=self.upsample(model_out) #self.pred_voxels[:]=
color_final=torch.transpose(color_final,1,2)
color_final=torch.reshape(color_final, (z_vector.shape[0],3,s,s,s))
pred_color=self.upsample(color_final)
pred_color[:,0,:,:,:][torch.where(pred_shape[:,0,:,:,:]<0.5)]=0
pred_color[:,1,:,:,:][torch.where(pred_shape[:,0,:,:,:]<0.5)]=0
pred_color[:,2,:,:,:][torch.where(pred_shape[:,0,:,:,:]<0.5)]=0
#inputs32=torch.cat((batch_voxels,batch_voxels_colors),1) #[:,:,0:64:2,0:64:2,0:64:2]
#inputs32=nn.functional.interpolate(inputs32, scale_factor=0.5, mode='trilinear')
#inputs32=self.upsample(inputs32)
#z_vector_std_16, z_vector_color2_std_16 = self.im_network.encoder(inputs32.detach(), is_training=False)
z_vector_rec, z_vector_c2_rec =self.im_network.encoder(torch.cat((pred_shape, pred_color),1), is_training=False)
#_,_,_,model_out, color_out, color_final , color_s1 = self.im_network(None, None, None, z_vector, z_vector_color, z_vector_c2,None,point_coord, is_training=False)
'''_,_,_,model_out,color_final,_,_ = self.im_network(None, masks, None, z_vector, z_vector_color, z_vector_c2, None, point_coord, words, is_training=False)
model_out[torch.where(model_out>self.sampling_threshold)]=1
model_out[torch.where(model_out<=self.sampling_threshold)]=0
model_out=torch.reshape(model_out, (-1,1,16,16,16))
pred_shape=self.upsample(model_out) #self.pred_voxels[:]=
#_,_,_,_, color_out_, color_final , color_s1 = self.im_network(None, None,pred_shape, z_vector, z_vector_color, z_vector_c2,None, point_coord, is_training=False)
#print (color_out_.shape)
color_final=torch.transpose(color_final,1,2)
color_final=torch.reshape(color_final, (-1,3,16,16,16))
pred_color=self.upsample(color_final) #self.pred_voxels_colors[:]
pred_color[:,0,:,:,:][torch.where(pred_shape[:,0,:,:,:]==0)]=0
pred_color[:,1,:,:,:][torch.where(pred_shape[:,0,:,:,:]==0)]=0
pred_color[:,2,:,:,:][torch.where(pred_shape[:,0,:,:,:]==0)]=0
z_vector_rec, z_vector_c2_rec =self.im_network.encoder(torch.cat((pred_shape, pred_color),1), is_training=False)'''
#z_vector_rec=z_vector_rec.detach()
#z_vector_c2_rec=z_vector_c2_rec.detach()
#kl_loss = self.KL_loss(mu, var)*0.01
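				# losses: pull the selected text latents (paired and unpaired) towards
				# the corresponding voxel-encoder latents, and add a cycle term that
				# re-encodes the decoded 64^3 prediction and compares it with the text
				# latent, gated by iou_mask (sample pairs whose shape IoU exceeds 0.2)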
errSP_value = self.loss(z_vector, z_vector_std)*2
errSP_color2 = self.loss(z_vector_c2, z_vector_color2_std)*1.0
errSP_value_unpair = self.loss(z_vector_unpair, z_vector_std_unpair)*2
errSP_color2_unpair = self.loss(z_vector_c2_unpair, z_vector_color2_std_unpair)*1.0
#errSP_value_out = self.loss(net_out, point_value)
#point_value3_2=point_value.repeat(1,1,3)
#errSP_color_out = self.color_loss(residue_color, point_color, point_value3_2)*10.0
#errSP_value_out_std = self.loss(net_out_std, point_value)
#errSP_color_out_std = self.color_loss(residue_color_std, point_color, point_value3_2)*10.0
errSP_value_rec = self.loss_mask(z_vector_rec, z_vector, iou_mask)*0.01#*iou_mask.detach()
errSP_color2_rec = self.loss_mask(z_vector_c2_rec, z_vector_c2, iou_mask)*0.01#*iou_mask.detach()
errSP=errSP_value+ errSP_color2 +errSP_value_unpair+errSP_color2_unpair+errSP_value_rec+errSP_color2_rec #+ errSP_value_out_std+errSP_color_out_std + errSP_value_rec + errSP_color2_rec# +errSP_value_rec+errSP_color_rec+errSP_color2_rec +errSP_value_rec_text +errSP_color_rec_text +errSP_color2_rec_text
errSP.backward()
#nn.utils.clip_grad_norm(list(self.im_network.generator_color.parameters())+list(self.im_network.dalle.parameters()) , 0.05)
#torch.nn.utils.clip_grad_norm_(
# self.im_network.parameters(), 1
#)
self.optimizer.step()
#avg_loss_kl += kl_loss.item()
avg_loss_value += errSP_value.item()
avg_loss_color2 += errSP_color2.item()
avg_loss_value_unpair += errSP_value_unpair.item()
avg_loss_color2_unpair += errSP_color2_unpair.item()
'''avg_value_out_std += errSP_value_out_std.item()
avg_color_out_std += errSP_color_out_std.item()'''
avg_loss_value_rec += errSP_value_rec.item()
#avg_loss_color_rec += errSP_color_rec.item()
avg_loss_color2_rec += errSP_color2_rec.item()
'''avg_loss_value_rec += errSP_value_rec.item()
avg_loss_color_rec += errSP_color_rec.item()
avg_loss_color2_rec += errSP_color2_rec.item()
avg_loss_value_rec_text += errSP_value_rec_text.item()
avg_loss_color_rec_text += errSP_color_rec_text.item()
avg_loss_color2_rec_text += errSP_color2_rec_text.item()'''
avg_loss_sp += errSP.item()
avg_num += 1
#print(str(self.sample_vox_size)+" Epoch: [%2d/%2d] time: %4.4f,loss_value_sp: %.6f, loss_color_sp: %.6f, loss_value_out_std: %.6f, loss_color_out_std: %.6f, loss_value_sp_rec: %.6f, loss_color_2_rec: %.6f, loss_sp: %.6f" % (epoch, training_epoch, time.time() - start_time,avg_loss_value/avg_num, avg_loss_color2/avg_num, avg_value_out_std/avg_num, avg_color_out_std/avg_num, avg_loss_value_rec/avg_num, avg_loss_color2_rec/avg_num, avg_loss_sp/avg_num))
print(str(self.sample_vox_size)+" Epoch: [%2d/%2d] time: %4.4f,loss_value_sp: %.6f, loss_color_sp: %.6f,loss_value_sp_unpair: %.6f, loss_color_sp_unpair: %.6f,loss_value_sp_rec: %.6f, loss_color_2_rec: %.6f, loss_sp: %.6f" % (epoch, training_epoch, time.time() - start_time,avg_loss_value/avg_num, avg_loss_color2/avg_num, avg_loss_value_unpair/avg_num, avg_loss_color2_unpair/avg_num, avg_loss_value_rec/avg_num,avg_loss_color2_rec/avg_num, avg_loss_sp/avg_num))
if epoch%5==4:
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
save_dir = os.path.join(self.checkpoint_path,self.checkpoint_name+str(self.sample_vox_size)+"-"+str(epoch)+"_raw.pth")
self.checkpoint_manager_pointer = (self.checkpoint_manager_pointer+1)%self.max_to_keep
#delete checkpoint
if self.checkpoint_manager_list[self.checkpoint_manager_pointer] is not None:
if os.path.exists(self.checkpoint_manager_list[self.checkpoint_manager_pointer]):
os.remove(self.checkpoint_manager_list[self.checkpoint_manager_pointer])
#save checkpoint
torch.save(self.im_network.state_dict(), save_dir)
#update checkpoint manager
self.checkpoint_manager_list[self.checkpoint_manager_pointer] = save_dir
#write file
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
fout = open(checkpoint_txt, 'w')
for i in range(self.max_to_keep):
pointer = (self.checkpoint_manager_pointer+self.max_to_keep-i)%self.max_to_keep
if self.checkpoint_manager_list[pointer] is not None:
fout.write(self.checkpoint_manager_list[pointer]+"\n")
fout.close()
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
save_dir = os.path.join(self.checkpoint_path,self.checkpoint_name+str(self.sample_vox_size)+"-"+str(epoch)+".pth")
self.checkpoint_manager_pointer = (self.checkpoint_manager_pointer+1)%self.max_to_keep
#delete checkpoint
if self.checkpoint_manager_list[self.checkpoint_manager_pointer] is not None:
if os.path.exists(self.checkpoint_manager_list[self.checkpoint_manager_pointer]):
os.remove(self.checkpoint_manager_list[self.checkpoint_manager_pointer])
#save checkpoint
torch.save(self.im_network.state_dict(), save_dir)
#update checkpoint manager
self.checkpoint_manager_list[self.checkpoint_manager_pointer] = save_dir
#write file
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
fout = open(checkpoint_txt, 'w')
for i in range(self.max_to_keep):
pointer = (self.checkpoint_manager_pointer+self.max_to_keep-i)%self.max_to_keep
if self.checkpoint_manager_list[pointer] is not None:
fout.write(self.checkpoint_manager_list[pointer]+"\n")
fout.close()
color_cube_float = np.zeros([3, self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
model_float = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
conf = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32)
#print (model_float.shape)
dimc = self.cell_grid_size #4
dimf = self.frame_grid_size #64
frame_flag = np.zeros([dimf+2,dimf+2,dimf+2],np.uint8)
color_cube = np.ones([3,dimf+2,dimf+2,dimf+2]).astype('float32')
queue = []
frame_batch_num = int(dimf**3/self.test_point_batch_size) #8
assert frame_batch_num>0
#print (dimf #64, dimf**3,262144, self.test_point_batch_size, 32768 , frame_batch_num 8)
#get frame grid values
for i in range(frame_batch_num):
point_coord = self.frame_coords[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
point_coord = np.expand_dims(point_coord, axis=0)
point_coord = torch.from_numpy(point_coord)
point_coord = point_coord.to(self.device)
_,_, model_out_, color_out_ = self.im_network(None, z, z_color, point_coord, is_training=False)
#print ('cube 0',torch.unique(color_out_.detach()))
#print ('model out', model_out_.shape, color_out_.shape) torch.Size([1, 32768, 1]) torch.Size([1, 32768, 3])
model_out = model_out_.detach().cpu().numpy()[0]
color_out_ = color_out_.detach().cpu().numpy()[0]
#print (color_out_.shape)
color_out = np.transpose(color_out_,(1,0))
x_coords = self.frame_x[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
y_coords = self.frame_y[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
z_coords = self.frame_z[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
#print (frame_flag.shape, x_coords,y_coords,z_coords, x_coords+1, y_coords+1,z_coords+1)
#print (model_out.shape, color_out.shape, self.test_point_batch_size, color_flag[:,x_coords,y_coords,z_coords].shape) (32768, 1) (32768, 3) 32768 (3, 32768)
frame_flag[x_coords+1,y_coords+1,z_coords+1] = np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]) #66,66,66
conf[x_coords+1,y_coords+1,z_coords+1] = np.reshape(model_out.astype(float), [self.test_point_batch_size])
color_cube[:,x_coords+1,y_coords+1,z_coords+1] = np.reshape(color_out, [3, self.test_point_batch_size]) #66,66,66
#print (x_coords,y_coords,z_coords,x_coords.shape,y_coords.shape,z_coords.shape)
#print ('cube 1',color_out.shape, np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]).shape, np.reshape(color_out, [3, self.test_point_batch_size]).shape, np.unique(color_cube), color_cube[:,x_coords,y_coords,z_coords].shape, frame_flag[x_coords+1,y_coords+1,z_coords+1].shape)
#get queue and fill up ones
for i in range(1,dimf+1):
for j in range(1,dimf+1):
for k in range(1,dimf+1):
x_coords = self.cell_x+(i-1)*dimc
#print ('xcorrds',x_coords,self.cell_x, i-1, dimc)
#print ('cellx,dimc',self.cell_x, dimc) cellx,dimc [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3] 4
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
#model_float[x_coords+1,y_coords+1,z_coords+1] = 1.0
#print (color_cube[:,i,j,k].shape, color_cube_float[:,x_coords+1,y_coords+1,z_coords+1])
color_cube_float[0,x_coords+1,y_coords+1,z_coords+1] = color_cube[0,i,j,k]
color_cube_float[1,x_coords+1,y_coords+1,z_coords+1] = color_cube[1,i,j,k]
color_cube_float[2,x_coords+1,y_coords+1,z_coords+1] = color_cube[2,i,j,k]
#print (i,j,k,color_cube[0,i,j,k]*255,color_cube[1,i,j,k]*255,color_cube[2,i,j,k]*255)
maxv = np.max(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
minv = np.min(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
if maxv!=minv:
queue.append((i,j,k))
elif maxv==1:
x_coords = self.cell_x+(i-1)*dimc
#print ('xcorrds',x_coords,self.cell_x, i-1, dimc)
#print ('cellx,dimc',self.cell_x, dimc) cellx,dimc [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3] 4
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = 1.0
#print (color_cube[:,i,j,k].shape, color_cube_float[:,x_coords+1,y_coords+1,z_coords+1])
#color_cube_float[0,x_coords+1,y_coords+1,z_coords+1] = color_cube[0,i,j,k]
#color_cube_float[1,x_coords+1,y_coords+1,z_coords+1] = color_cube[1,i,j,k]
#color_cube_float[2,x_coords+1,y_coords+1,z_coords+1] = color_cube[2,i,j,k]
#print ('c',color_cube[:,i,j,k], color_cube[:,i,j,k].shape)
cell_batch_size = dimc**3
cell_batch_num = int(self.test_point_batch_size/cell_batch_size)
assert cell_batch_num>0
#run queue
while len(queue)>0:
batch_num = min(len(queue),cell_batch_num)
point_list = []
cell_coords = []
for i in range(batch_num):
point = queue.pop(0)
point_list.append(point)
cell_coords.append(self.cell_coords[point[0]-1,point[1]-1,point[2]-1])
cell_coords = np.concatenate(cell_coords, axis=0)
cell_coords = np.expand_dims(cell_coords, axis=0)
cell_coords = torch.from_numpy(cell_coords)
cell_coords = cell_coords.to(self.device)
_,_, model_out_batch_, color_out_batch_ = self.im_network(None, z,z_color, cell_coords, is_training=False)
model_out_batch = model_out_batch_.detach().cpu().numpy()[0]
color_out_batch = color_out_batch_.detach().cpu().numpy()[0]
for i in range(batch_num):
point = point_list[i]
#print (model_out_batch.shape, color_out_batch.shape)
model_out = model_out_batch[i*cell_batch_size:(i+1)*cell_batch_size,0]
#color_out = color_out_batch[i*cell_batch_size:(i+1)*cell_batch_size,:]
#print ('color out',color_out.shape)
x_coords = self.cell_x+(point[0]-1)*dimc
y_coords = self.cell_y+(point[1]-1)*dimc
z_coords = self.cell_z+(point[2]-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = model_out
#for c in range(3):
# color_cube_float[c,x_coords+1,y_coords+1,z_coords+1] = color_out[:,c]
if np.max(model_out)>self.sampling_threshold:
for i in range(-1,2):
pi = point[0]+i
if pi<=0 or pi>dimf: continue
for j in range(-1,2):
pj = point[1]+j
if pj<=0 or pj>dimf: continue
for k in range(-1,2):
pk = point[2]+k
if pk<=0 or pk>dimf: continue
if (frame_flag[pi,pj,pk] == 0):
frame_flag[pi,pj,pk] = 1
queue.append((pi,pj,pk))
return model_float, color_cube_float, color_cube
#output shape as ply and point cloud as ply
def test_mesh_point(self, config):
#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
if 1: #os.path.exists(checkpoint_txt):
#model_dir='checkpoint/color_all_ae_64/IM_AE.model16-29_raw.pth'
model_dir='checkpoint/color_all_ae_64/IM_AE.model_color_table.pth'
models=torch.load(model_dir)
self.im_network.load_state_dict(torch.load(model_dir),strict=True)
#model_dir='../merge-cyclic-multi-att/checkpoint/color_all_ae_64/IM_AE.model64-209_raw.pth' #IM_AE.model32-199_save_from150.pth'
#self.im_network.load_state_dict(torch.load(model_dir),strict=False)
print(" [*] Load SUCCESS", model_dir)
else:
print(" [!] Load failed...")
return
self.im_network.eval()
#print (self.im_network)
#self.im_network.model.dropout.train()
#for t in range(config.start, min(len(self.data_voxels),config.end)):
idx=0
for data_idx in range(config.start, config.end, 2): #self.datas[config.start:config.end]:
print (len(self.datas), data_idx)
data=self.datas[data_idx]
data_mani=self.datas[data_idx+1]
text_list=data[0].split(' ')[:-1] #.astype('int')
text_array = np.asarray(list(map(int, text_list)))
text_list_mani=data_mani[0].split(' ')[:-1] #.astype('int')
text_array_mani = np.asarray(list(map(int, text_list_mani)))
#print (data[1])
#if '539548' not in data[1]: #c3b6c, ad174 73b369
# continue
path='../hdf5_test_new/'+data[1]+'.hdf5'
name=path.split('/')[-1]
			data_dict = h5py.File(path, 'r')
self.data_points=((data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5)
self.data_values=(data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32))
self.data_colors=(data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0)
tmp_data_voxels_colors = data_dict['voxels_colors'][:]/255.0
tmp_data_voxels_colors = np.transpose(tmp_data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors=(np.reshape(tmp_data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels=(np.reshape(data_dict['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
t=0
batch_voxels_ = self.data_voxels[t:t+1].astype(np.float32)
batch_voxels = torch.from_numpy(batch_voxels_)
batch_voxels = batch_voxels.to(self.device)
batch_voxels_colors = self.data_voxels_colors[t:t+1].astype(np.float32)
batch_voxels_colors = torch.from_numpy(batch_voxels_colors)
batch_voxels_colors = batch_voxels_colors.to(self.device)
#print (torch.unique(batch_voxels), torch.unique(batch_voxels_colors), batch_voxels.shape, batch_voxels_colors.shape)
texts=np.zeros((1, 32))
masks=np.zeros((1, 32))
texts[0,:min(32,len(text_list))]=text_array[:min(32,len(text_list))]
masks[0,:min(32,len(text_list))]=1
texts=torch.from_numpy(texts).to(self.device).long()
masks=torch.from_numpy(masks).to(self.device).bool()
texts_mani=np.zeros((1, 32))
masks_mani=np.zeros((1, 32))
texts_mani[0,:min(32,len(text_list_mani))]=text_array_mani[:min(32,len(text_list_mani))]
masks_mani[0,:min(32,len(text_list_mani))]=1
texts_mani=torch.from_numpy(texts_mani).to(self.device).long()
masks_mani=torch.from_numpy(masks_mani).to(self.device).bool()
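			# text-only forward pass: returns the predicted shape latent, color latent
			# and per-token word features used by the decoder attention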
model_z,_, z_vector_c2, _,_,_,_, words= self.im_network(texts, masks, None, None,None, None,None, None,None, is_training=False)
'''shape=np.expand_dims(np.load('/mnt/sda/lzz/mani-imle-onlycolor-table/shape.npy'),0)
shape[np.where(shape<0.5)]=0
shape[np.where(shape>0.5)]=1
color=np.load('/mnt/sda/lzz/mani-imle-onlycolor-table/color.npy')
print (np.unique(shape), np.unique(color), 'unique')
inputs = torch.from_numpy(np.expand_dims(np.concatenate((shape, color),0),0)).cuda()
print (inputs.shape)
model_z, z_vector_c2 = self.im_network.encoder(inputs, is_training=0)'''
#model_z=torch.from_numpy(np.load('/mnt/sda/lzz/mani-imle-onlyshape-table/shapefeat.npy')).cuda()
#z_vector_c2=torch.from_numpy(np.load('/mnt/sda/lzz/mani-imle-onlyshape-table/colorfeat.npy')).cuda()
model_z_mani,_, z_vector_c2_mani, _,_,_,_, words_mani= self.im_network(texts_mani, masks_mani, None, None,None, None,None, None,None, is_training=False)
num_noise=1
noise = torch.randn(model_z.size(0)*num_noise, 64).float().to('cuda')
#noise=torch.from_numpy(np.load('../mani-imle-onlyshape-table/noise.npy')[model_z.size(0)*2:model_z.size(0)*3, :]).float().to('cuda')
z_in=torch.reshape(torch.unsqueeze(torch.cat((model_z, z_vector_c2), 1),1).repeat(1,num_noise,1), (-1, 512))
z_div=self.im_network.net_g(z_in, noise.detach()*1)
z_in=torch.reshape(torch.unsqueeze(torch.cat((model_z, z_vector_c2_mani), 1),1).repeat(1,num_noise,1), (-1, 512))
z_div_mani=self.im_network.net_g(z_in, noise.detach()*1)
model_zs=z_div[:, :256]
z_vector_c2s=z_div[:, 256:]
model_zs_mani=z_div_mani[:, :256]
z_vector_c2s_mani=z_div_mani[:, 256:]
for idx in range(model_zs.shape[0]):
#print (idx)
model_z=model_zs[idx,:]
z_vector_c2=z_vector_c2s[idx,:]
model_z_mani=model_zs[idx,:]
z_vector_c2_mani=z_vector_c2s_mani[idx,:]
#start=time.time()
model_float, color_cube_float, frame_flag, color_cube = self.z2voxel(model_z, z_vector_c2, words, texts, config)
model_float_mani, color_cube_float_mani, frame_flag_mani, color_cube_mani = self.z2voxel(model_z, z_vector_c2_mani, words_mani, texts_mani, config)
#print (time.time()-start)
from plyfile import PlyData,PlyElement
some_array=[]
size=258
for i in range(1,64):
for j in range(1,64):
for k in range(1,64):
if frame_flag[1:-1,1:-1,1:-1][int(i),int(j),int(k)]>0.5:
some_array.append((i,j,k,color_cube[2,int(i),int(j),int(k)]*255,color_cube[1,int(i),int(j),int(k)]*255,color_cube[0,int(i),int(j),int(k)]*255))
some_array = np.array(some_array, dtype=[('x', 'float32'), ('y', 'float32'), ('z', 'float32'), ('red', 'uint8'), ('green', 'uint8'), ('blue', 'uint8')])
el = PlyElement.describe(some_array, 'vertex')
'''PlyData([el]).write('show/'+name+str(data[2][:50].replace('/',' '))+str(idx)+'test_new_input.ply')
print (frame_flag.shape, color_cube.shape)
np.save('shape.npy', frame_flag )
np.save('color.npy', color_cube )'''
'''shape64=torch.unsqueeze(torch.unsqueeze(torch.from_numpy(model_float).cuda(),0),0)
color64=torch.unsqueeze(torch.from_numpy(color_cube_float).cuda(),0)
color64[:,0,:,:,:][torch.where(shape64[:,0,:,:,:]==0)]=0
color64[:,1,:,:,:][torch.where(shape64[:,0,:,:,:]==0)]=0
color64[:,2,:,:,:][torch.where(shape64[:,0,:,:,:]==0)]=0
cube_float64=torch.cat((shape64,color64),1)[:,:,1:-1,1:-1,1:-1][:,:,0:256:4,0:256:4,0:256:4]
model_z_shape, _, z_vector_c2_shape,_,_,_,_ = self.im_network(None,None, cube_float64, None,None, None,None, None, words, is_training=False)
#print (model_z.shape, z_vector_c2.shape)
text_feat=torch.cat((model_z.unsqueeze(0),z_vector_c2.unsqueeze(0)),1).detach().cpu().numpy()
shape_feat=torch.cat((model_z_shape,z_vector_c2_shape),1).detach().cpu().numpy()
#print (text_feat.shape, shape_feat.shape)
print ('dis',np.sum(np.abs(text_feat-shape_feat)))
np.save('val_evaluate/shape_feat/'+data[1]+'_'+str(data[2][:50].replace('/',' '))+str(idx)+'.npy', shape_feat)
np.save('val_evaluate/text_feat/'+data[1]+'_'+str(data[2][:50].replace('/',' '))+str(idx)+'.npy', text_feat)'''
model_pad=np.zeros((66,66,66))
model_pad[1:-1,1:-1,1:-1]=frame_flag[1:-1,1:-1,1:-1] #model_float[1:-1:4,1:-1:4,1:-1:4]
vertices, triangles = mcubes.marching_cubes(model_pad, self.sampling_threshold)
x = np.linspace(0, 66, 66)
y = np.linspace(0, 66, 66)
z = np.linspace(0, 66, 66)
#color_cube[:,1:-1,1:-1,1:-1]=color_cube
color_cube[:,0,:,:]=color_cube[:,1,:,:]
color_cube[:,:,0,:]=color_cube[:,:,1,:]
color_cube[:,:,:,0]=color_cube[:,:,:,1]
color_cube[:,-1,:,:]=color_cube[:,-2,:,:]
color_cube[:,:,-1,:]=color_cube[:,:,-2,:]
color_cube[:,:,:,-1]=color_cube[:,:,:,-2]
#color_cube[:,1:-1,1:-1,1:-1]=self.data_voxels_colors[0,:,:,:,:]
my_interpolating_function0 = RegularGridInterpolator((x, y, z), color_cube[0,:,:,:],method='nearest') #_float[0,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function1 = RegularGridInterpolator((x, y, z), color_cube[1,:,:,:],method='nearest') #_float[1,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function2 = RegularGridInterpolator((x, y, z), color_cube[2,:,:,:],method='nearest') #_float[2,1:-1:4,1:-1:4,1:-1:4])
color0=my_interpolating_function0(vertices)
color1=my_interpolating_function1(vertices)
color2=my_interpolating_function2(vertices)
colors=np.zeros((color0.shape[0],3))
colors[:,0]=color0
colors[:,1]=color1
colors[:,2]=color2
write_ply_triangle("show/"+str(name)+str(data[2].replace('/',' '))+"_mesh_pred"+str(idx)+".ply", vertices, triangles, colors)
model_pad=np.zeros((66,66,66))
model_pad[1:-1,1:-1,1:-1]= frame_flag_mani[1:-1,1:-1,1:-1]
vertices, triangles = mcubes.marching_cubes(model_pad, self.sampling_threshold)
x = np.linspace(0, 66, 66)
y = np.linspace(0, 66, 66)
z = np.linspace(0, 66, 66)
color_cube=color_cube_mani
#color_cube[:,1:-1,1:-1,1:-1]=color_cube
color_cube[:,0,:,:]=color_cube[:,1,:,:]
color_cube[:,:,0,:]=color_cube[:,:,1,:]
color_cube[:,:,:,0]=color_cube[:,:,:,1]
color_cube[:,-1,:,:]=color_cube[:,-2,:,:]
color_cube[:,:,-1,:]=color_cube[:,:,-2,:]
color_cube[:,:,:,-1]=color_cube[:,:,:,-2]
#color_cube[:,1:-1,1:-1,1:-1]=self.data_voxels_colors[0,:,:,:,:]
my_interpolating_function0 = RegularGridInterpolator((x, y, z), color_cube[0,:,:,:],method='nearest') #_float[0,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function1 = RegularGridInterpolator((x, y, z), color_cube[1,:,:,:],method='nearest') #_float[1,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function2 = RegularGridInterpolator((x, y, z), color_cube[2,:,:,:],method='nearest') #_float[2,1:-1:4,1:-1:4,1:-1:4])
color0=my_interpolating_function0(vertices)
color1=my_interpolating_function1(vertices)
color2=my_interpolating_function2(vertices)
colors=np.zeros((color0.shape[0],3))
colors[:,0]=color0
colors[:,1]=color1
colors[:,2]=color2
write_ply_triangle("show/"+str(name)+str(data_mani[2].replace('/',' '))+"_mesh_mani_"+str(idx)+".ply", vertices, triangles, colors)
if config.high_resolution:
model_pad=np.zeros((258,258,258))
model_pad[1:-1,1:-1,1:-1]= model_float[1:-1,1:-1,1:-1] #model_float[1:-1:4,1:-1:4,1:-1:4]
vertices, triangles = mcubes.marching_cubes(model_float, self.sampling_threshold)
x = np.linspace(0, 258,258)
y = np.linspace(0, 258,258)
z = np.linspace(0, 258,258)
color_cube=color_cube_float
color_cube[:,0,:,:]=color_cube[:,1,:,:]
color_cube[:,:,0,:]=color_cube[:,:,1,:]
color_cube[:,:,:,0]=color_cube[:,:,:,1]
color_cube[:,-1,:,:]=color_cube[:,-2,:,:]
color_cube[:,:,-1,:]=color_cube[:,:,-2,:]
color_cube[:,:,:,-1]=color_cube[:,:,:,-2]
my_interpolating_function0 = RegularGridInterpolator((x, y, z), color_cube[0,:,:,:],method='nearest') #_float[0,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function1 = RegularGridInterpolator((x, y, z), color_cube[1,:,:,:],method='nearest') #_float[1,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function2 = RegularGridInterpolator((x, y, z), color_cube[2,:,:,:],method='nearest') #_float[2,1:-1:4,1:-1:4,1:-1:4])
color0=my_interpolating_function0(vertices)
color1=my_interpolating_function1(vertices)
color2=my_interpolating_function2(vertices)
colors=np.zeros((color0.shape[0],3))
colors[:,0]=color0
colors[:,1]=color1
colors[:,2]=color2
write_ply_triangle("show/"+str(name)+str(data[2].replace('/',' '))+"_pred_258_"+str(idx)+".ply", vertices, triangles, colors)
model_pad=np.zeros((258,258,258))
model_pad[1:-1,1:-1,1:-1]= model_float_mani[1:-1,1:-1,1:-1] #model_float[1:-1:4,1:-1:4,1:-1:4]
vertices, triangles = mcubes.marching_cubes(model_float_mani, self.sampling_threshold)
x = np.linspace(0, 258,258)
y = np.linspace(0, 258,258)
z = np.linspace(0, 258,258)
color_cube=color_cube_float_mani
color_cube[:,0,:,:]=color_cube[:,1,:,:]
color_cube[:,:,0,:]=color_cube[:,:,1,:]
color_cube[:,:,:,0]=color_cube[:,:,:,1]
color_cube[:,-1,:,:]=color_cube[:,-2,:,:]
color_cube[:,:,-1,:]=color_cube[:,:,-2,:]
color_cube[:,:,:,-1]=color_cube[:,:,:,-2]
my_interpolating_function0 = RegularGridInterpolator((x, y, z), color_cube[0,:,:,:],method='nearest') #_float[0,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function1 = RegularGridInterpolator((x, y, z), color_cube[1,:,:,:],method='nearest') #_float[1,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function2 = RegularGridInterpolator((x, y, z), color_cube[2,:,:,:],method='nearest') #_float[2,1:-1:4,1:-1:4,1:-1:4])
color0=my_interpolating_function0(vertices)
color1=my_interpolating_function1(vertices)
color2=my_interpolating_function2(vertices)
colors=np.zeros((color0.shape[0],3))
colors[:,0]=color0
colors[:,1]=color1
colors[:,2]=color2
write_ply_triangle("show/"+str(name)+str(data_mani[2].replace('/',' '))+"_mesh_258_"+str(idx)+".ply", vertices, triangles, colors)
def get_z(self, config):
#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
if os.path.exists(checkpoint_txt):
fin = open(checkpoint_txt)
model_dir = fin.readline().strip()
fin.close()
model_dir='checkpoint/color_all_ae_64/IM_AE.model16-199_raw.pth'
self.im_network.load_state_dict(torch.load(model_dir))
print(" [*] Load SUCCESS", model_dir)
else:
print(" [!] Load failed...")
return
self.im_network.eval()
#for t in range(config.start, min(len(self.data_voxels),config.end)):
for path in glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5_train/*.hdf5'): #self.data_paths: #[config.start:config.end]:
print (path)
name=path.split('/')[-1]
data_dict = h5py.File(path, 'r')
self.data_points=((data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5)
self.data_values=(data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32))
self.data_colors=(data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0)
tmp_data_voxels_colors = data_dict['voxels_colors'][:]/255.0
tmp_data_voxels_colors = np.transpose(tmp_data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors=(np.reshape(tmp_data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels=(np.reshape(data_dict['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
t=0
batch_voxels_ = self.data_voxels[t:t+1].astype(np.float32)
batch_voxels = torch.from_numpy(batch_voxels_)
batch_voxels = batch_voxels.to(self.device)
batch_voxels_colors = self.data_voxels_colors[t:t+1].astype(np.float32)
batch_voxels_colors = torch.from_numpy(batch_voxels_colors)
batch_voxels_colors = batch_voxels_colors.to(self.device)
#print (torch.unique(batch_voxels_colors))
#z_vector, _, _ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None, None, is_training=False)
#model_z,_,_ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None,None, None, is_training=False)
model_z,z_vector_color,_,_ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None,None, None, is_training=False)
z=model_z.detach().cpu().numpy()
z_vector_color=z_vector_color.detach().cpu().numpy()
#print (z.shape, z_vector_color.shape)
z=np.concatenate((z,z_vector_color),1)
print (z.shape)
np.save('../feat32_color_train/'+name+'.npy',z)
'''#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
if os.path.exists(checkpoint_txt):
fin = open(checkpoint_txt)
model_dir = fin.readline().strip()
fin.close()
self.im_network.load_state_dict(torch.load(model_dir))
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
hdf5_path = self.checkpoint_dir+'/'+self.model_dir+'/'+self.dataset_name+'_train_z.hdf5'
shape_num = len(self.data_voxels)
hdf5_file = h5py.File(hdf5_path, mode='w')
hdf5_file.create_dataset("zs", [shape_num,self.z_dim], np.float32)
self.im_network.eval()
#print(shape_num)
for t in range(shape_num):
batch_voxels = self.data_voxels[t:t+1].astype(np.float32)
batch_voxels = torch.from_numpy(batch_voxels)
batch_voxels = batch_voxels.to(self.device)
out_z,_ ,_= self.im_network(batch_voxels, None, None, is_training=False)
hdf5_file["zs"][t:t+1,:] = out_z.detach().cpu().numpy()
hdf5_file.close()
print("[z]")'''
def test_z(self, config, batch_z, dim):
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
for t in range(batch_z.shape[0]):
model_z = batch_z[t:t+1]
model_z = torch.from_numpy(model_z)
model_z = model_z.to(self.device)
model_float = self.z2voxel(model_z)
#img1 = np.clip(np.amax(model_float, axis=0)*256, 0,255).astype(np.uint8)
#img2 = np.clip(np.amax(model_float, axis=1)*256, 0,255).astype(np.uint8)
#img3 = np.clip(np.amax(model_float, axis=2)*256, 0,255).astype(np.uint8)
#cv2.imwrite(config.sample_dir+"/"+str(t)+"_1t.png",img1)
#cv2.imwrite(config.sample_dir+"/"+str(t)+"_2t.png",img2)
#cv2.imwrite(config.sample_dir+"/"+str(t)+"_3t.png",img3)
#print (model_float)
vertices, triangles = mcubes.marching_cubes(model_float, self.sampling_threshold)
vertices = (vertices.astype(np.float32)-0.5)/self.real_size-0.5
#vertices = self.optimize_mesh(vertices,model_z)
write_ply(config.sample_dir+"/"+"out"+str(t)+".ply", vertices, triangles)
print("[sample Z]")
import os,csv
import time
import math
import random
import numpy as np
import h5py
import glob
import scipy.interpolate
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
from scipy.interpolate import RegularGridInterpolator
import mcubes
import mcubes as mc
from utils import *
import copy
from mcubes import marching_cubes #, grid_interp
#pytorch 1.2.0 implementation
#from dalle_pytorch import OpenAIDiscreteVAE, DALLE
#from dalle_pytorch.transformer import Transformer,Transformer_mutual
from transformers import AutoModelForSequenceClassification, AutoConfig
from torch.nn.utils import spectral_norm
from pytorch_lamb import Lamb
from transformers import (
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
)
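# intersection-over-union between two binary occupancy voxel grids (flattened)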
def iou(s1,s2):
pred=torch.flatten(s1)
gt=torch.flatten(s2)
inter1=torch.where((pred==1) & (gt==1))[0].shape[0]
union1=torch.where((pred==1) | (gt==1))[0].shape[0]
iou=inter1/union1
return iou
def grid_interp(vol, points):
"""
Interpolate volume data at given points
Inputs:
vol: 4D torch tensor (C, Nz, Ny, Nx)
points: point locations (Np, 3)
Outputs:
output: interpolated data (Np, C)
"""
#vol=torch.from_numpy(vol)#.cuda()
if vol.is_cuda:
return mc.grid_interp_cuda(vol, points)
else:
return mc.grid_interp_cpu(vol, points) #'''===
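# PositionalEncoder adds fixed sinusoidal position embeddings (stored in a
# buffer) to the token features after scaling them by sqrt(d_model)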
class PositionalEncoder(nn.Module):
def __init__(self, d_model, max_seq_len = 80):
super().__init__()
self.d_model = d_model
		# create a constant 'pe' matrix with values dependent on pos and i
pe = torch.zeros(max_seq_len, d_model)
for pos in range(max_seq_len):
for i in range(0, d_model, 2):
pe[pos, i] = \
math.sin(pos / (10000 ** ((2 * i)/d_model)))
pe[pos, i + 1] = \
math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
# make embeddings relatively larger
x = x * math.sqrt(self.d_model)
#add constant to embedding
seq_len = x.size(1)
#print ('xshape', x.shape, seq_len)
x = x + Variable(self.pe[:,:seq_len], requires_grad=False).cuda()
return x
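# scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V, with an optional
# boolean mask that suppresses padded positions before the softmax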
def attention(q, k, v, d_k, mask=None, dropout=None):
#print ('qkv',q.shape, k.shape, v.shape)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
#print ('mask score ', mask.shape, scores.shape)
#print ('s1',scores.shape)
if mask is not None:
mask = mask.unsqueeze(1).unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1e9)
#print ('s2',scores.shape)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
#print (scores.shape, v.shape)
output = torch.matmul(scores, v)
#print ('output',output.shape)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout = 0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model, bias=True)
self.v_linear = nn.Linear(d_model, d_model, bias=True)
self.k_linear = nn.Linear(d_model, d_model, bias=True)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model, bias=True)
def forward(self, q, k, v, mask=None):
#print ('qkv',q.shape, k.shape, v.shape)
bs = q.size(0)
# perform linear operation and split into h heads
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
		# transpose to get dimensions bs * h * sl * d_k
k = k.transpose(1,2)
q = q.transpose(1,2)
v = v.transpose(1,2)
#print (k.shape, q.shape, v.shape, self.d_k, mask.shape)
# calculate attention using function we will define next
scores = attention(q, k, v, self.d_k, mask, self.dropout)
#print ('score',scores.shape)
# concatenate heads and put through final linear layer
concat = scores.transpose(1,2).contiguous()\
.view(bs, -1, self.d_model)
#print ('cct',concat.shape)
output = self.out(concat)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=16, dropout = 0.1):
super().__init__()
		# d_ff defaults to 16 here (the original Transformer paper uses 2048)
self.linear_1 = nn.Linear(d_model, d_ff, bias=True)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model, bias=True)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class Norm(nn.Module):
def __init__(self, d_model, eps = 1e-5):
super().__init__()
self.size = d_model
# create two learnable parameters to calibrate normalisation
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \
/ (x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
class DecoderLayer(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
#self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
#self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
#self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(heads, d_model) #nn.MultiheadAttention(embed_dim=16, num_heads=4)
self.ff = FeedForward(d_model).cuda()
def forward(self, x, e_outputs, src_mask):
#print ('1',self.norm_2.bias)
#x2 = self.norm_1(x)
#x = x + self.dropout_1(self.attn_1(x2, x2, x2)) # trg_mask
x = self.norm_2(x)
#print ('2',torch.unique(x))
#x=torch.transpose(x,0,1)
#e_outputs=torch.transpose(e_outputs,0,1)
#print ('x,e',x.shape, e_outputs.shape)
#print (self.attn_2(x, e_outputs, e_outputs)[0].shape, x.shape)
x = x +self.dropout_2(self.attn_2(x, e_outputs, e_outputs.clone(), src_mask))
# x=torch.transpose(x,0,1)
#print ('3',torch.unique(x))
x = self.norm_3(x)
#print ('4',torch.unique(x))
x = x+self.dropout_3(self.ff(x))
#print ('5',torch.unique(x))
return x
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
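# generator: implicit occupancy decoder. Query points are concatenated with the 256-d
# shape latent, cross-attended against the per-word text features through a small stack
# of DecoderLayers, and mapped by an MLP to a per-point occupancy value.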
class generator(nn.Module):
def __init__(self, z_dim, point_dim, gf_dim):
super(generator, self).__init__()
self.z_dim = z_dim
self.point_dim = point_dim
self.gf_dim = gf_dim
d_model=32
self.linear_1 = nn.Linear(self.z_dim+self.point_dim+d_model, self.gf_dim*8, bias=True)
self.linear_2 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_3 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_4 = nn.Linear(self.gf_dim*8, self.gf_dim*4, bias=True)
self.linear_5 = nn.Linear(self.gf_dim*4, self.gf_dim*2, bias=True)
self.linear_6 = nn.Linear(self.gf_dim*2, self.gf_dim*1, bias=True)
self.linear_7 = nn.Linear(self.gf_dim*1, 1, bias=True)
self.linear_8 = nn.Linear(self.gf_dim*1, 3, bias=True)
nn.init.normal_(self.linear_1.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_1.bias,0)
nn.init.normal_(self.linear_2.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_2.bias,0)
nn.init.normal_(self.linear_3.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_3.bias,0)
nn.init.normal_(self.linear_4.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_4.bias,0)
nn.init.normal_(self.linear_5.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_5.bias,0)
nn.init.normal_(self.linear_6.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_6.bias,0)
nn.init.normal_(self.linear_7.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_7.bias,0)
nn.init.normal_(self.linear_8.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_8.bias,0)
self.linear_text_k = nn.Linear(768, d_model, bias=True)
#self.linear_text_v = nn.Linear(768, d_model, bias=True)
self.linear_shape_q = nn.Linear(259, d_model, bias=True)
self.linear_final = nn.Linear(d_model, d_model, bias=True)
nn.init.normal_(self.linear_text_k.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_k.bias,0)
#nn.init.normal_(self.linear_text_v.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_v.bias,0)
nn.init.normal_(self.linear_shape_q.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_shape_q.bias,0)
self.N=4
self.layers = get_clones(DecoderLayer(d_model, 4), self.N)
self.pe = PositionalEncoder(d_model)
'''dropout=0.1
self.softmax=torch.nn.Softmax(1)
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
#self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(4, d_model)
self.ff = FeedForward(d_model).cuda()'''
def forward(self, points, z, texts, masks, is_training=False):
zs = z.view(-1,1,self.z_dim).repeat(1,points.size()[1],1)
#print (points.shape, z.shape)
pointz = torch.cat([points,zs],2)
#print (texts.shape, pointz.shape)
#print (torch.unique(points),torch.unique(zs))
linear_text_k = self.linear_text_k(texts)
#linear_text_v = self.linear_text_v(texts)
linear_shape_q = self.linear_shape_q(pointz.detach())
#print (linear_text_k.shape, linear_shape_q.shape)
'''att1=torch.einsum('btd,bsd->bts', linear_text_k, linear_shape_q) #b, t, s
att1=self.softmax(att1)
position_sense_feat=torch.einsum('bts,btd->bsd', att1, linear_text_v ) '''
#print ('pointz',torch.unique(pointz), torch.unique(texts))
#print ('weight', torch.unique(self.linear_text_k.weight), torch.unique(self.linear_shape_q.weight))
#print ('bias', torch.unique(self.linear_text_k.bias), torch.unique(self.linear_shape_q.bias))
x=linear_shape_q
src_mask=masks
#print (masks.shape)
'''x = self.dropout_2(self.attn_2(linear_shape_q, linear_text_k, linear_text_v, src_mask))
x2 = self.norm_3(x)
x = self.dropout_3(self.ff(x2))'''
#print ('b',linear_text_k.shape, texts.shape)
linear_text_k = self.pe(linear_text_k)
#print ('a',linear_text_k.shape)
#print ('x1',torch.unique(x),self.linear_text_k.)
#print ('linear_text_k',torch.unique(linear_text_k))
for i in range(self.N):
#print ('i',i,x.shape, linear_text_k.shape, src_mask.shape)
x = self.layers[i](x, linear_text_k, src_mask)
x=self.linear_final(x)/5.0
#print ('pointz',torch.unique(pointz))
#print ('x2',torch.unique(x))
#print (torch.unique(pointz) ,torch.unique(x))
#print (torch.unique(pointz),torch.unique(x))
pointz = torch.cat([pointz, x],2)
#print (torch.unique(position_sense_feat))
l1 = self.linear_1(pointz)
l1 = F.leaky_relu(l1, negative_slope=0.02, inplace=True)
l2 = self.linear_2(l1)
l2 = F.leaky_relu(l2, negative_slope=0.02, inplace=True)
l3 = self.linear_3(l2)
l3 = F.leaky_relu(l3, negative_slope=0.02, inplace=True)
l4 = self.linear_4(l3)
l4 = F.leaky_relu(l4, negative_slope=0.02, inplace=True)
l5 = self.linear_5(l4)
l5 = F.leaky_relu(l5, negative_slope=0.02, inplace=True)
l6 = self.linear_6(l5)
l6 = F.leaky_relu(l6, negative_slope=0.02, inplace=True)
l7 = self.linear_7(l6)
l8 = self.linear_8(l6)
#l7 = torch.clamp(l7, min=0, max=1)
l7 = torch.max(torch.min(l7, l7*0.01+0.99), l7*0.01)
l8 = torch.max(torch.min(l8, l8*0+1), l8*0)
#for i in range(4096):
# #print ('l8',l8[0,i,:])
return l7
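# generator_color: same architecture as generator, but driven by the color latent and
# returning a 3-channel RGB value per query point (linear_7 is defined but unused here).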
class generator_color(nn.Module):
def __init__(self, z_dim, point_dim, gf_dim):
super(generator_color, self).__init__()
self.z_dim = z_dim
self.point_dim = point_dim
self.gf_dim = gf_dim
d_model=32
self.linear_1 = nn.Linear(self.z_dim+self.point_dim+d_model, self.gf_dim*8, bias=True)
self.linear_2 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_3 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_4 = nn.Linear(self.gf_dim*8, self.gf_dim*4, bias=True)
self.linear_5 = nn.Linear(self.gf_dim*4, self.gf_dim*2, bias=True)
self.linear_6 = nn.Linear(self.gf_dim*2, self.gf_dim*1, bias=True)
self.linear_7 = nn.Linear(self.gf_dim*1, 1, bias=True)
self.linear_8 = nn.Linear(self.gf_dim*1, 3, bias=True)
nn.init.normal_(self.linear_1.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_1.bias,0)
nn.init.normal_(self.linear_2.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_2.bias,0)
nn.init.normal_(self.linear_3.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_3.bias,0)
nn.init.normal_(self.linear_4.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_4.bias,0)
nn.init.normal_(self.linear_5.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_5.bias,0)
nn.init.normal_(self.linear_6.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_6.bias,0)
nn.init.normal_(self.linear_7.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_7.bias,0)
nn.init.normal_(self.linear_8.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_8.bias,0)
self.linear_text_k = nn.Linear(768, d_model, bias=True)
#self.linear_text_v = nn.Linear(768, d_model, bias=True)
self.linear_shape_q = nn.Linear(259, d_model, bias=True)
self.linear_final = nn.Linear(d_model, d_model, bias=True)
nn.init.normal_(self.linear_text_k.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_k.bias,0)
#nn.init.normal_(self.linear_text_v.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_v.bias,0)
nn.init.normal_(self.linear_shape_q.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_shape_q.bias,0)
self.N=4
self.layers = get_clones(DecoderLayer(d_model, 4), self.N)
self.pe = PositionalEncoder(d_model)
#multihead_attn = nn.MultiheadAttention(embed_dim=16, num_heads=4)
#self.transformer_model = nn.Transformer(d_model=16, nhead=4, num_encoder_layers=0, num_decoder_layers=1, dim_feedforward=16)
'''self.softmax=torch.nn.Softmax(1)
dropout=0.1
self.softmax=torch.nn.Softmax(1)
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
#self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(4, d_model)
self.ff = FeedForward(d_model).cuda()'''
def forward(self, points, z, texts, masks, is_training=False):
zs = z.view(-1,1,self.z_dim).repeat(1,points.size()[1],1)
pointz = torch.cat([points,zs],2)
#print (texts.shape, pointz.shape)
#print (torch.unique(points),torch.unique(zs))
linear_text_k = self.linear_text_k(texts)
#linear_text_v = self.linear_text_v(texts)
linear_shape_q = self.linear_shape_q(pointz.detach())
#print (linear_text_k.shape, linear_shape_q.shape)
'''att1=torch.einsum('btd,bsd->bts', linear_text_k, linear_shape_q) #b, t, s
att1=self.softmax(att1)
position_sense_feat=torch.einsum('bts,btd->bsd', att1, linear_text_v ) '''
x=linear_shape_q
#linear_text_k = self.pe(linear_text_k)
#print ('generator color',torch.unique(x))
src_mask=masks
for i in range(self.N):
x = self.layers[i](x, linear_text_k, src_mask)
x=self.linear_final(x)/5.0
#print ('pointz',torch.unique(pointz))
#print ('x2',torch.unique(x))
#print (torch.unique(pointz) ,torch.unique(x))
#torch.nn.Transformer(d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', custom_encoder=None, custom_decoder=None)
#attn_output, attn_output_weights = multihead_attn(x, key, value)
#print (x.shape,linear_text_k.shape)
#x = self.transformer_model(torch.transpose(linear_text_k,0,1), torch.transpose(x,0,1) )
#print (x.shape)
#x=torch.transpose(x,0,1)
#print (torch.unique(pointz),torch.unique(x))
#print (masks.shape)
'''x =self.dropout_2(self.attn_2(linear_shape_q, linear_text_k, linear_text_v, src_mask))
x2 = self.norm_3(x)
x = self.dropout_3(self.ff(x2))'''
'''linear_text_k = self.linear_text_k(texts)
linear_text_v = self.linear_text_v(texts)
linear_shape_q = self.linear_shape_q(pointz)
att1=torch.einsum('btd,bsd->bts', linear_text_k, linear_shape_q) #b, t, s
att1=self.softmax(att1)
position_sense_feat=torch.einsum('bts,btd->bsd', att1, linear_text_v ) '''
pointz = torch.cat([pointz, x],2)
l1 = self.linear_1(pointz)
l1 = F.leaky_relu(l1, negative_slope=0.02, inplace=True)
l2 = self.linear_2(l1)
l2 = F.leaky_relu(l2, negative_slope=0.02, inplace=True)
l3 = self.linear_3(l2)
l3 = F.leaky_relu(l3, negative_slope=0.02, inplace=True)
l4 = self.linear_4(l3)
l4 = F.leaky_relu(l4, negative_slope=0.02, inplace=True)
l5 = self.linear_5(l4)
l5 = F.leaky_relu(l5, negative_slope=0.02, inplace=True)
l6 = self.linear_6(l5)
l6 = F.leaky_relu(l6, negative_slope=0.02, inplace=True)
#l7 = self.linear_7(l6)
l8 = self.linear_8(l6)
#l7 = torch.clamp(l7, min=0, max=1)
#l7 = torch.max(torch.min(l7, l7*0.01+0.99), l7*0.01)
l8 = torch.max(torch.min(l8, l8*0+1), l8*0)
#for i in range(4096):
# #print ('l8',l8[0,i,:])
return l8
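# encoder: 3D CNN that maps a 64^3 RGBA voxel grid (1 occupancy + 3 color channels) to a
# 256-d shape latent and a 256-d color latent, both squashed with a sigmoid.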
class encoder(nn.Module):
def __init__(self, ef_dim, z_dim):
super(encoder, self).__init__()
self.ef_dim = ef_dim
self.z_dim = z_dim
self.conv_1 = nn.Conv3d(1+3, self.ef_dim, 4, stride=2, padding=1, bias=False)
self.in_1 = nn.InstanceNorm3d(self.ef_dim)
self.conv_2 = nn.Conv3d(self.ef_dim, self.ef_dim*2, 4, stride=2, padding=1, bias=False)
self.in_2 = nn.InstanceNorm3d(self.ef_dim*2)
self.conv_3 = nn.Conv3d(self.ef_dim*2, self.ef_dim*4, 4, stride=2, padding=1, bias=False)
self.in_3 = nn.InstanceNorm3d(self.ef_dim*4)
self.conv_4 = nn.Conv3d(self.ef_dim*4, self.ef_dim*8, 4, stride=2, padding=1, bias=False)
self.in_4 = nn.InstanceNorm3d(self.ef_dim*8)
self.conv_5 = nn.Conv3d(self.ef_dim*8, self.z_dim, 4, stride=1, padding=0, bias=True)
self.conv_6 = nn.Conv3d(self.ef_dim*8, self.z_dim, 4, stride=1, padding=0, bias=True)
nn.init.xavier_uniform_(self.conv_1.weight)
nn.init.xavier_uniform_(self.conv_2.weight)
nn.init.xavier_uniform_(self.conv_3.weight)
nn.init.xavier_uniform_(self.conv_4.weight)
nn.init.xavier_uniform_(self.conv_5.weight)
nn.init.constant_(self.conv_5.bias,0)
nn.init.xavier_uniform_(self.conv_6.weight)
nn.init.constant_(self.conv_6.bias,0)
def forward(self, inputs, is_training=False):
#print ('input',inputs.shape)
d_1 = self.in_1(self.conv_1(inputs))
d_1 = F.leaky_relu(d_1, negative_slope=0.02, inplace=True)
d_2 = self.in_2(self.conv_2(d_1))
d_2 = F.leaky_relu(d_2, negative_slope=0.02, inplace=True)
d_3 = self.in_3(self.conv_3(d_2))
d_3 = F.leaky_relu(d_3, negative_slope=0.02, inplace=True)
d_4 = self.in_4(self.conv_4(d_3))
d_4 = F.leaky_relu(d_4, negative_slope=0.02, inplace=True)
d_5 = self.conv_5(d_4)
d_5 = d_5.view(-1, self.z_dim)
d_5 = torch.sigmoid(d_5)
d_6 = self.conv_6(d_4)
d_6 = d_6.view(-1, self.z_dim)
d_6 = torch.sigmoid(d_6)
return d_5, d_6
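# ConditionInjection: FiLM-style conditional normalization. A noise/condition vector z is
# projected to per-feature (gamma, beta) and applied to the normalized activations.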
class ConditionInjection(nn.Module):
def __init__(self, num_features, condition_dim=64, norm=nn.LayerNorm):
super().__init__()
if norm is not None:
if norm is nn.LayerNorm:
self.norm = norm(num_features, elementwise_affine=False)
elif norm is nn.BatchNorm1d:
self.norm = norm(num_features, affine=False)
else:
raise ValueError('unknown norm type')
self.condition_dim = condition_dim
fc1 = nn.Linear(condition_dim, condition_dim)
fc1 = initLinear(fc1)
fc1 = spectral_norm(fc1)
fc2 = nn.Linear(condition_dim, num_features * 2)
fc2 = initLinear(fc2)
fc2 = spectral_norm(fc2)
self.projection = nn.Sequential(
fc1,
nn.LeakyReLU(.2),
fc2,
)
def forward(self, x, z=None): # x shape
if z is None:
z = torch.randn(x.size(0), self.condition_dim).float().cuda()
y = self.projection(z)
#print (x.shape, y.shape, z.shape)
for _ in range(x.dim() - y.dim()):
y = y.unsqueeze(-1)
gamma, beta = y.chunk(2, 1)
# print(gamma.shape, beta.shape)
out = self.norm(x) if self.norm is not None else x
out = out * (1+gamma) + beta
return out
def initLinear(block, type='xavier_uniform'):
assert type in ['xavier_uniform']
nn.init.xavier_uniform_(block.weight, gain=0.02)
nn.init.constant_(block.bias, 0)
return block
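# LatentGenerator: spectral-normalized MLP that perturbs a concatenated (shape, color)
# latent with a noise code, producing the diversified latents used by im_network below.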
class LatentGenerator(nn.Module):
def __init__(self, num_features, hidden=64, z_dim=64):
super().__init__()
fc1 = nn.Linear(num_features, num_features)
fc2 = nn.Linear(num_features, num_features)
fc3 = nn.Linear(num_features, hidden)
fc4 = nn.Linear(hidden, hidden*2)
fc5 = nn.Linear(hidden*2, num_features)
fc6 = nn.Linear(num_features, num_features)
fc1 = initLinear(fc1)
fc2 = initLinear(fc2)
fc3 = initLinear(fc3)
fc4 = initLinear(fc4)
fc5 = initLinear(fc5)
fc6 = initLinear(fc6)
self.fc1 = spectral_norm(fc1)
self.fc2 = spectral_norm(fc2)
self.fc3 = spectral_norm(fc3)
self.fc4 = spectral_norm(fc4)
self.fc5 = spectral_norm(fc5)
self.fc6 = spectral_norm(fc6)
self.norm1 = ConditionInjection(num_features, z_dim)
self.norm2 = ConditionInjection(hidden, z_dim)
# self.norm3 = ConditionInjection(num_features, z_dim)
	def forward(self, x, z=None):
		if z is None:
			# LatentGenerator has no condition_dim attribute of its own; reuse the one stored by norm1
			z = torch.randn(x.size(0), self.norm1.condition_dim).float().cuda()
out = self.fc1(x)
out = self.norm1(out, z)
out = F.leaky_relu(out, .2)
out = self.fc2(out)
out = F.leaky_relu(out, .2)
out = self.fc3(out)
out = self.norm2(out, z)
out = F.leaky_relu(out, .2)
out = self.fc4(out)
out = F.leaky_relu(out, .2)
out = self.fc5(out)
out = F.leaky_relu(out, .2)
out = self.fc6(out)
		out = torch.sigmoid(out)  # F.sigmoid is deprecated in favor of torch.sigmoid
return out
class CA_NET(nn.Module):
# some code is modified from vae examples
# (https://github.com/pytorch/examples/blob/master/vae/main.py)
def __init__(self):
super(CA_NET, self).__init__()
self.t_dim = 512
self.c_dim = 512
self.fc = nn.Linear(self.t_dim, self.c_dim * 2, bias=True)
self.relu = nn.ReLU()
def encode(self, text_embedding):
x = self.relu(self.fc(text_embedding))
mu = x[:, :self.c_dim]
logvar = x[:, self.c_dim:]
return mu, logvar
def reparametrize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = torch.cuda.FloatTensor(std.size()).normal_()
eps = Variable(eps)
return eps.mul(std).add_(mu)
def forward(self, text_embedding):
mu, logvar = self.encode(text_embedding)
c_code = self.reparametrize(mu, logvar)
return c_code, mu, logvar
class im_network(nn.Module):
def __init__(self, ef_dim, gf_dim, z_dim, point_dim):
super(im_network, self).__init__()
self.ef_dim = ef_dim
self.gf_dim = gf_dim
self.z_dim = z_dim
self.point_dim = point_dim
self.encoder = encoder(self.ef_dim, self.z_dim)
pretrained_path='bert-base-uncased'
config = AutoConfig.from_pretrained(
str(pretrained_path), #num_labels=len(dataBunch.labels)
)
self.model = AutoModelForSequenceClassification.from_pretrained(
str(pretrained_path), config=config, state_dict=None
)
#self.ca=CA_NET()
self.net_g=LatentGenerator(512)
#self.net_g_color=LatentGenerator(256)
self.generator = generator(self.z_dim, self.point_dim, self.gf_dim)
self.generator_color = generator_color(self.z_dim, self.point_dim, self.gf_dim)
self.cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
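	# forward() serves several roles depending on which arguments are given:
	#   - training: encode text and voxels, draw num_noise diversified latents via net_g and
	#     keep the one closest to the voxel-encoder latent (or the one selected by input_idxs);
	#   - texts only: return the text-derived latents and per-word features;
	#   - z_vector + point_coord: decode occupancy and color at the query points;
	#   - 4-channel voxels only: return the voxel-encoder latents.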
def forward(self, texts, masks, inputs, z_vector, input_idxs, z_vector_c2,noise,point_coord, words, is_training=False):
if texts!=None:
text_inputs = {
"input_ids": texts,
"attention_mask": masks,
}
if is_training:
#print ('traiing')
z_vector_std, z_vector_c_std = self.encoder(inputs, is_training=is_training)
z_vector, _, z_vector_c2, words = self.model(**text_inputs)
#num_noise=10
#noise = torch.randn(z_vector.size(0)*num_noise, 64).float().to('cuda')
#print ('noise.shape',noise.shape)
num_noise=int(noise.shape[0]/z_vector.size(0))
#z_vector_ca, mu, var=self.ca(torch.cat((z_vector, z_vector_c2), 1))
#z_vector=z_vector_ca[:,:256]
#z_vector_c2=z_vector_ca[:,256:]
z_in=torch.reshape(torch.unsqueeze(torch.cat((z_vector, z_vector_c2), 1),1).repeat(1,num_noise,1), (-1, 512))
#print ('2', torch.unique(z_in))
#z_noise=torch.cat((z_in, noise), 2)
z_div=self.net_g(z_in, noise.detach())
#print ('3', torch.unique(z_div))
z_div=torch.reshape(z_div, (-1, num_noise, 512))
z_std=torch.unsqueeze(torch.cat((z_vector_std, z_vector_c_std), 1),1).repeat(1,num_noise,1)
#print ('4', torch.unique(z_std))
diff=torch.sum(torch.abs(z_div-z_std),2)
#print ('diff', diff.shape)
idx=torch.argmin(diff,1)
#print ('5', idx, idx.shape)
#z_best=z_div[:,idx,:]
#print (z_div.shape, z_best.shape, z_best)
idxs=torch.unsqueeze(torch.unsqueeze(idx,-1),-1)
if input_idxs!=None:
idxs=input_idxs
idxs_=idxs.repeat(1, num_noise, z_in.shape[-1])
#print ('idx, z div', idx.shape, z_div.shape, torch.unique(idx))
#print ('z_div', z_div.shape, idxs_.shape)
z_best=torch.gather(z_div, 1, idxs_)[:,0,:]
#print ('z_best', z_best.shape)
#z_best=torch.index_select(z_div, 0, idx)
#print ('z best', z_best.shape, z_best)
z_vector_div=z_best[:, :256]
z_vector_c2_div=z_best[:, 256:]
return z_vector_div,idxs, z_vector_c2_div, z_vector_std,None, z_vector_c_std, None, None, z_vector, z_vector_c2, words
else:
if texts is not None:
z_vector,z_vector_color, z_vector_c2, words = self.model(**text_inputs)
#z_vector_ca, mu, var=self.ca(torch.cat((z_vector, z_vector_c2), 1))
#z_vector=z_vector_ca[:,:256]
#z_vector_c2=z_vector_ca[:,256:]
'''num_noise=30
noise = torch.randn(z_vector.size(0)*num_noise, 64).float().to('cuda')
z_in=torch.reshape(torch.unsqueeze(torch.cat((z_vector, z_vector_c2), 1),1).repeat(1,num_noise,1), (-1, 512))
z_div=self.net_g(z_in, noise.detach()*25)
z_vector=z_div[:, :256]
z_vector_c2=z_div[:, 256:]'''
return z_vector, None, z_vector_c2,None, None,None,None, words
if z_vector is not None and point_coord is not None:
#print (point_coord.shape, z_vector.shape, words.shape, masks.shape)
net_out = self.generator(point_coord, z_vector, words, masks, is_training=is_training)
net_out_color = self.generator_color(point_coord, z_vector_c2, words, masks, is_training=is_training)
#print ('net out unique', torch.unique(net_out))
return None,None,None, net_out, net_out_color, None, None #, residue_color+s1_color, s1_color
#elif z_vector is not None and point_coord is not None:
# net_out = self.generator(point_coord, z_vector, is_training=is_training)
# return None,None,None, net_out, None,None,None,
elif (inputs is not None) and (inputs.shape[1]==4):
#z_vector_std, z_vector_color_std, z_vector_c2_std = self.encoder(inputs, is_training=is_training)
z_vector_std, z_vector_c_std = self.encoder(inputs, is_training=is_training)
return z_vector_std,None, z_vector_c_std,None, None,None,None #, net_out, None,None,None,
class IM_shape_chair(object):
def __init__(self, config):
#progressive training
#1-- (16, 16*16*16)
#2-- (32, 16*16*16)
#3-- (64, 16*16*16*4)
self.sample_vox_size = config.sample_vox_size
print (self.sample_vox_size)
if self.sample_vox_size==16:
self.load_point_batch_size = 16*16*16
self.point_batch_size = 16*16*16
self.shape_batch_size = 32
elif self.sample_vox_size==32:
self.load_point_batch_size = 16*16*16
self.point_batch_size = 16*16*16
self.shape_batch_size = 32
elif self.sample_vox_size==64:
self.load_point_batch_size = 16*16*16*4
self.point_batch_size = 16*16*16
self.shape_batch_size = 10
self.input_size = 64 #input voxel grid size
self.ef_dim = 32
self.gf_dim = 128
self.z_dim = 256
self.point_dim = 3
self.dataset_name = config.dataset
#self.dataset_load = self.dataset_name + '_train'
#self.data_paths=glob.glob('hdf5/*.hdf5') #/ccd5e*.hdf5')
self.datas=[]
#start=1
with open('chair_train.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
#if start==1:
# start=0
# continue
text=row[2]
name=row[1]
self.datas.append((text,name))
#break
#for i in range(32):
# self.datas.append(self.datas[0])
if not (config.train):# or config.getz):
#self.data_paths=glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5/*.hdf5')
self.datas=[]
with open('shape_chair.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
text=row[2]
name=row[1]
text_str=row[0]
self.datas.append((text,name,text_str))
#self.data_paths.sort()
#self.dataset_load = self.dataset_name + '_test'
self.checkpoint_dir = config.checkpoint_dir
self.data_dir = config.data_dir
#data_hdf5_name = self.data_dir+'/'+self.dataset_load+'.hdf5'
#self.data_paths=glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5/*.hdf5')
#print ('data name lzz',data_hdf5_name)
'''if not (config.train or config.getz):
self.dataset_load = self.dataset_name + '_test'
data_hdf5_name = self.data_dir+'/'+self.dataset_load+'.hdf5'
data_dict = h5py.File(data_hdf5_name, 'r')
print ('load')
self.data_points = (data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5
self.data_values = data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32)
self.data_colors = data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0
self.data_voxels = data_dict['voxels'][:]
self.data_voxels_colors = data_dict['voxels_colors'][:]/255.0
self.data_voxels_colors = np.transpose(self.data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors = np.reshape(self.data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size])
#reshape to NCHW
self.data_voxels = np.reshape(self.data_voxels, [-1,1,self.input_size,self.input_size,self.input_size])
#else:
# print("error: cannot load "+data_hdf5_name)
# exit(0)'''
#print ('loaded')
if torch.cuda.is_available():
self.device = torch.device('cuda')
torch.backends.cudnn.benchmark = True
else:
self.device = torch.device('cpu')
#build model
self.im_network = im_network(self.ef_dim, self.gf_dim, self.z_dim, self.point_dim)
self.im_network.to(self.device)
#print params
for param_tensor in self.im_network.model.parameters():
param_tensor.requires_grad=False
for param_tensor in self.im_network.encoder.parameters():
param_tensor.requires_grad=False #print(param_tensor, "\t", self.im_network.state_dict()[param_tensor].size())
for param_tensor in self.im_network.generator.parameters():
param_tensor.requires_grad=False #print(param_tensor, "\t", self.im_network.state_dict()[param_tensor].size())
for param_tensor in self.im_network.generator_color.parameters():
param_tensor.requires_grad=False #print(param_tensor, "\t", self.im_network.state_dict()[param_tensor].size())
self.optimizer = torch.optim.Adam(self.im_network.parameters(), lr=0.001, betas=(config.beta1, 0.999))
#self.optimizer = self.get_optimizer(0.001, optimizer_type="lamb")
#self.optimizer = torch.optim.Adam([{'params': base_params}, {'params': self.im_network.model.parameters(), 'lr': 0.001}], lr=config.learning_rate*1, betas=(config.beta1, 0.999))
#self.scheduler = self.get_scheduler(
# self.optimizer, t_total=int(60470*config.epoch), schedule_type="warmup_cosine"
#)
#pytorch does not have a checkpoint manager
#have to define it myself to manage max num of checkpoints to keep
self.max_to_keep = 2
self.checkpoint_path = os.path.join(self.checkpoint_dir, self.model_dir)
self.checkpoint_name='IM_shape_chair.model'
self.checkpoint_manager_list = [None] * self.max_to_keep
self.checkpoint_manager_pointer = 0
#loss
def KL_loss(mu, logvar):
# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.mean(KLD_element).mul_(-0.5)
return KLD
self.KL_loss=KL_loss
def network_loss(G,point_value):
return torch.mean((G-point_value)**2)
def network_loss_mask(G,point_value,mask):
mask=mask.unsqueeze(1).repeat(1,256)
#print (G.shape, point_value.shape, mask.shape)
return torch.mean(((G-point_value)**2)*mask)
self.loss = network_loss
self.loss_mask = network_loss_mask
def color_loss(G,point_color,mask):
return torch.mean(((G-point_color)*mask)**2)
self.color_loss = color_loss
#keep everything a power of 2
self.cell_grid_size = 4
self.frame_grid_size = 64
self.real_size = self.cell_grid_size*self.frame_grid_size #=256, output point-value voxel grid size in testing
self.test_size = 32 #related to testing batch_size, adjust according to gpu memory size
self.test_point_batch_size = self.test_size*self.test_size*self.test_size #do not change
self.test_point_batch_size_in_training=4096
#get coords for training
dima = self.test_size
dim = self.frame_grid_size
self.aux_x = np.zeros([dima,dima,dima],np.uint8)
self.aux_y = np.zeros([dima,dima,dima],np.uint8)
self.aux_z = np.zeros([dima,dima,dima],np.uint8)
multiplier = int(dim/dima)
multiplier2 = multiplier*multiplier
multiplier3 = multiplier*multiplier*multiplier
for i in range(dima):
for j in range(dima):
for k in range(dima):
self.aux_x[i,j,k] = i*multiplier
self.aux_y[i,j,k] = j*multiplier
self.aux_z[i,j,k] = k*multiplier
self.coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
self.coords[i*multiplier2+j*multiplier+k,:,:,:,0] = self.aux_x+i
self.coords[i*multiplier2+j*multiplier+k,:,:,:,1] = self.aux_y+j
self.coords[i*multiplier2+j*multiplier+k,:,:,:,2] = self.aux_z+k
self.coords = (self.coords.astype(np.float32)+0.5)/dim-0.5
self.coords = np.reshape(self.coords,[multiplier3,self.test_point_batch_size,3])
self.coords = torch.from_numpy(self.coords)
self.coords = self.coords.to(self.device)
#get coords for testing
dimc = self.cell_grid_size
dimf = self.frame_grid_size
self.cell_x = np.zeros([dimc,dimc,dimc],np.int32)
self.cell_y = np.zeros([dimc,dimc,dimc],np.int32)
self.cell_z = np.zeros([dimc,dimc,dimc],np.int32)
self.cell_coords = np.zeros([dimf,dimf,dimf,dimc,dimc,dimc,3],np.float32)
self.frame_coords = np.zeros([dimf,dimf,dimf,3],np.float32)
s=16
self.frame_coords_train = torch.zeros([s,s,s,3]).cuda()
self.frame_x = np.zeros([dimf,dimf,dimf],np.int32) #.long()
self.frame_y = np.zeros([dimf,dimf,dimf],np.int32) #.long()
self.frame_z = np.zeros([dimf,dimf,dimf],np.int32) #.long()
for i in range(dimc):
for j in range(dimc):
for k in range(dimc):
self.cell_x[i,j,k] = i
self.cell_y[i,j,k] = j
self.cell_z[i,j,k] = k
for i in range(dimf):
for j in range(dimf):
for k in range(dimf):
self.cell_coords[i,j,k,:,:,:,0] = self.cell_x+i*dimc
self.cell_coords[i,j,k,:,:,:,1] = self.cell_y+j*dimc
self.cell_coords[i,j,k,:,:,:,2] = self.cell_z+k*dimc
self.frame_coords[i,j,k,0] = i
self.frame_coords[i,j,k,1] = j
self.frame_coords[i,j,k,2] = k
self.frame_x[i,j,k] = i
self.frame_y[i,j,k] = j
self.frame_z[i,j,k] = k
for i in range(s):
for j in range(s):
for k in range(s):
self.frame_coords_train[i,j,k,0] = i
self.frame_coords_train[i,j,k,1] = j
self.frame_coords_train[i,j,k,2] = k
self.cell_coords = (self.cell_coords.astype(np.float32)+0.5)/self.real_size-0.5
self.cell_coords = np.reshape(self.cell_coords,[dimf,dimf,dimf,dimc*dimc*dimc,3])
self.cell_x = np.reshape(self.cell_x,[dimc*dimc*dimc])
self.cell_y = np.reshape(self.cell_y,[dimc*dimc*dimc])
self.cell_z = np.reshape(self.cell_z,[dimc*dimc*dimc])
self.frame_x = np.reshape(self.frame_x,[dimf*dimf*dimf])
self.frame_y = np.reshape(self.frame_y,[dimf*dimf*dimf])
self.frame_z = np.reshape(self.frame_z,[dimf*dimf*dimf])
self.frame_coords = (self.frame_coords+0.5)/dimf-0.5
self.frame_coords = np.reshape(self.frame_coords,[dimf*dimf*dimf,3])
self.frame_coords_train = (self.frame_coords_train+0.5)/s-0.5
self.frame_coords_train = torch.reshape(self.frame_coords_train,[s*s*s,3])
#self.conv_edge = nn.Conv3d(3, 3, 3, stride=1, padding=1, groups=3, bias=False)
#self.conv_edge.to(self.device)
self.sampling_threshold = 0.5 #final marching cubes threshold
self.upsample=nn.Upsample(scale_factor=4,mode='trilinear').cuda()
def get_optimizer(self, lr, optimizer_type="lamb"):
# Prepare optimiser and schedule
no_decay = [] #"bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in self.im_network.model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.0, #self.weight_decay,
},
{
"params": [
p
for n, p in self.im_network.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
if optimizer_type == "lamb":
optimizer = Lamb(optimizer_grouped_parameters, lr=lr, eps=1e-8)
elif optimizer_type == "adamw":
optimizer = AdamW(
optimizer_grouped_parameters, lr=lr, eps=1e-8
)
return optimizer
def get_scheduler(self, optimizer, t_total, schedule_type="warmup_cosine"):
SCHEDULES = {
"warmup_cosine": get_cosine_schedule_with_warmup,
}
		if schedule_type is None or schedule_type == "none":
			# SCHEDULES only contains "warmup_cosine"; indexing it with None/"none" or
			# "warmup_constant" would raise KeyError, so call the constant schedules directly.
			return get_constant_schedule(optimizer)
		elif schedule_type == "warmup_constant":
			return get_constant_schedule_with_warmup(
				optimizer, num_warmup_steps=0 #self.warmup_steps
			)
else:
return SCHEDULES[schedule_type](
optimizer,
num_warmup_steps=0, #self.warmup_steps,
num_training_steps=t_total,
)
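	# z2voxel: coarse-to-fine iso-surface extraction. Occupancy and color are first evaluated
	# on the 64^3 frame grid; with config.high_resolution, boundary frame cells are refined to
	# the 256^3 grid by flood-filling a queue of cells whose neighbouring flags disagree.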
def z2voxel(self, z, z_color, words, masks,config):
color_cube_float = np.zeros([3, self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
model_float = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
conf = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32)
dimc = self.cell_grid_size #4
dimf = self.frame_grid_size #64
frame_flag = np.zeros([dimf+2,dimf+2,dimf+2],np.uint8)
color_cube = np.ones([3,dimf+2,dimf+2,dimf+2]).astype('float32')
queue = []
frame_batch_num = int(dimf**3/self.test_point_batch_size) #8
assert frame_batch_num>0
for i in range(frame_batch_num):
point_coord = self.frame_coords[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
point_coord = np.expand_dims(point_coord, axis=0)
point_coord = torch.from_numpy(point_coord)
point_coord = point_coord.to(self.device)
_,_,_, model_out_, color_out_,_,_ = self.im_network(None,masks,None, z,None, z_color,None, point_coord, words, is_training=False)
model_out = model_out_.detach().cpu().numpy()[0]
color_out_ = color_out_.detach().cpu().numpy()[0]
color_out = np.transpose(color_out_,(1,0))
x_coords = self.frame_x[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
y_coords = self.frame_y[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
z_coords = self.frame_z[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
frame_flag[x_coords+1,y_coords+1,z_coords+1] = np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]) #66,66,66
conf[x_coords+1,y_coords+1,z_coords+1] = np.reshape(model_out.astype(float), [self.test_point_batch_size])
color_cube[:,x_coords+1,y_coords+1,z_coords+1] = np.reshape(color_out, [3, self.test_point_batch_size]) #66,66,66
if config.high_resolution:
for i in range(1,dimf+1):
for j in range(1,dimf+1):
for k in range(1,dimf+1):
x_coords = self.cell_x+(i-1)*dimc
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
color_cube_float[0,x_coords+1,y_coords+1,z_coords+1] = color_cube[0,i,j,k]
color_cube_float[1,x_coords+1,y_coords+1,z_coords+1] = color_cube[1,i,j,k]
color_cube_float[2,x_coords+1,y_coords+1,z_coords+1] = color_cube[2,i,j,k]
maxv = np.max(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
minv = np.min(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
if maxv!=minv:
queue.append((i,j,k))
elif maxv==1:
x_coords = self.cell_x+(i-1)*dimc
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = 1.0
cell_batch_size = dimc**3
cell_batch_num = int(self.test_point_batch_size/cell_batch_size)
assert cell_batch_num>0
#run queue
while len(queue)>0:
batch_num = min(len(queue),cell_batch_num)
point_list = []
cell_coords = []
for i in range(batch_num):
point = queue.pop(0)
point_list.append(point)
cell_coords.append(self.cell_coords[point[0]-1,point[1]-1,point[2]-1])
cell_coords = np.concatenate(cell_coords, axis=0)
cell_coords = np.expand_dims(cell_coords, axis=0)
cell_coords = torch.from_numpy(cell_coords)
cell_coords = cell_coords.to(self.device)
_,_,_, model_out_batch_, color_out_batch_,_,_ = self.im_network(None, masks,None,z,None,z_color,None, cell_coords, words, is_training=False)
model_out_batch = model_out_batch_.detach().cpu().numpy()[0]
color_out_batch = color_out_batch_.detach().cpu().numpy()[0]
for i in range(batch_num):
point = point_list[i]
model_out = model_out_batch[i*cell_batch_size:(i+1)*cell_batch_size,0]
x_coords = self.cell_x+(point[0]-1)*dimc
y_coords = self.cell_y+(point[1]-1)*dimc
z_coords = self.cell_z+(point[2]-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = model_out
if np.max(model_out)>self.sampling_threshold:
for i in range(-1,2):
pi = point[0]+i
if pi<=0 or pi>dimf: continue
for j in range(-1,2):
pj = point[1]+j
if pj<=0 or pj>dimf: continue
for k in range(-1,2):
pk = point[2]+k
if pk<=0 or pk>dimf: continue
if (frame_flag[pi,pj,pk] == 0):
frame_flag[pi,pj,pk] = 1
queue.append((pi,pj,pk))
return model_float, color_cube_float, frame_flag, color_cube
@property
def model_dir(self):
return "{}_ae_{}".format(self.dataset_name, self.input_size)
def train(self, config):
#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
print (checkpoint_txt)
if 1: #os.path.exists(checkpoint_txt):
pass
model_dir='checkpoint/color_all_ae_64/div.model64-149.pth'
self.im_network.load_state_dict(torch.load(model_dir),strict=False)
else:
print(" [!] Load failed...")
shape_num = len(self.datas)
batch_index_list = np.arange(shape_num)
batch_index_list2 = np.arange(shape_num)
print("\n\n----------net summary----------")
print("training samples ", shape_num)
print("-------------------------------\n\n")
start_time = time.time()
assert config.epoch==0 or config.iteration==0
training_epoch = config.epoch + int(config.iteration/shape_num)
batch_num = int(shape_num/self.shape_batch_size)
point_batch_num = int(self.load_point_batch_size/self.point_batch_size)
for epoch in range(0, training_epoch): #int(model_dir.split('/')[-1].split('-')[-1].split('_')[0])
self.im_network.train()
np.random.shuffle(batch_index_list)
np.random.shuffle(batch_index_list2)
avg_loss_sp = 0
avg_loss_kl = 0
avg_loss_color = 0
avg_loss_color2 = 0
avg_loss_value = 0
avg_loss_value_unpair =0
avg_loss_color2_unpair =0
avg_value_out =0
avg_color_out =0
avg_value_out_std =0
avg_color_out_std =0
avg_loss_value_rec =0
avg_loss_color2_rec =0
avg_num = 0
self.data_points=np.zeros((self.shape_batch_size,self.load_point_batch_size,3))
self.data_values=np.zeros((self.shape_batch_size,self.load_point_batch_size,1))
self.data_colors=np.zeros((self.shape_batch_size,self.load_point_batch_size,3))
self.data_voxels=np.zeros((self.shape_batch_size,1,64,64,64))
self.data_voxels_colors=np.zeros((self.shape_batch_size,3,64,64,64))
self.data_voxels2=np.zeros((self.shape_batch_size,1,64,64,64))
self.data_voxels_colors2=np.zeros((self.shape_batch_size,3,64,64,64))
#self.pred_voxels=torch.zeros((self.shape_batch_size,1,64,64,64)).to(self.device)
#self.pred_voxels_colors=torch.zeros((self.shape_batch_size,3,64,64,64)).to(self.device)
for idx in range(batch_num):
dxb = batch_index_list[idx*self.shape_batch_size:(idx+1)*self.shape_batch_size]
dxb_text = batch_index_list2[idx*self.shape_batch_size:(idx+1)*self.shape_batch_size]
self.data_points[:]=0
self.data_values[:]=0
self.data_colors[:]=0
self.data_voxels[:]=0
self.data_voxels_colors[:]=0
self.data_voxels2[:]=0
self.data_voxels_colors2[:]=0
batch_paths_shape=np.asarray(self.datas)[dxb]
batch_paths_text=np.asarray(self.datas)[dxb_text]
texts_unpair=np.zeros((batch_paths_text.shape[0], 64))
masks_unpair=np.zeros((batch_paths_text.shape[0], 64))
texts_pair=np.zeros((batch_paths_text.shape[0], 64))
masks_pair=np.zeros((batch_paths_text.shape[0], 64))
for b in range(batch_paths_shape.shape[0]): #path in batch_paths:
text_list_pair=batch_paths_shape[b][0].split(' ')[:-1] #.astype('int')
text_array_pair = np.asarray(list(map(int, text_list_pair)))
text_list_unpair=batch_paths_text[b][0].split(' ')[:-1] #.astype('int')
text_array_unpair = np.asarray(list(map(int, text_list_unpair)))
path='../hdf5_train_new/'+batch_paths_shape[b][1]+'.hdf5'
name=batch_paths_shape[b][1]
data_dict = h5py.File(path, 'r')
self.data_points[b,:,:]=((data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5)
self.data_values[b,:,:]=(data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32))
self.data_colors[b,:,:]=(data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0)
texts_unpair[b,:min(64,len(text_list_unpair))]=text_array_unpair[:min(64,len(text_list_unpair))]
masks_unpair[b,:min(64,len(text_list_unpair))]=1
texts_pair[b,:min(64,len(text_list_pair))]=text_array_pair[:min(64,len(text_list_pair))]
masks_pair[b,:min(64,len(text_list_pair))]=1
tmp_data_voxels_colors = data_dict['voxels_colors'][:]/255.0
tmp_data_voxels_colors = np.transpose(tmp_data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors[b,:,:,:,:]=(np.reshape(tmp_data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels[b,:,:,:,:]=(np.reshape(data_dict['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
#print ('color',self.data_voxels_colors[:,:,0,0,0])
path2='../hdf5_train_new/'+batch_paths_text[b][1]+'.hdf5'
name2=batch_paths_text[b][1]
data_dict2 = h5py.File(path2, 'r')
tmp_data_voxels_colors2 = data_dict2['voxels_colors'][:]/255.0
tmp_data_voxels_colors2 = np.transpose(tmp_data_voxels_colors2, (0,4,1,2,3))
self.data_voxels_colors2[b,:,:,:,:]=(np.reshape(tmp_data_voxels_colors2, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels2[b,:,:,:,:]=(np.reshape(data_dict2['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
#print ('datapoints', data_dict['points_'+str(self.sample_vox_size)].shape, self.data_points.shape)
batch_voxels = self.data_voxels.astype(np.float32) #[dxb].astype(np.float32)
batch_voxels_colors = self.data_voxels_colors.astype(np.float32) # [dxb].astype(np.float32)
batch_voxels2 = self.data_voxels2.astype(np.float32) #[dxb].astype(np.float32)
batch_voxels_colors2 = self.data_voxels_colors2.astype(np.float32) # [dxb].astype(np.float32)
if point_batch_num==1:
point_coord = self.data_points#[dxb]
point_value = self.data_values#[dxb]
point_color = self.data_colors#[dxb]
else:
which_batch = 0 #np.random.randint(point_batch_num)
point_coord = self.data_points[which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size] #[dxb][which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]
point_value = self.data_values[which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]#[dxb][which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]
point_color = self.data_colors[which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]#[dxb][which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]
batch_voxels = torch.from_numpy(batch_voxels).float()
batch_voxels_colors = torch.from_numpy(batch_voxels_colors).float()
batch_voxels2 = torch.from_numpy(batch_voxels2).float()
batch_voxels_colors2 = torch.from_numpy(batch_voxels_colors2).float()
ious=torch.zeros((batch_voxels.shape[0])).cuda()
for i in range(batch_voxels.shape[0]):
ious[i]=iou(batch_voxels[i,0,:,:,:],batch_voxels2[i,0,:,:,:])
iou_mask=torch.zeros((batch_voxels.shape[0])).cuda()
iou_mask[torch.where(ious>0.2)]=1
iou_mask=iou_mask.detach()
#print (ious, iou_mask)
point_coord = torch.from_numpy(point_coord).float()
point_value = torch.from_numpy(point_value).float()
point_color = torch.from_numpy(point_color).float()
batch_voxels = batch_voxels.to(self.device)
batch_voxels_colors = batch_voxels_colors.to(self.device)
batch_voxels2 = batch_voxels2.to(self.device)
batch_voxels_colors2 = batch_voxels_colors2.to(self.device)
point_coord = point_coord.to(self.device)
point_value = point_value.to(self.device)
point_color = point_color.to(self.device)
texts_unpair=torch.from_numpy(texts_unpair).to(self.device).long()
masks_unpair=torch.from_numpy(masks_unpair).to(self.device).bool()
texts=torch.from_numpy(texts_pair).to(self.device).long()
masks=torch.from_numpy(masks_pair).to(self.device).bool()
num_noise=10
noise = torch.randn(texts.size(0)*num_noise, 64).float().to('cuda')
self.im_network.zero_grad()
z_vector,idxs, z_vector_c2, z_vector_std, z_vector_color_std, z_vector_color2_std, net_out, residue_color, z_vector_init, z_vector_c2_init, words = self.im_network(texts,masks, torch.cat((batch_voxels,batch_voxels_colors),1), None,None,None,noise, point_coord, None, is_training=True)
#z_best=torch.gather(z_div, 1, idxs)[:,0,:]
z_vector_unpair,_, z_vector_c2_unpair, z_vector_std_unpair, z_vector_color_std, z_vector_color2_std_unpair, net_out_unpair, residue_color_unpair, z_vector_unpair_init, z_vector_c2_unpair_init, words_unpair = self.im_network(texts_unpair,masks_unpair, torch.cat((batch_voxels2,batch_voxels_colors2),1), None,idxs,None,noise, point_coord, None, is_training=True) #torch.cat((batch_voxels,batch_voxels_colors),1)
z_in=torch.reshape(torch.unsqueeze(torch.cat((z_vector_unpair_init, z_vector_c2_init), 1),1).repeat(1,num_noise,1), (-1, 512))
z_div=self.im_network.net_g(z_in, noise.detach())
z_div=torch.reshape(z_div, (-1, num_noise, 512))
idxs_=idxs.repeat(1, num_noise, z_in.shape[-1])
#print (z_div.shape, idxs.shape)
z_best=torch.gather(z_div, 1, idxs_)[:,0,:]
z_vector_div=z_best[:, :256]
z_vector_c2_div=z_best[:, 256:]
#print (z_best.shape, 'best')
#sim_shape=self.im_network.cos(z_vector_std,z_vector_std_unpair).unsqueeze(1).repeat(1,256)
#sim_color=self.im_network.cos(z_vector_color2_std,z_vector_color2_std_unpair).unsqueeze(1).repeat(1,256)
#print (z_vector.shape, sim_shape.shape)
#mix_shape=z_vector*(torch.ones(z_vector.shape).cuda()-sim_shape)+z_vector_unpair*sim_shape
#mix_color=z_vector_c2*(torch.ones(z_vector_c2.shape).cuda()-sim_color)+z_vector_c2_unpair*sim_color
#mix_shape_std=z_vector_std*(torch.ones(z_vector_std.shape).cuda()-sim_shape)+z_vector_std_unpair*sim_shape
#mix_color_std=z_vector_color2_std*(torch.ones(z_vector_color2_std.shape).cuda()-sim_color)+z_vector_color2_std_unpair*sim_color
frame_batch_num = 1
point_coord = self.frame_coords_train
point_coord = torch.unsqueeze(point_coord, 0)
point_coord = point_coord.repeat(z_vector.shape[0],1,1)
#print (masks.shape, z_vector_div.shape, z_vector_c2_div.shape, point_coord.shape, words_unpair.shape)
_,_,_,model_out,color_final,_,_ = self.im_network(None, masks, None,z_vector_div, None,z_vector_c2_div, None, point_coord, words_unpair, is_training=False)
#model_out[torch.where(model_out>self.sampling_threshold)]=1
#model_out[torch.where(model_out<=self.sampling_threshold)]=0
model_out_pad=torch.ones(model_out.shape).cuda()-model_out
model_out_pad=torch.cat((model_out, model_out_pad),-1)
model_out_pad=F.gumbel_softmax(model_out_pad, tau=1, hard=False)
#print (model_out)
model_out= model_out_pad[:,:,0]
#print (model_out, 'model out')
s=16
model_out=torch.reshape(model_out, (z_vector.shape[0],1,s,s,s))
pred_shape=self.upsample(model_out) #self.pred_voxels[:]=
color_final=torch.transpose(color_final,1,2)
color_final=torch.reshape(color_final, (z_vector.shape[0],3,s,s,s))
pred_color=self.upsample(color_final)
pred_color[:,0,:,:,:][torch.where(pred_shape[:,0,:,:,:]<0.5)]=0
pred_color[:,1,:,:,:][torch.where(pred_shape[:,0,:,:,:]<0.5)]=0
pred_color[:,2,:,:,:][torch.where(pred_shape[:,0,:,:,:]<0.5)]=0
#inputs32=torch.cat((batch_voxels,batch_voxels_colors),1) #[:,:,0:64:2,0:64:2,0:64:2]
#inputs32=nn.functional.interpolate(inputs32, scale_factor=0.5, mode='trilinear')
#inputs32=self.upsample(inputs32)
#z_vector_std_16, z_vector_color2_std_16 = self.im_network.encoder(inputs32.detach(), is_training=False)
z_vector_rec, z_vector_c2_rec =self.im_network.encoder(torch.cat((pred_shape, pred_color),1), is_training=False)
#_,_,_,model_out, color_out, color_final , color_s1 = self.im_network(None, None, None, z_vector, z_vector_color, z_vector_c2,None,point_coord, is_training=False)
'''_,_,_,model_out,color_final,_,_ = self.im_network(None, masks, None, z_vector, z_vector_color, z_vector_c2, None, point_coord, words, is_training=False)
model_out[torch.where(model_out>self.sampling_threshold)]=1
model_out[torch.where(model_out<=self.sampling_threshold)]=0
model_out=torch.reshape(model_out, (-1,1,16,16,16))
pred_shape=self.upsample(model_out) #self.pred_voxels[:]=
#_,_,_,_, color_out_, color_final , color_s1 = self.im_network(None, None,pred_shape, z_vector, z_vector_color, z_vector_c2,None, point_coord, is_training=False)
#print (color_out_.shape)
color_final=torch.transpose(color_final,1,2)
color_final=torch.reshape(color_final, (-1,3,16,16,16))
pred_color=self.upsample(color_final) #self.pred_voxels_colors[:]
pred_color[:,0,:,:,:][torch.where(pred_shape[:,0,:,:,:]==0)]=0
pred_color[:,1,:,:,:][torch.where(pred_shape[:,0,:,:,:]==0)]=0
pred_color[:,2,:,:,:][torch.where(pred_shape[:,0,:,:,:]==0)]=0
z_vector_rec, z_vector_c2_rec =self.im_network.encoder(torch.cat((pred_shape, pred_color),1), is_training=False)'''
#z_vector_rec=z_vector_rec.detach()
#z_vector_c2_rec=z_vector_c2_rec.detach()
#kl_loss = self.KL_loss(mu, var)*0.01
errSP_value = self.loss(z_vector, z_vector_std)*2
errSP_color2 = self.loss(z_vector_c2, z_vector_color2_std)*1.0
errSP_value_unpair = self.loss(z_vector_unpair, z_vector_std_unpair)*2
errSP_color2_unpair = self.loss(z_vector_c2_unpair, z_vector_color2_std_unpair)*1.0
#errSP_value_out = self.loss(net_out, point_value)
#point_value3_2=point_value.repeat(1,1,3)
#errSP_color_out = self.color_loss(residue_color, point_color, point_value3_2)*10.0
#errSP_value_out_std = self.loss(net_out_std, point_value)
#errSP_color_out_std = self.color_loss(residue_color_std, point_color, point_value3_2)*10.0
errSP_value_rec = self.loss_mask(z_vector_rec, z_vector, iou_mask)*0.01#*iou_mask.detach()
errSP_color2_rec = self.loss_mask(z_vector_c2_rec, z_vector_c2, iou_mask)*0.01#*iou_mask.detach()
#print (errSP_value_rec)
errSP=errSP_value+ errSP_color2 +errSP_value_unpair+errSP_color2_unpair+errSP_value_rec+errSP_color2_rec #+ errSP_value_out_std+errSP_color_out_std + errSP_value_rec + errSP_color2_rec# +errSP_value_rec+errSP_color_rec+errSP_color2_rec +errSP_value_rec_text +errSP_color_rec_text +errSP_color2_rec_text
errSP.backward()
#nn.utils.clip_grad_norm(list(self.im_network.generator_color.parameters())+list(self.im_network.dalle.parameters()) , 0.05)
#torch.nn.utils.clip_grad_norm_(
# self.im_network.parameters(), 1
#)
self.optimizer.step()
#avg_loss_kl += kl_loss.item()
avg_loss_value += errSP_value.item()
avg_loss_color2 += errSP_color2.item()
avg_loss_value_unpair += errSP_value_unpair.item()
avg_loss_color2_unpair += errSP_color2_unpair.item()
'''avg_value_out_std += errSP_value_out_std.item()
avg_color_out_std += errSP_color_out_std.item()'''
avg_loss_value_rec += errSP_value_rec.item()
#avg_loss_color_rec += errSP_color_rec.item()
avg_loss_color2_rec += errSP_color2_rec.item()
'''avg_loss_value_rec += errSP_value_rec.item()
avg_loss_color_rec += errSP_color_rec.item()
avg_loss_color2_rec += errSP_color2_rec.item()
avg_loss_value_rec_text += errSP_value_rec_text.item()
avg_loss_color_rec_text += errSP_color_rec_text.item()
avg_loss_color2_rec_text += errSP_color2_rec_text.item()'''
avg_loss_sp += errSP.item()
avg_num += 1
#print(str(self.sample_vox_size)+" Epoch: [%2d/%2d] time: %4.4f,loss_value_sp: %.6f, loss_color_sp: %.6f, loss_value_out_std: %.6f, loss_color_out_std: %.6f, loss_value_sp_rec: %.6f, loss_color_2_rec: %.6f, loss_sp: %.6f" % (epoch, training_epoch, time.time() - start_time,avg_loss_value/avg_num, avg_loss_color2/avg_num, avg_value_out_std/avg_num, avg_color_out_std/avg_num, avg_loss_value_rec/avg_num, avg_loss_color2_rec/avg_num, avg_loss_sp/avg_num))
print(str(self.sample_vox_size)+" Epoch: [%2d/%2d] time: %4.4f,loss_value_sp: %.6f, loss_color_sp: %.6f,loss_value_sp_unpair: %.6f, loss_color_sp_unpair: %.6f,loss_value_sp_rec: %.6f, loss_color_2_rec: %.6f, loss_sp: %.6f" % (epoch, training_epoch, time.time() - start_time,avg_loss_value/avg_num, avg_loss_color2/avg_num, avg_loss_value_unpair/avg_num, avg_loss_color2_unpair/avg_num, avg_loss_value_rec/avg_num,avg_loss_color2_rec/avg_num, avg_loss_sp/avg_num))
if epoch%5==4:
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
save_dir = os.path.join(self.checkpoint_path,self.checkpoint_name+str(self.sample_vox_size)+"-"+str(epoch)+"_raw.pth")
self.checkpoint_manager_pointer = (self.checkpoint_manager_pointer+1)%self.max_to_keep
#delete checkpoint
if self.checkpoint_manager_list[self.checkpoint_manager_pointer] is not None:
if os.path.exists(self.checkpoint_manager_list[self.checkpoint_manager_pointer]):
os.remove(self.checkpoint_manager_list[self.checkpoint_manager_pointer])
#save checkpoint
torch.save(self.im_network.state_dict(), save_dir)
#update checkpoint manager
self.checkpoint_manager_list[self.checkpoint_manager_pointer] = save_dir
#write file
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
fout = open(checkpoint_txt, 'w')
for i in range(self.max_to_keep):
pointer = (self.checkpoint_manager_pointer+self.max_to_keep-i)%self.max_to_keep
if self.checkpoint_manager_list[pointer] is not None:
fout.write(self.checkpoint_manager_list[pointer]+"\n")
fout.close()
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
save_dir = os.path.join(self.checkpoint_path,self.checkpoint_name+str(self.sample_vox_size)+"-"+str(epoch)+".pth")
self.checkpoint_manager_pointer = (self.checkpoint_manager_pointer+1)%self.max_to_keep
#delete checkpoint
if self.checkpoint_manager_list[self.checkpoint_manager_pointer] is not None:
if os.path.exists(self.checkpoint_manager_list[self.checkpoint_manager_pointer]):
os.remove(self.checkpoint_manager_list[self.checkpoint_manager_pointer])
#save checkpoint
torch.save(self.im_network.state_dict(), save_dir)
#update checkpoint manager
self.checkpoint_manager_list[self.checkpoint_manager_pointer] = save_dir
#write file
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
fout = open(checkpoint_txt, 'w')
for i in range(self.max_to_keep):
pointer = (self.checkpoint_manager_pointer+self.max_to_keep-i)%self.max_to_keep
if self.checkpoint_manager_list[pointer] is not None:
fout.write(self.checkpoint_manager_list[pointer]+"\n")
fout.close()
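		# Note: the block below appears to be a legacy z2voxel-style extraction kept from an
		# earlier version of this file; it references z / z_color that are not defined in this
		# scope and calls im_network with an older signature, so it is presumably unused.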
color_cube_float = np.zeros([3, self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
model_float = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
conf = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32)
#print (model_float.shape)
dimc = self.cell_grid_size #4
dimf = self.frame_grid_size #64
frame_flag = np.zeros([dimf+2,dimf+2,dimf+2],np.uint8)
color_cube = np.ones([3,dimf+2,dimf+2,dimf+2]).astype('float32')
queue = []
frame_batch_num = int(dimf**3/self.test_point_batch_size) #8
assert frame_batch_num>0
#print (dimf #64, dimf**3,262144, self.test_point_batch_size, 32768 , frame_batch_num 8)
#get frame grid values
for i in range(frame_batch_num):
point_coord = self.frame_coords[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
point_coord = np.expand_dims(point_coord, axis=0)
point_coord = torch.from_numpy(point_coord)
point_coord = point_coord.to(self.device)
_,_, model_out_, color_out_ = self.im_network(None, z, z_color, point_coord, is_training=False)
#print ('cube 0',torch.unique(color_out_.detach()))
#print ('model out', model_out_.shape, color_out_.shape) torch.Size([1, 32768, 1]) torch.Size([1, 32768, 3])
model_out = model_out_.detach().cpu().numpy()[0]
color_out_ = color_out_.detach().cpu().numpy()[0]
#print (color_out_.shape)
color_out = np.transpose(color_out_,(1,0))
x_coords = self.frame_x[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
y_coords = self.frame_y[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
z_coords = self.frame_z[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
#print (frame_flag.shape, x_coords,y_coords,z_coords, x_coords+1, y_coords+1,z_coords+1)
#print (model_out.shape, color_out.shape, self.test_point_batch_size, color_flag[:,x_coords,y_coords,z_coords].shape) (32768, 1) (32768, 3) 32768 (3, 32768)
frame_flag[x_coords+1,y_coords+1,z_coords+1] = np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]) #66,66,66
conf[x_coords+1,y_coords+1,z_coords+1] = np.reshape(model_out.astype(float), [self.test_point_batch_size])
color_cube[:,x_coords+1,y_coords+1,z_coords+1] = np.reshape(color_out, [3, self.test_point_batch_size]) #66,66,66
#print (x_coords,y_coords,z_coords,x_coords.shape,y_coords.shape,z_coords.shape)
#print ('cube 1',color_out.shape, np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]).shape, np.reshape(color_out, [3, self.test_point_batch_size]).shape, np.unique(color_cube), color_cube[:,x_coords,y_coords,z_coords].shape, frame_flag[x_coords+1,y_coords+1,z_coords+1].shape)
#get queue and fill up ones
for i in range(1,dimf+1):
for j in range(1,dimf+1):
for k in range(1,dimf+1):
x_coords = self.cell_x+(i-1)*dimc
#print ('xcorrds',x_coords,self.cell_x, i-1, dimc)
#print ('cellx,dimc',self.cell_x, dimc) cellx,dimc [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3] 4
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
#model_float[x_coords+1,y_coords+1,z_coords+1] = 1.0
#print (color_cube[:,i,j,k].shape, color_cube_float[:,x_coords+1,y_coords+1,z_coords+1])
color_cube_float[0,x_coords+1,y_coords+1,z_coords+1] = color_cube[0,i,j,k]
color_cube_float[1,x_coords+1,y_coords+1,z_coords+1] = color_cube[1,i,j,k]
color_cube_float[2,x_coords+1,y_coords+1,z_coords+1] = color_cube[2,i,j,k]
#print (i,j,k,color_cube[0,i,j,k]*255,color_cube[1,i,j,k]*255,color_cube[2,i,j,k]*255)
maxv = np.max(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
minv = np.min(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
if maxv!=minv:
queue.append((i,j,k))
elif maxv==1:
x_coords = self.cell_x+(i-1)*dimc
#print ('xcorrds',x_coords,self.cell_x, i-1, dimc)
#print ('cellx,dimc',self.cell_x, dimc) cellx,dimc [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3] 4
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = 1.0
#print (color_cube[:,i,j,k].shape, color_cube_float[:,x_coords+1,y_coords+1,z_coords+1])
#color_cube_float[0,x_coords+1,y_coords+1,z_coords+1] = color_cube[0,i,j,k]
#color_cube_float[1,x_coords+1,y_coords+1,z_coords+1] = color_cube[1,i,j,k]
#color_cube_float[2,x_coords+1,y_coords+1,z_coords+1] = color_cube[2,i,j,k]
#print ('c',color_cube[:,i,j,k], color_cube[:,i,j,k].shape)
cell_batch_size = dimc**3
cell_batch_num = int(self.test_point_batch_size/cell_batch_size)
assert cell_batch_num>0
#run queue
while len(queue)>0:
batch_num = min(len(queue),cell_batch_num)
point_list = []
cell_coords = []
for i in range(batch_num):
point = queue.pop(0)
point_list.append(point)
cell_coords.append(self.cell_coords[point[0]-1,point[1]-1,point[2]-1])
cell_coords = np.concatenate(cell_coords, axis=0)
cell_coords = np.expand_dims(cell_coords, axis=0)
cell_coords = torch.from_numpy(cell_coords)
cell_coords = cell_coords.to(self.device)
_,_, model_out_batch_, color_out_batch_ = self.im_network(None, z,z_color, cell_coords, is_training=False)
model_out_batch = model_out_batch_.detach().cpu().numpy()[0]
color_out_batch = color_out_batch_.detach().cpu().numpy()[0]
for i in range(batch_num):
point = point_list[i]
#print (model_out_batch.shape, color_out_batch.shape)
model_out = model_out_batch[i*cell_batch_size:(i+1)*cell_batch_size,0]
#color_out = color_out_batch[i*cell_batch_size:(i+1)*cell_batch_size,:]
#print ('color out',color_out.shape)
x_coords = self.cell_x+(point[0]-1)*dimc
y_coords = self.cell_y+(point[1]-1)*dimc
z_coords = self.cell_z+(point[2]-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = model_out
#for c in range(3):
# color_cube_float[c,x_coords+1,y_coords+1,z_coords+1] = color_out[:,c]
if np.max(model_out)>self.sampling_threshold:
for i in range(-1,2):
pi = point[0]+i
if pi<=0 or pi>dimf: continue
for j in range(-1,2):
pj = point[1]+j
if pj<=0 or pj>dimf: continue
for k in range(-1,2):
pk = point[2]+k
if pk<=0 or pk>dimf: continue
if (frame_flag[pi,pj,pk] == 0):
frame_flag[pi,pj,pk] = 1
queue.append((pi,pj,pk))
return model_float, color_cube_float, color_cube
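# This z2voxel variant returns three dense grids:
#   model_float      -- (real_size+2)^3 occupancy field, refined cell-by-cell near the surface
#   color_cube_float -- 3 x (real_size+2)^3 RGB volume copied up from the coarse frame colors
#   color_cube       -- 3 x (frame_grid_size+2)^3 coarse RGB grid used for nearest-neighbour lookup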
#output shape as ply and point cloud as ply
def test_mesh_point(self, config):
#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
if 1: #os.path.exists(checkpoint_txt):
#model_dir='checkpoint/color_all_ae_64/IM_AE.model16-29_raw.pth'
model_dir='checkpoint/color_all_ae_64/IM_AE.model_shape_chair.pth'
models=torch.load(model_dir)
self.im_network.load_state_dict(torch.load(model_dir),strict=True)
#model_dir='../merge-cyclic-multi-att/checkpoint/color_all_ae_64/IM_AE.model64-209_raw.pth' #IM_AE.model32-199_save_from150.pth'
#self.im_network.load_state_dict(torch.load(model_dir),strict=False)
print(" [*] Load SUCCESS", model_dir)
else:
print(" [!] Load failed...")
return
self.im_network.eval()
#print (self.im_network)
#self.im_network.model.dropout.train()
#for t in range(config.start, min(len(self.data_voxels),config.end)):
idx=0
for data_idx in range(config.start, config.end, 2): #self.datas[config.start:config.end]:
data=self.datas[data_idx]
data_mani=self.datas[data_idx+1]
text_list=data[0].split(' ')[:-1] #.astype('int')
text_array = np.asarray(list(map(int, text_list)))
text_list_mani=data_mani[0].split(' ')[:-1] #.astype('int')
text_array_mani = np.asarray(list(map(int, text_list_mani)))
print (data[1])
#if '539548' not in data[1]: #c3b6c, ad174 73b369
# continue
path='../hdf5_test_new/'+data[1]+'.hdf5'
data_dict = h5py.File(path, 'r')
name=path.split('/')[-1]
self.data_points=((data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5)
self.data_values=(data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32))
self.data_colors=(data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0)
tmp_data_voxels_colors = data_dict['voxels_colors'][:]/255.0
tmp_data_voxels_colors = np.transpose(tmp_data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors=(np.reshape(tmp_data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels=(np.reshape(data_dict['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
t=0
batch_voxels_ = self.data_voxels[t:t+1].astype(np.float32)
batch_voxels = torch.from_numpy(batch_voxels_)
batch_voxels = batch_voxels.to(self.device)
batch_voxels_colors = self.data_voxels_colors[t:t+1].astype(np.float32)
batch_voxels_colors = torch.from_numpy(batch_voxels_colors)
batch_voxels_colors = batch_voxels_colors.to(self.device)
texts=np.zeros((1, 32))
masks=np.zeros((1, 32))
texts[0,:min(32,len(text_list))]=text_array[:min(32,len(text_list))]
masks[0,:min(32,len(text_list))]=1
texts=torch.from_numpy(texts).to(self.device).long()
masks=torch.from_numpy(masks).to(self.device).bool()
texts_mani=np.zeros((1, 32))
masks_mani=np.zeros((1, 32))
texts_mani[0,:min(32,len(text_list_mani))]=text_array_mani[:min(32,len(text_list_mani))]
masks_mani[0,:min(32,len(text_list_mani))]=1
texts_mani=torch.from_numpy(texts_mani).to(self.device).long()
masks_mani=torch.from_numpy(masks_mani).to(self.device).bool()
model_z,_, z_vector_c2, _,_,_,_, words= self.im_network(texts, masks, None, None,None, None,None, None,None, is_training=False)
model_z_mani,_, z_vector_c2_mani, _,_,_,_, words_mani= self.im_network(texts_mani, masks_mani, None, None,None, None,None, None,None, is_training=False)
num_noise=1
noise = torch.randn(model_z.size(0)*num_noise, 64).float().to('cuda')
z_in=torch.reshape(torch.unsqueeze(torch.cat((model_z, z_vector_c2), 1),1).repeat(1,num_noise,1), (-1, 512))
z_div=self.im_network.net_g(z_in, noise.detach()*1)
z_in=torch.reshape(torch.unsqueeze(torch.cat((model_z_mani, z_vector_c2), 1),1).repeat(1,num_noise,1), (-1, 512))
z_div_mani=self.im_network.net_g(z_in, noise.detach()*1)
model_zs=z_div[:, :256]
z_vector_c2s=z_div[:, 256:]
model_zs_mani=z_div_mani[:, :256]
z_vector_c2s_mani=z_div_mani[:, 256:]
for idx in range(model_zs.shape[0]):
print (idx)
model_z=model_zs[idx,:]
z_vector_c2=z_vector_c2s[idx,:]
model_z_mani=model_zs_mani[idx,:]
z_vector_c2_mani=z_vector_c2s_mani[idx,:]
#start=time.time()
model_float, color_cube_float, frame_flag, color_cube = self.z2voxel(model_z, z_vector_c2, words, texts, config)
model_float_mani, color_cube_float_mani, frame_flag_mani, color_cube_mani = self.z2voxel(model_z_mani, z_vector_c2_mani, words_mani, texts_mani, config)
#print (time.time()-start)
'''from plyfile import PlyData,PlyElement
some_array=[]
size=258
for i in range(1,64):
for j in range(1,64):
for k in range(1,64):
if frame_flag[1:-1,1:-1,1:-1][int(i),int(j),int(k)]>0.5:
some_array.append((i,j,k,color_cube[2,int(i),int(j),int(k)]*255,color_cube[1,int(i),int(j),int(k)]*255,color_cube[0,int(i),int(j),int(k)]*255))
some_array = np.array(some_array, dtype=[('x', 'float32'), ('y', 'float32'), ('z', 'float32'), ('red', 'uint8'), ('green', 'uint8'), ('blue', 'uint8')])
el = PlyElement.describe(some_array, 'vertex')
PlyData([el]).write('show/'+name+str(data[2][:50].replace('/',' '))+str(idx)+'test_new_input.ply')
shape64=torch.unsqueeze(torch.unsqueeze(torch.from_numpy(model_float).cuda(),0),0)
color64=torch.unsqueeze(torch.from_numpy(color_cube_float).cuda(),0)
color64[:,0,:,:,:][torch.where(shape64[:,0,:,:,:]==0)]=0
color64[:,1,:,:,:][torch.where(shape64[:,0,:,:,:]==0)]=0
color64[:,2,:,:,:][torch.where(shape64[:,0,:,:,:]==0)]=0
cube_float64=torch.cat((shape64,color64),1)[:,:,1:-1,1:-1,1:-1][:,:,0:256:4,0:256:4,0:256:4]
model_z_shape, _, z_vector_c2_shape,_,_,_,_ = self.im_network(None,None, cube_float64, None,None, None,None, None, words, is_training=False)
#print (model_z.shape, z_vector_c2.shape)
text_feat=torch.cat((model_z.unsqueeze(0),z_vector_c2.unsqueeze(0)),1).detach().cpu().numpy()
shape_feat=torch.cat((model_z_shape,z_vector_c2_shape),1).detach().cpu().numpy()
#print (text_feat.shape, shape_feat.shape)
print ('dis',np.sum(np.abs(text_feat-shape_feat)))
np.save('val_evaluate/shape_feat/'+data[1]+'_'+str(data[2][:50].replace('/',' '))+str(idx)+'.npy', shape_feat)
np.save('val_evaluate/text_feat/'+data[1]+'_'+str(data[2][:50].replace('/',' '))+str(idx)+'.npy', text_feat)'''
model_pad=np.zeros((66,66,66))
model_pad[1:-1,1:-1,1:-1]=frame_flag[1:-1,1:-1,1:-1] #model_float[1:-1:4,1:-1:4,1:-1:4]
vertices, triangles = mcubes.marching_cubes(model_pad, self.sampling_threshold)
#color_cube already holds the coarse 66^3 frame colors returned by z2voxel
x = np.linspace(0, 66, 66)
y = np.linspace(0, 66, 66)
z = np.linspace(0, 66, 66)
#color_cube[:,1:-1,1:-1,1:-1]=color_cube
color_cube[:,0,:,:]=color_cube[:,1,:,:]
color_cube[:,:,0,:]=color_cube[:,:,1,:]
color_cube[:,:,:,0]=color_cube[:,:,:,1]
color_cube[:,-1,:,:]=color_cube[:,-2,:,:]
color_cube[:,:,-1,:]=color_cube[:,:,-2,:]
color_cube[:,:,:,-1]=color_cube[:,:,:,-2]
#color_cube[:,1:-1,1:-1,1:-1]=self.data_voxels_colors[0,:,:,:,:]
my_interpolating_function0 = RegularGridInterpolator((x, y, z), color_cube[0,:,:,:],method='nearest') #_float[0,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function1 = RegularGridInterpolator((x, y, z), color_cube[1,:,:,:],method='nearest') #_float[1,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function2 = RegularGridInterpolator((x, y, z), color_cube[2,:,:,:],method='nearest') #_float[2,1:-1:4,1:-1:4,1:-1:4])
color0=my_interpolating_function0(vertices)
color1=my_interpolating_function1(vertices)
color2=my_interpolating_function2(vertices)
colors=np.zeros((color0.shape[0],3))
colors[:,0]=color0
colors[:,1]=color1
colors[:,2]=color2
write_ply_triangle("show/"+str(name)+str(data[2][:50].replace('/',' '))+"_mesh_pred"+str(idx)+".ply", vertices, triangles, colors)
model_pad=np.zeros((66,66,66))
model_pad[1:-1,1:-1,1:-1]= frame_flag_mani[1:-1,1:-1,1:-1]
vertices, triangles = mcubes.marching_cubes(model_pad, self.sampling_threshold)
x = np.linspace(0, 66, 66)
y = np.linspace(0, 66, 66)
z = np.linspace(0, 66, 66)
color_cube=color_cube_mani
#color_cube[:,1:-1,1:-1,1:-1]=color_cube
color_cube[:,0,:,:]=color_cube[:,1,:,:]
color_cube[:,:,0,:]=color_cube[:,:,1,:]
color_cube[:,:,:,0]=color_cube[:,:,:,1]
color_cube[:,-1,:,:]=color_cube[:,-2,:,:]
color_cube[:,:,-1,:]=color_cube[:,:,-2,:]
color_cube[:,:,:,-1]=color_cube[:,:,:,-2]
#color_cube[:,1:-1,1:-1,1:-1]=self.data_voxels_colors[0,:,:,:,:]
my_interpolating_function0 = RegularGridInterpolator((x, y, z), color_cube[0,:,:,:],method='nearest') #_float[0,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function1 = RegularGridInterpolator((x, y, z), color_cube[1,:,:,:],method='nearest') #_float[1,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function2 = RegularGridInterpolator((x, y, z), color_cube[2,:,:,:],method='nearest') #_float[2,1:-1:4,1:-1:4,1:-1:4])
color0=my_interpolating_function0(vertices)
color1=my_interpolating_function1(vertices)
color2=my_interpolating_function2(vertices)
colors=np.zeros((color0.shape[0],3))
colors[:,0]=color0
colors[:,1]=color1
colors[:,2]=color2
write_ply_triangle("show/"+str(name)+str(data[2][:50].replace('/',' '))+"_mesh_mani_"+str(idx)+".ply", vertices, triangles, colors)
if config.high_resolution:
model_pad=np.zeros((258,258,258))
model_pad[1:-1,1:-1,1:-1]= model_float[1:-1,1:-1,1:-1] #model_float[1:-1:4,1:-1:4,1:-1:4]
vertices, triangles = mcubes.marching_cubes(model_float, self.sampling_threshold)
x = np.linspace(0, 258,258)
y = np.linspace(0, 258,258)
z = np.linspace(0, 258,258)
color_cube=color_cube_float
color_cube[:,0,:,:]=color_cube[:,1,:,:]
color_cube[:,:,0,:]=color_cube[:,:,1,:]
color_cube[:,:,:,0]=color_cube[:,:,:,1]
color_cube[:,-1,:,:]=color_cube[:,-2,:,:]
color_cube[:,:,-1,:]=color_cube[:,:,-2,:]
color_cube[:,:,:,-1]=color_cube[:,:,:,-2]
my_interpolating_function0 = RegularGridInterpolator((x, y, z), color_cube[0,:,:,:],method='nearest') #_float[0,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function1 = RegularGridInterpolator((x, y, z), color_cube[1,:,:,:],method='nearest') #_float[1,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function2 = RegularGridInterpolator((x, y, z), color_cube[2,:,:,:],method='nearest') #_float[2,1:-1:4,1:-1:4,1:-1:4])
color0=my_interpolating_function0(vertices)
color1=my_interpolating_function1(vertices)
color2=my_interpolating_function2(vertices)
colors=np.zeros((color0.shape[0],3))
colors[:,0]=color0
colors[:,1]=color1
colors[:,2]=color2
write_ply_triangle("show258/"+str(name)+str(data[2][:50].replace('/',' '))+"_pred_258_"+str(idx)+".ply", vertices, triangles, colors)
model_pad=np.zeros((258,258,258))
model_pad[1:-1,1:-1,1:-1]= model_float_mani[1:-1,1:-1,1:-1] #model_float[1:-1:4,1:-1:4,1:-1:4]
vertices, triangles = mcubes.marching_cubes(model_float_mani, self.sampling_threshold)
x = np.linspace(0, 258,258)
y = np.linspace(0, 258,258)
z = np.linspace(0, 258,258)
color_cube=color_cube_float_mani
color_cube[:,0,:,:]=color_cube[:,1,:,:]
color_cube[:,:,0,:]=color_cube[:,:,1,:]
color_cube[:,:,:,0]=color_cube[:,:,:,1]
color_cube[:,-1,:,:]=color_cube[:,-2,:,:]
color_cube[:,:,-1,:]=color_cube[:,:,-2,:]
color_cube[:,:,:,-1]=color_cube[:,:,:,-2]
my_interpolating_function0 = RegularGridInterpolator((x, y, z), color_cube[0,:,:,:],method='nearest') #_float[0,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function1 = RegularGridInterpolator((x, y, z), color_cube[1,:,:,:],method='nearest') #_float[1,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function2 = RegularGridInterpolator((x, y, z), color_cube[2,:,:,:],method='nearest') #_float[2,1:-1:4,1:-1:4,1:-1:4])
color0=my_interpolating_function0(vertices)
color1=my_interpolating_function1(vertices)
color2=my_interpolating_function2(vertices)
colors=np.zeros((color0.shape[0],3))
colors[:,0]=color0
colors[:,1]=color1
colors[:,2]=color2
write_ply_triangle("show258/"+str(name)+str(data[2][:50].replace('/',' '))+"_mesh_258_"+str(idx)+".ply", vertices, triangles, colors)
def get_z(self, config):
#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
if os.path.exists(checkpoint_txt):
fin = open(checkpoint_txt)
model_dir = fin.readline().strip()
fin.close()
model_dir='checkpoint/color_all_ae_64/IM_AE.model16-199_raw.pth'
self.im_network.load_state_dict(torch.load(model_dir))
print(" [*] Load SUCCESS", model_dir)
else:
print(" [!] Load failed...")
return
self.im_network.eval()
#for t in range(config.start, min(len(self.data_voxels),config.end)):
for path in glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5_train/*.hdf5'): #self.data_paths: #[config.start:config.end]:
print (path)
name=path.split('/')[-1]
data_dict = h5py.File(path, 'r')
self.data_points=((data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5)
self.data_values=(data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32))
self.data_colors=(data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0)
tmp_data_voxels_colors = data_dict['voxels_colors'][:]/255.0
tmp_data_voxels_colors = np.transpose(tmp_data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors=(np.reshape(tmp_data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels=(np.reshape(data_dict['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
t=0
batch_voxels_ = self.data_voxels[t:t+1].astype(np.float32)
batch_voxels = torch.from_numpy(batch_voxels_)
batch_voxels = batch_voxels.to(self.device)
batch_voxels_colors = self.data_voxels_colors[t:t+1].astype(np.float32)
batch_voxels_colors = torch.from_numpy(batch_voxels_colors)
batch_voxels_colors = batch_voxels_colors.to(self.device)
#print (torch.unique(batch_voxels_colors))
#z_vector, _, _ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None, None, is_training=False)
#model_z,_,_ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None,None, None, is_training=False)
model_z,z_vector_color,_,_ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None,None, None, is_training=False)
z=model_z.detach().cpu().numpy()
z_vector_color=z_vector_color.detach().cpu().numpy()
#print (z.shape, z_vector_color.shape)
z=np.concatenate((z,z_vector_color),1)
print (z.shape)
np.save('../feat32_color_train/'+name+'.npy',z)
'''#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
if os.path.exists(checkpoint_txt):
fin = open(checkpoint_txt)
model_dir = fin.readline().strip()
fin.close()
self.im_network.load_state_dict(torch.load(model_dir))
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
hdf5_path = self.checkpoint_dir+'/'+self.model_dir+'/'+self.dataset_name+'_train_z.hdf5'
shape_num = len(self.data_voxels)
hdf5_file = h5py.File(hdf5_path, mode='w')
hdf5_file.create_dataset("zs", [shape_num,self.z_dim], np.float32)
self.im_network.eval()
#print(shape_num)
for t in range(shape_num):
batch_voxels = self.data_voxels[t:t+1].astype(np.float32)
batch_voxels = torch.from_numpy(batch_voxels)
batch_voxels = batch_voxels.to(self.device)
out_z,_ ,_= self.im_network(batch_voxels, None, None, is_training=False)
hdf5_file["zs"][t:t+1,:] = out_z.detach().cpu().numpy()
hdf5_file.close()
print("[z]")'''
def test_z(self, config, batch_z, dim):
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
for t in range(batch_z.shape[0]):
model_z = batch_z[t:t+1]
model_z = torch.from_numpy(model_z)
model_z = model_z.to(self.device)
model_float = self.z2voxel(model_z)
#img1 = np.clip(np.amax(model_float, axis=0)*256, 0,255).astype(np.uint8)
#img2 = np.clip(np.amax(model_float, axis=1)*256, 0,255).astype(np.uint8)
#img3 = np.clip(np.amax(model_float, axis=2)*256, 0,255).astype(np.uint8)
#cv2.imwrite(config.sample_dir+"/"+str(t)+"_1t.png",img1)
#cv2.imwrite(config.sample_dir+"/"+str(t)+"_2t.png",img2)
#cv2.imwrite(config.sample_dir+"/"+str(t)+"_3t.png",img3)
#print (model_float)
vertices, triangles = mcubes.marching_cubes(model_float, self.sampling_threshold)
vertices = (vertices.astype(np.float32)-0.5)/self.real_size-0.5
#vertices = self.optimize_mesh(vertices,model_z)
write_ply(config.sample_dir+"/"+"out"+str(t)+".ply", vertices, triangles)
print("[sample Z]")
# generation/model_div.py -- liuzhengzhe/Towards-Implicit-Text-Guided-Shape-Generation
import os, csv
import time
import math
import random
import numpy as np
import h5py
import glob
import scipy.interpolate
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
from scipy.interpolate import RegularGridInterpolator
import mcubes
import mcubes as mc
from utils import *
import copy
from mcubes import marching_cubes #, grid_interp
#pytorch 1.2.0 implementation
#from dalle_pytorch import OpenAIDiscreteVAE, DALLE
#from dalle_pytorch.transformer import Transformer,Transformer_mutual
from transformers import AutoModelForSequenceClassification, AutoConfig
from torch.nn.utils import spectral_norm
from pytorch_lamb import Lamb
from math import sqrt
from transformers import (
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
)
def grid_interp(vol, points):
"""
Interpolate volume data at given points
Inputs:
vol: 4D torch tensor (C, Nz, Ny, Nx)
points: point locations (Np, 3)
Outputs:
output: interpolated data (Np, C)
"""
#vol=torch.from_numpy(vol)#.cuda()
if vol.is_cuda:
return mc.grid_interp_cuda(vol, points)
else:
return mc.grid_interp_cpu(vol, points) #'''===
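# Illustrative use of grid_interp (assumes the patched mcubes build that exposes
# grid_interp_cpu / grid_interp_cuda; plain PyMCubes does not ship these):
#   >>> vol = torch.rand(3, 66, 66, 66)      # (C, Nz, Ny, Nx) color volume
#   >>> pts = torch.rand(1000, 3) * 65.0     # (Np, 3) continuous voxel coordinates
#   >>> rgb = grid_interp(vol, pts)          # (Np, 3) interpolated values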
class PositionalEncoder(nn.Module):
def __init__(self, d_model, max_seq_len = 80):
super().__init__()
self.d_model = d_model
pe = torch.zeros(max_seq_len, d_model)
for pos in range(max_seq_len):
for i in range(0, d_model, 2):
pe[pos, i] = \
math.sin(pos / (10000 ** ((2 * i)/d_model)))
pe[pos, i + 1] = \
math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
# make embeddings relatively larger
x = x * math.sqrt(self.d_model)
#add constant to embedding
seq_len = x.size(1)
#print ('xshape', x.shape, seq_len)
x = x + Variable(self.pe[:,:seq_len], requires_grad=False).cuda()
return x
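# Shape sketch: x of shape (batch, seq_len, d_model) is scaled by sqrt(d_model) and the first
# seq_len rows of the fixed sinusoidal table are added, so the output keeps the same shape.
# In this file the encoding is built with d_model=32 and the default max_seq_len of 80.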
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
#print ('mask score ', mask.shape, scores.shape)
#print ('s1',scores.shape)
if mask is not None:
mask = mask.unsqueeze(1).unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1e9)
#print ('s2',scores.shape)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
#print ('output',output.shape)
return output
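# Scaled dot-product attention: softmax(q @ k^T / sqrt(d_k)) @ v, with masked positions set
# to -1e9 before the softmax. As called from MultiHeadAttention below, q/k/v have shape
# (bs, heads, seq, d_k) and mask has shape (bs, seq), broadcast to (bs, 1, 1, seq).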
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout = 0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model, bias=True)
self.v_linear = nn.Linear(d_model, d_model, bias=True)
self.k_linear = nn.Linear(d_model, d_model, bias=True)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model, bias=True)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
# perform linear operation and split into h heads
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
# transpose to get dimensions bs * h * sl * d_model
k = k.transpose(1,2)
q = q.transpose(1,2)
v = v.transpose(1,2)
#print (k.shape, q.shape, v.shape, self.d_k, mask.shape)
# calculate attention using function we will define next
scores = attention(q, k, v, self.d_k, mask, self.dropout)
#print ('score',scores.shape)
# concatenate heads and put through final linear layer
concat = scores.transpose(1,2).contiguous()\
.view(bs, -1, self.d_model)
#print ('cct',concat.shape)
output = self.out(concat)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=16, dropout = 0.1):
super().__init__()
# d_ff defaults to 16 here, much smaller than the usual 2048-wide transformer feed-forward
self.linear_1 = nn.Linear(d_model, d_ff, bias=True)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model, bias=True)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class Norm(nn.Module):
def __init__(self, d_model, eps = 1e-5):
super().__init__()
self.size = d_model
# create two learnable parameters to calibrate normalisation
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \
/ (x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
class DecoderLayer(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
#self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
#self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
#self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(heads, d_model) #nn.MultiheadAttention(embed_dim=16, num_heads=4)
self.ff = FeedForward(d_model).cuda()
def forward(self, x, e_outputs, src_mask):
#print ('1',self.norm_2.bias)
#x2 = self.norm_1(x)
#x = x + self.dropout_1(self.attn_1(x2, x2, x2)) # trg_mask
x = self.norm_2(x)
#print ('2',torch.unique(x))
#x=torch.transpose(x,0,1)
#e_outputs=torch.transpose(e_outputs,0,1)
#print ('x,e',x.shape, e_outputs.shape)
#print (self.attn_2(x, e_outputs, e_outputs)[0].shape, x.shape)
x = x +self.dropout_2(self.attn_2(x, e_outputs, e_outputs.clone(), src_mask))
# x=torch.transpose(x,0,1)
#print ('3',torch.unique(x))
x = self.norm_3(x)
#print ('4',torch.unique(x))
x = x+self.dropout_3(self.ff(x))
#print ('5',torch.unique(x))
return x
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
class generator(nn.Module):
def __init__(self, z_dim, point_dim, gf_dim):
super(generator, self).__init__()
self.z_dim = z_dim
self.point_dim = point_dim
self.gf_dim = gf_dim
d_model=32
self.linear_1 = nn.Linear(self.z_dim+self.point_dim+d_model, self.gf_dim*8, bias=True)
self.linear_2 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_3 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_4 = nn.Linear(self.gf_dim*8, self.gf_dim*4, bias=True)
self.linear_5 = nn.Linear(self.gf_dim*4, self.gf_dim*2, bias=True)
self.linear_6 = nn.Linear(self.gf_dim*2, self.gf_dim*1, bias=True)
self.linear_7 = nn.Linear(self.gf_dim*1, 1, bias=True)
self.linear_8 = nn.Linear(self.gf_dim*1, 3, bias=True)
nn.init.normal_(self.linear_1.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_1.bias,0)
nn.init.normal_(self.linear_2.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_2.bias,0)
nn.init.normal_(self.linear_3.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_3.bias,0)
nn.init.normal_(self.linear_4.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_4.bias,0)
nn.init.normal_(self.linear_5.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_5.bias,0)
nn.init.normal_(self.linear_6.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_6.bias,0)
nn.init.normal_(self.linear_7.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_7.bias,0)
nn.init.normal_(self.linear_8.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_8.bias,0)
self.linear_text_k = nn.Linear(768, d_model, bias=True)
#self.linear_text_v = nn.Linear(768, d_model, bias=True)
self.linear_shape_q = nn.Linear(259, d_model, bias=True)
self.linear_final = nn.Linear(d_model, d_model, bias=True)
nn.init.normal_(self.linear_text_k.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_k.bias,0)
#nn.init.normal_(self.linear_text_v.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_v.bias,0)
nn.init.normal_(self.linear_shape_q.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_shape_q.bias,0)
self.N=4
self.layers = get_clones(DecoderLayer(d_model, 4), self.N)
self.pe = PositionalEncoder(d_model)
'''dropout=0.1
self.softmax=torch.nn.Softmax(1)
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
#self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(4, d_model)
self.ff = FeedForward(d_model).cuda()'''
def forward(self, points, z, texts, masks, is_training=False):
zs = z.view(-1,1,self.z_dim).repeat(1,points.size()[1],1)
#print (points.shape, z.shape)
pointz = torch.cat([points,zs],2)
#print (texts.shape, pointz.shape)
#print (torch.unique(points),torch.unique(zs))
linear_text_k = self.linear_text_k(texts)
#linear_text_v = self.linear_text_v(texts)
linear_shape_q = self.linear_shape_q(pointz.detach())
#print (linear_text_k.shape, linear_shape_q.shape)
'''att1=torch.einsum('btd,bsd->bts', linear_text_k, linear_shape_q) #b, t, s
att1=self.softmax(att1)
position_sense_feat=torch.einsum('bts,btd->bsd', att1, linear_text_v ) '''
#print ('pointz',torch.unique(pointz), torch.unique(texts))
#print ('weight', torch.unique(self.linear_text_k.weight), torch.unique(self.linear_shape_q.weight))
#print ('bias', torch.unique(self.linear_text_k.bias), torch.unique(self.linear_shape_q.bias))
x=linear_shape_q
src_mask=masks
#print (masks.shape)
'''x = self.dropout_2(self.attn_2(linear_shape_q, linear_text_k, linear_text_v, src_mask))
x2 = self.norm_3(x)
x = self.dropout_3(self.ff(x2))'''
linear_text_k = self.pe(linear_text_k)
#print ('x1',torch.unique(x),self.linear_text_k.)
#print ('linear_text_k',torch.unique(linear_text_k))
for i in range(self.N):
x = self.layers[i](x, linear_text_k, src_mask)
x=self.linear_final(x)/5.0
#print ('pointz',torch.unique(pointz))
#print ('x2',torch.unique(x))
#print (torch.unique(pointz) ,torch.unique(x))
#print (torch.unique(pointz),torch.unique(x))
pointz = torch.cat([pointz, x],2)
#print (torch.unique(position_sense_feat))
l1 = self.linear_1(pointz)
l1 = F.leaky_relu(l1, negative_slope=0.02, inplace=True)
l2 = self.linear_2(l1)
l2 = F.leaky_relu(l2, negative_slope=0.02, inplace=True)
l3 = self.linear_3(l2)
l3 = F.leaky_relu(l3, negative_slope=0.02, inplace=True)
l4 = self.linear_4(l3)
l4 = F.leaky_relu(l4, negative_slope=0.02, inplace=True)
l5 = self.linear_5(l4)
l5 = F.leaky_relu(l5, negative_slope=0.02, inplace=True)
l6 = self.linear_6(l5)
l6 = F.leaky_relu(l6, negative_slope=0.02, inplace=True)
l7 = self.linear_7(l6)
l8 = self.linear_8(l6)
#l7 = torch.clamp(l7, min=0, max=1)
l7 = torch.max(torch.min(l7, l7*0.01+0.99), l7*0.01)
l8 = torch.max(torch.min(l8, l8*0+1), l8*0)
#for i in range(4096):
# #print ('l8',l8[0,i,:])
return l7
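# Rough interface of generator.forward (shapes inferred from this file, not from any docs):
#   points : (B, N, 3) query coordinates       z     : (B, 256) shape latent
#   texts  : (B, T, 768) BERT word features    masks : (B, T) attention mask
# The word features are cross-attended into each point feature through self.layers, and the
# MLP head returns l7, a (B, N, 1) occupancy prediction softly clamped to [0, 1].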
class generator_color(nn.Module):
def __init__(self, z_dim, point_dim, gf_dim):
super(generator_color, self).__init__()
self.z_dim = z_dim
self.point_dim = point_dim
self.gf_dim = gf_dim
d_model=32
self.linear_1 = nn.Linear(self.z_dim+self.point_dim+d_model, self.gf_dim*8, bias=True)
self.linear_2 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_3 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_4 = nn.Linear(self.gf_dim*8, self.gf_dim*4, bias=True)
self.linear_5 = nn.Linear(self.gf_dim*4, self.gf_dim*2, bias=True)
self.linear_6 = nn.Linear(self.gf_dim*2, self.gf_dim*1, bias=True)
self.linear_7 = nn.Linear(self.gf_dim*1, 1, bias=True)
self.linear_8 = nn.Linear(self.gf_dim*1, 3, bias=True)
nn.init.normal_(self.linear_1.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_1.bias,0)
nn.init.normal_(self.linear_2.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_2.bias,0)
nn.init.normal_(self.linear_3.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_3.bias,0)
nn.init.normal_(self.linear_4.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_4.bias,0)
nn.init.normal_(self.linear_5.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_5.bias,0)
nn.init.normal_(self.linear_6.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_6.bias,0)
nn.init.normal_(self.linear_7.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_7.bias,0)
nn.init.normal_(self.linear_8.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_8.bias,0)
self.linear_text_k = nn.Linear(768, d_model, bias=True)
#self.linear_text_v = nn.Linear(768, d_model, bias=True)
self.linear_shape_q = nn.Linear(259, d_model, bias=True)
self.linear_final = nn.Linear(d_model, d_model, bias=True)
nn.init.normal_(self.linear_text_k.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_k.bias,0)
#nn.init.normal_(self.linear_text_v.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_text_v.bias,0)
nn.init.normal_(self.linear_shape_q.weight, mean=1e-5, std=0.02)
#nn.init.constant_(self.linear_shape_q.bias,0)
self.N=4
self.layers = get_clones(DecoderLayer(d_model, 4), self.N)
self.pe = PositionalEncoder(d_model)
#multihead_attn = nn.MultiheadAttention(embed_dim=16, num_heads=4)
#self.transformer_model = nn.Transformer(d_model=16, nhead=4, num_encoder_layers=0, num_decoder_layers=1, dim_feedforward=16)
'''self.softmax=torch.nn.Softmax(1)
dropout=0.1
self.softmax=torch.nn.Softmax(1)
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
#self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(4, d_model)
self.ff = FeedForward(d_model).cuda()'''
def forward(self, points, z, texts, masks, is_training=False):
zs = z.view(-1,1,self.z_dim).repeat(1,points.size()[1],1)
pointz = torch.cat([points,zs],2)
#print (texts.shape, pointz.shape)
#print (torch.unique(points),torch.unique(zs))
linear_text_k = self.linear_text_k(texts)
#linear_text_v = self.linear_text_v(texts)
linear_shape_q = self.linear_shape_q(pointz.detach())
#print (linear_text_k.shape, linear_shape_q.shape)
'''att1=torch.einsum('btd,bsd->bts', linear_text_k, linear_shape_q) #b, t, s
att1=self.softmax(att1)
position_sense_feat=torch.einsum('bts,btd->bsd', att1, linear_text_v ) '''
x=linear_shape_q
#linear_text_k = self.pe(linear_text_k)
#print ('generator color',torch.unique(x))
src_mask=masks
for i in range(self.N):
x = self.layers[i](x, linear_text_k, src_mask)
x=self.linear_final(x)/5.0
#print ('pointz',torch.unique(pointz))
#print ('x2',torch.unique(x))
#print (torch.unique(pointz) ,torch.unique(x))
#torch.nn.Transformer(d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', custom_encoder=None, custom_decoder=None)
#attn_output, attn_output_weights = multihead_attn(x, key, value)
#print (x.shape,linear_text_k.shape)
#x = self.transformer_model(torch.transpose(linear_text_k,0,1), torch.transpose(x,0,1) )
#print (x.shape)
#x=torch.transpose(x,0,1)
#print (torch.unique(pointz),torch.unique(x))
#print (masks.shape)
'''x =self.dropout_2(self.attn_2(linear_shape_q, linear_text_k, linear_text_v, src_mask))
x2 = self.norm_3(x)
x = self.dropout_3(self.ff(x2))'''
'''linear_text_k = self.linear_text_k(texts)
linear_text_v = self.linear_text_v(texts)
linear_shape_q = self.linear_shape_q(pointz)
att1=torch.einsum('btd,bsd->bts', linear_text_k, linear_shape_q) #b, t, s
att1=self.softmax(att1)
position_sense_feat=torch.einsum('bts,btd->bsd', att1, linear_text_v ) '''
pointz = torch.cat([pointz, x],2)
l1 = self.linear_1(pointz)
l1 = F.leaky_relu(l1, negative_slope=0.02, inplace=True)
l2 = self.linear_2(l1)
l2 = F.leaky_relu(l2, negative_slope=0.02, inplace=True)
l3 = self.linear_3(l2)
l3 = F.leaky_relu(l3, negative_slope=0.02, inplace=True)
l4 = self.linear_4(l3)
l4 = F.leaky_relu(l4, negative_slope=0.02, inplace=True)
l5 = self.linear_5(l4)
l5 = F.leaky_relu(l5, negative_slope=0.02, inplace=True)
l6 = self.linear_6(l5)
l6 = F.leaky_relu(l6, negative_slope=0.02, inplace=True)
#l7 = self.linear_7(l6)
l8 = self.linear_8(l6)
#l7 = torch.clamp(l7, min=0, max=1)
#l7 = torch.max(torch.min(l7, l7*0.01+0.99), l7*0.01)
l8 = torch.max(torch.min(l8, l8*0+1), l8*0)
#for i in range(4096):
# #print ('l8',l8[0,i,:])
return l8
class encoder(nn.Module):
def __init__(self, ef_dim, z_dim):
super(encoder, self).__init__()
self.ef_dim = ef_dim
self.z_dim = z_dim
self.conv_1 = nn.Conv3d(1+3, self.ef_dim, 4, stride=2, padding=1, bias=False)
self.in_1 = nn.InstanceNorm3d(self.ef_dim)
self.conv_2 = nn.Conv3d(self.ef_dim, self.ef_dim*2, 4, stride=2, padding=1, bias=False)
self.in_2 = nn.InstanceNorm3d(self.ef_dim*2)
self.conv_3 = nn.Conv3d(self.ef_dim*2, self.ef_dim*4, 4, stride=2, padding=1, bias=False)
self.in_3 = nn.InstanceNorm3d(self.ef_dim*4)
self.conv_4 = nn.Conv3d(self.ef_dim*4, self.ef_dim*8, 4, stride=2, padding=1, bias=False)
self.in_4 = nn.InstanceNorm3d(self.ef_dim*8)
self.conv_5 = nn.Conv3d(self.ef_dim*8, self.z_dim, 4, stride=1, padding=0, bias=True)
self.conv_6 = nn.Conv3d(self.ef_dim*8, self.z_dim, 4, stride=1, padding=0, bias=True)
nn.init.xavier_uniform_(self.conv_1.weight)
nn.init.xavier_uniform_(self.conv_2.weight)
nn.init.xavier_uniform_(self.conv_3.weight)
nn.init.xavier_uniform_(self.conv_4.weight)
nn.init.xavier_uniform_(self.conv_5.weight)
nn.init.constant_(self.conv_5.bias,0)
nn.init.xavier_uniform_(self.conv_6.weight)
nn.init.constant_(self.conv_6.bias,0)
def forward(self, inputs, is_training=False):
#print ('input',inputs.shape)
d_1 = self.in_1(self.conv_1(inputs))
d_1 = F.leaky_relu(d_1, negative_slope=0.02, inplace=True)
d_2 = self.in_2(self.conv_2(d_1))
d_2 = F.leaky_relu(d_2, negative_slope=0.02, inplace=True)
d_3 = self.in_3(self.conv_3(d_2))
d_3 = F.leaky_relu(d_3, negative_slope=0.02, inplace=True)
d_4 = self.in_4(self.conv_4(d_3))
d_4 = F.leaky_relu(d_4, negative_slope=0.02, inplace=True)
d_5 = self.conv_5(d_4)
d_5 = d_5.view(-1, self.z_dim)
d_5 = torch.sigmoid(d_5)
d_6 = self.conv_6(d_4)
d_6 = d_6.view(-1, self.z_dim)
d_6 = torch.sigmoid(d_6)
return d_5, d_6
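# The voxel encoder takes a 4-channel 64^3 grid (occupancy + RGB) and emits two sigmoid
# codes of length z_dim: the shape code d_5 and the color code d_6. Hypothetical call:
#   >>> enc = encoder(ef_dim=32, z_dim=256)
#   >>> z_shape, z_color = enc(torch.rand(1, 4, 64, 64, 64))   # each (1, 256)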
def initLinear(block, type='xavier_uniform'):
assert type in ['xavier_uniform']
nn.init.xavier_uniform_(block.weight, gain=0.02)
nn.init.constant_(block.bias, 0)
return block
'''class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
class AdaptiveInstanceNorm(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.LayerNorm(in_channel,elementwise_affine=False) #InstanceNorm1d(in_channel)
self.style = EqualLinear(style_dim, in_channel * 2)
self.style.linear.bias.data[:in_channel] = 1
self.style.linear.bias.data[in_channel:] = 0
def forward(self, input, style):
#print (style.shape,'style')
#input=input.unsqueeze(2)
style = self.style(style)#.unsqueeze(2) #.unsqueeze(3)
#print (style.shape)
gamma, beta = style.chunk(2, 1)
#print (style.shape, input.shape)
out = self.norm(input)
#print (out.shape, style.shape, gamma.shape, beta.shape)
out = gamma * out + beta
#out=out.squeeze(2)
#print (out.shape)
return out
class LatentGenerator(nn.Module):
def __init__(self, num_features, hidden=64, z_dim=64):
super().__init__()
self.fc1 = nn.Linear(num_features, num_features)
self.adain_1 = AdaptiveInstanceNorm(num_features, z_dim)
self.fc2 = nn.Linear(num_features, num_features)
self.adain_2 = AdaptiveInstanceNorm(num_features, z_dim)
self.fc3 = nn.Linear(num_features, num_features)
self.adain_3 = AdaptiveInstanceNorm(num_features, z_dim)
self.fc4 = nn.Linear(num_features, num_features)
self.adain_4 = AdaptiveInstanceNorm(num_features, z_dim)
self.fc5 = nn.Linear(num_features, num_features)
self.adain_5 = AdaptiveInstanceNorm(num_features, z_dim)
self.fc6 = nn.Linear(num_features, num_features)
fc1 = initLinear(self.fc1)
fc2 = initLinear(self.fc2)
fc3 = initLinear(self.fc3)
fc4 = initLinear(self.fc4)
fc5 = initLinear(self.fc5)
fc6 = initLinear(self.fc6)
layers=[]
for i in range(8):
layers.append(EqualLinear(z_dim, z_dim))
layers.append(nn.LeakyReLU(0.2))
self.mlp = nn.Sequential(*layers)
self.fc1 = spectral_norm(fc1)
self.fc2 = spectral_norm(fc2)
self.fc3 = spectral_norm(fc3)
self.fc4 = spectral_norm(fc4)
self.fc5 = spectral_norm(fc5)
self.fc6 = spectral_norm(fc6)
#self.norm1 = ConditionInjection(num_features, z_dim)
#self.norm2 = ConditionInjection(hidden, z_dim)
# self.norm3 = ConditionInjection(num_features, z_dim)
def forward(self, x, style):
#if z is None:
# z = torch.randn(x.size(0), self.condition_dim).float().cuda()
out = self.fc1(x)
style=self.mlp(style)
out = F.leaky_relu(out, .2)
#print ('1',out.shape, style.shape)
out = self.adain_1(out, style)
#out = self.norm1(out, z)
#print ('2',out.shape)
#print ('3',out.shape)
out = self.fc2(out)
out = F.leaky_relu(out, .2)
out = self.adain_2(out, style)
out = self.fc3(out)
out = F.leaky_relu(out, .2)
out = self.adain_3(out, style)
#out = self.norm2(out, z)
out = self.fc4(out)
out = F.leaky_relu(out, .2)
out = self.adain_4(out, style)
out = self.fc5(out)
out = F.leaky_relu(out, .2)
out = self.adain_5(out, style)
out = self.fc6(out)
out = F.sigmoid(out)
return out '''
class ConditionInjection(nn.Module):
def __init__(self, num_features, condition_dim=64, norm=nn.LayerNorm):
super().__init__()
if norm is not None:
if norm is nn.LayerNorm:
self.norm = norm(num_features, elementwise_affine=False)
elif norm is nn.BatchNorm1d:
self.norm = norm(num_features, affine=False)
else:
#print ('IN')
self.norm = norm(num_features, affine=False)
self.condition_dim = condition_dim
fc1 = nn.Linear(condition_dim, condition_dim)
fc1 = initLinear(fc1)
fc1 = spectral_norm(fc1)
fc2 = nn.Linear(condition_dim, num_features * 2)
fc2 = initLinear(fc2)
fc2 = spectral_norm(fc2)
self.projection = nn.Sequential(
fc1,
nn.LeakyReLU(.2),
fc2,
)
def forward(self, x, z=None): # x shape
if z is None:
z = torch.randn(x.size(0), self.condition_dim).float().cuda()
y = self.projection(z)
#print (x.shape, y.shape, z.shape)
for _ in range(x.dim() - y.dim()):
y = y.unsqueeze(-1)#.unsqueeze(-1)
gamma, beta = y.chunk(2, 1)
#print(x.shape, y.shape, gamma.shape, beta.shape)
#x=x.unsqueeze(-1)
#print (x.shape)
out = self.norm(x) if self.norm is not None else x
out = out * (1+gamma) + beta
#out=out.squeeze(-1)
return out
class LatentGenerator(nn.Module):
def __init__(self, num_features, hidden=64, z_dim=64):
super().__init__()
fc1 = nn.Linear(num_features, num_features)
fc2 = nn.Linear(num_features, num_features)
fc3 = nn.Linear(num_features, hidden)
fc4 = nn.Linear(hidden, hidden*2)
fc5 = nn.Linear(hidden*2, num_features)
fc6 = nn.Linear(num_features, num_features)
fc1 = initLinear(fc1)
fc2 = initLinear(fc2)
fc3 = initLinear(fc3)
fc4 = initLinear(fc4)
fc5 = initLinear(fc5)
fc6 = initLinear(fc6)
self.fc1 = spectral_norm(fc1)
self.fc2 = spectral_norm(fc2)
self.fc3 = spectral_norm(fc3)
self.fc4 = spectral_norm(fc4)
self.fc5 = spectral_norm(fc5)
self.fc6 = spectral_norm(fc6)
self.norm1 = ConditionInjection(num_features, z_dim)
self.norm2 = ConditionInjection(hidden, z_dim)
# self.norm3 = ConditionInjection(num_features, z_dim)
def forward(self, x, z=None):
if z is None:
z = torch.randn(x.size(0), self.norm1.condition_dim).float().cuda() #norm1 stores the condition (noise) dimension
out = self.fc1(x)
out = self.norm1(out, z)
out = F.leaky_relu(out, .2)
out = self.fc2(out)
out = F.leaky_relu(out, .2)
out = self.fc3(out)
out = self.norm2(out, z)
out = F.leaky_relu(out, .2)
out = self.fc4(out)
out = F.leaky_relu(out, .2)
out = self.fc5(out)
out = F.leaky_relu(out, .2)
out = self.fc6(out)
out = F.sigmoid(out)
return out
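# LatentGenerator refines a concatenated (shape, color) latent of length num_features (512 in
# im_network below) under a 64-d noise code injected by the two ConditionInjection layers,
# then squashes the output back to [0, 1]. Hypothetical call:
#   >>> g = LatentGenerator(512)
#   >>> out = g(torch.rand(4, 512), torch.randn(4, 64))   # out: (4, 512)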
class CA_NET(nn.Module):
# some code is modified from vae examples
# (https://github.com/pytorch/examples/blob/master/vae/main.py)
def __init__(self):
super(CA_NET, self).__init__()
self.t_dim = 512
self.c_dim = 512
self.fc = nn.Linear(self.t_dim, self.c_dim * 2, bias=True)
self.relu = nn.ReLU()
def encode(self, text_embedding):
x = self.relu(self.fc(text_embedding))
mu = x[:, :self.c_dim]
logvar = x[:, self.c_dim:]
return mu, logvar
def reparametrize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = torch.cuda.FloatTensor(std.size()).normal_()
eps = Variable(eps)
return eps.mul(std).add_(mu)
def forward(self, text_embedding):
mu, logvar = self.encode(text_embedding)
c_code = self.reparametrize(mu, logvar)
return c_code, mu, logvar
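# CA_NET is the usual conditioning-augmentation block: the 512-d embedding is mapped to
# (mu, logvar) and a sample is drawn with the reparameterisation trick
#   c = mu + exp(0.5 * logvar) * eps,  eps ~ N(0, I),
# so gradients flow through mu and logvar. (Defined here but left commented out in im_network.)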
class im_network(nn.Module):
def __init__(self, ef_dim, gf_dim, z_dim, point_dim):
super(im_network, self).__init__()
self.ef_dim = ef_dim
self.gf_dim = gf_dim
self.z_dim = z_dim
self.point_dim = point_dim
self.encoder = encoder(self.ef_dim, self.z_dim)
pretrained_path='bert-base-uncased'
config = AutoConfig.from_pretrained(
str(pretrained_path), #num_labels=len(dataBunch.labels)
)
self.model = AutoModelForSequenceClassification.from_pretrained(
str(pretrained_path), config=config, state_dict=None
)
#self.ca=CA_NET()
self.net_g=LatentGenerator(512)
#self.net_g_color=LatentGenerator(256)
self.encoder = encoder(self.ef_dim, self.z_dim)
self.generator = generator(self.z_dim, self.point_dim, self.gf_dim)
self.generator_color = generator_color(self.z_dim, self.point_dim, self.gf_dim)
def forward(self, texts, masks, inputs, z_vector, z_vector_color, z_vector_c2,out_all,point_coord, words, is_training=False):
if texts is not None:
text_inputs = {
"input_ids": texts,
"attention_mask": masks,
}
if is_training:
#print ('traiing')
z_vector_std, z_vector_c_std = self.encoder(inputs, is_training=is_training)
z_vector, _, z_vector_c2, words = self.model(**text_inputs)
'''num_noise=10
noise = torch.randn(z_vector.size(0)*num_noise, 64).float().to('cuda')
noise2 = torch.randn(z_vector_c2.size(0)*num_noise, 64).float().to('cuda')
#print ('1', torch.unique(noise))
#print (z_vector.shape)
#print ('1x', torch.unique(torch.cat((z_vector, z_vector_c2), 1)), torch.unique(torch.cat((z_vector_std, z_vector_c_std), 1)))
z_in=torch.reshape(torch.unsqueeze(z_vector,1).repeat(1,num_noise,1), (-1, 256))
z_in2=torch.reshape(torch.unsqueeze(z_vector_c2,1).repeat(1,num_noise,1), (-1, 256))
#print ('2', z_in.shape, noise.shape)
#z_noise=torch.cat((z_in, noise), 2)
z_div=self.net_g(z_in, noise.detach())
z_div2=self.net_g_color(z_in2, noise2.detach())
#print (z_div.shape)
z_div=torch.reshape(z_div, (-1, num_noise, 256))
z_div2=torch.reshape(z_div2, (-1, num_noise, 256))
z_std=torch.unsqueeze(z_vector_std,1).repeat(1,num_noise,1)
z_std2=torch.unsqueeze(z_vector_c_std,1).repeat(1,num_noise,1)
#print ('4', torch.unique(z_std))
#print (z_div.shape, z_vector.shape)
diff=torch.sum(torch.abs(z_div-z_std),2)
diff2=torch.sum(torch.abs(z_div2-z_std2),2)
#print ('diff', diff.shape)
idx=torch.argmin(diff+diff2,1)
#print ('5', torch.unique(idx))
#z_best=z_div[:,idx,:]
#print (z_div.shape, z_best.shape, z_best)
idxs=torch.unsqueeze(torch.unsqueeze(idx,-1),-1)
idxs=idxs.repeat(1, num_noise, z_in.shape[-1])
#print ('idx, z div', idx.shape, z_div.shape, torch.unique(idx))
#print ('idxs', idxs.shape, idxs)
z_best=torch.gather(z_div, 1, idxs)[:,0,:]
z_best2=torch.gather(z_div2, 1, idxs)[:,0,:]
#print ('6', torch.unique(z_best))
#z_best=torch.index_select(z_div, 0, idx)
#print ('z best', z_best.shape, z_best)
z_vector=z_best #[:, :256]
#z_vector_c2=z_best2 #[:, 256:]
#print (z_vector_c2.shape, z_vector_c_std.shape)'''
num_noise=10
noise = torch.randn(z_vector.size(0)*num_noise, 64).float().to('cuda')
#z_vector_ca, mu, var=self.ca(torch.cat((z_vector, z_vector_c2), 1))
#z_vector=z_vector_ca[:,:256]
#z_vector_c2=z_vector_ca[:,256:]
z_in=torch.reshape(torch.unsqueeze(torch.cat((z_vector, z_vector_c2), 1),1).repeat(1,num_noise,1), (-1, 512))
#print ('2', torch.unique(z_in))
#z_noise=torch.cat((z_in, noise), 2)
z_div=self.net_g(z_in, noise.detach())
#print ('3', torch.unique(z_div))
z_div=torch.reshape(z_div, (-1, num_noise, 512))
z_std=torch.unsqueeze(torch.cat((z_vector_std, z_vector_c_std), 1),1).repeat(1,num_noise,1)
#print ('4', torch.unique(z_std))
diff=torch.sum(torch.abs(z_div-z_std),2)
#print ('diff', diff.shape)
idx=torch.argmin(diff,1)
#print ('5', torch.unique(idx))
#z_best=z_div[:,idx,:]
#print (z_div.shape, z_best.shape, z_best)
idxs=torch.unsqueeze(torch.unsqueeze(idx,-1),-1)
idxs=idxs.repeat(1, num_noise, z_in.shape[-1])
#print ('idx, z div', idx.shape, z_div.shape, torch.unique(idx))
#print ('idxs', idxs.shape, idxs)
z_best=torch.gather(z_div, 1, idxs)[:,0,:]
#print ('6', torch.unique(z_best))
#z_best=torch.index_select(z_div, 0, idx)
#print ('z best', z_best.shape, z_best)
z_vector=z_best[:, :256]
z_vector_c2=z_best[:, 256:]
return z_vector,None, z_vector_c2, z_vector_std,None, z_vector_c_std, None, None, words
else:
if texts is not None:
z_vector,z_vector_color, z_vector_c2, words = self.model(**text_inputs)
#z_vector_ca, mu, var=self.ca(torch.cat((z_vector, z_vector_c2), 1))
#z_vector=z_vector_ca[:,:256]
#z_vector_c2=z_vector_ca[:,256:]
num_noise=3
noise = torch.randn(z_vector.size(0)*num_noise, 64).float().to('cuda')
z_in=torch.reshape(torch.unsqueeze(torch.cat((z_vector, z_vector_c2), 1),1).repeat(1,num_noise,1), (-1, 512))
z_div=self.net_g(z_in, noise.detach()*15)
z_vector=z_div[:, :256]
z_vector_c2=z_div[:, 256:]
return z_vector, None, z_vector_c2,None, None,None,None, words
if z_vector is not None and point_coord is not None:
net_out = self.generator(point_coord, z_vector, words, masks, is_training=is_training)
net_out_color = self.generator_color(point_coord, z_vector_c2, words, masks, is_training=is_training)
#print ('net out unique', torch.unique(net_out))
return None,None,None, net_out, net_out_color, None, None #, residue_color+s1_color, s1_color
#elif z_vector is not None and point_coord is not None:
# net_out = self.generator(point_coord, z_vector, is_training=is_training)
# return None,None,None, net_out, None,None,None,
elif (inputs is not None) and (inputs.shape[1]==4):
#z_vector_std, z_vector_color_std, z_vector_c2_std = self.encoder(inputs, is_training=is_training)
z_vector_std, z_vector_c_std = self.encoder(inputs, is_training=is_training)
return z_vector_std,None, z_vector_c_std,None, None,None,None #, net_out, None,None,None,
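# im_network.forward dispatch, as used in this file:
#   * training with texts: encode the GT voxels and the text, draw num_noise=10 candidate
#     latents from net_g and keep the one closest (L1) to the GT latent.
#   * eval with texts: draw num_noise=3 candidates with amplified noise and return them
#     stacked along the batch dimension, plus the BERT word features.
#   * z_vector + point_coord: decode occupancy and color at the query points.
#   * 4-channel voxel input only: run just the voxel encoder.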
class IM_div(object):
def __init__(self, config):
#progressive training
#1-- (16, 16*16*16)
#2-- (32, 16*16*16)
#3-- (64, 16*16*16*4)
self.sample_vox_size = config.sample_vox_size
#print (self.sample_vox_size)
if self.sample_vox_size==16:
self.load_point_batch_size = 16*16*16
self.point_batch_size = 16*16*16
self.shape_batch_size = 32
elif self.sample_vox_size==32:
self.load_point_batch_size = 16*16*16
self.point_batch_size = 16*16*16
self.shape_batch_size = 32
elif self.sample_vox_size==64:
self.load_point_batch_size = 16*16*16*4
self.point_batch_size = 16*16*16
self.shape_batch_size = 16
self.input_size = 64 #input voxel grid size
self.ef_dim = 32
self.gf_dim = 128
self.z_dim = 256
self.point_dim = 3
self.dataset_name = config.dataset
#self.dataset_load = self.dataset_name + '_train'
#self.data_paths=glob.glob('hdf5/*.hdf5') #/ccd5e*.hdf5')
self.datas=[]
#start=1
with open('train_official.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
#if start==1:
# start=0
# continue
text=row[2]
name=row[1]
self.datas.append((text,name))
#break
#for i in range(32):
# self.datas.append(self.datas[0])
if not (config.train):# or config.getz):
#self.data_paths=glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5/*.hdf5')
self.datas=[]
with open('test_official.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
text=row[2]
name=row[1]
text_str=row[0]
self.datas.append((text,name,text_str))
#self.data_paths.sort()
#self.dataset_load = self.dataset_name + '_test'
self.checkpoint_dir = config.checkpoint_dir
self.data_dir = config.data_dir
#data_hdf5_name = self.data_dir+'/'+self.dataset_load+'.hdf5'
#self.data_paths=glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5/*.hdf5')
#print ('data name lzz',data_hdf5_name)
'''if not (config.train or config.getz):
self.dataset_load = self.dataset_name + '_test'
data_hdf5_name = self.data_dir+'/'+self.dataset_load+'.hdf5'
data_dict = h5py.File(data_hdf5_name, 'r')
print ('load')
self.data_points = (data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5
self.data_values = data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32)
self.data_colors = data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0
self.data_voxels = data_dict['voxels'][:]
self.data_voxels_colors = data_dict['voxels_colors'][:]/255.0
self.data_voxels_colors = np.transpose(self.data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors = np.reshape(self.data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size])
#reshape to NCHW
self.data_voxels = np.reshape(self.data_voxels, [-1,1,self.input_size,self.input_size,self.input_size])
#else:
# print("error: cannot load "+data_hdf5_name)
# exit(0)'''
#print ('loaded')
if torch.cuda.is_available():
self.device = torch.device('cuda')
torch.backends.cudnn.benchmark = True
else:
self.device = torch.device('cpu')
#build model
self.im_network = im_network(self.ef_dim, self.gf_dim, self.z_dim, self.point_dim)
self.im_network.to(self.device)
#print params
for param_tensor in self.im_network.model.parameters():
param_tensor.requires_grad=False
for param_tensor in self.im_network.encoder.parameters():
param_tensor.requires_grad=False #print(param_tensor, "\t", self.im_network.state_dict()[param_tensor].size())
for param_tensor in self.im_network.generator.parameters():
param_tensor.requires_grad=False #print(param_tensor, "\t", self.im_network.state_dict()[param_tensor].size())
for param_tensor in self.im_network.generator_color.parameters():
param_tensor.requires_grad=False #print(param_tensor, "\t", self.im_network.state_dict()[param_tensor].size())
self.optimizer = torch.optim.Adam(self.im_network.parameters(), lr=0.001, betas=(config.beta1, 0.999))
#self.optimizer = self.get_optimizer(0.001, optimizer_type="lamb")
#self.optimizer = torch.optim.Adam([{'params': base_params}, {'params': self.im_network.model.parameters(), 'lr': 0.001}], lr=config.learning_rate*1, betas=(config.beta1, 0.999))
#self.scheduler = self.get_scheduler(
# self.optimizer, t_total=int(60470*config.epoch), schedule_type="warmup_cosine"
#)
#pytorch does not have a checkpoint manager
#have to define it myself to manage max num of checkpoints to keep
self.max_to_keep = 2
self.checkpoint_path = os.path.join(self.checkpoint_dir, self.model_dir)
self.checkpoint_name='div.model'
self.checkpoint_manager_list = [None] * self.max_to_keep
self.checkpoint_manager_pointer = 0
#loss
def KL_loss(mu, logvar):
# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.mean(KLD_element).mul_(-0.5)
return KLD
self.KL_loss=KL_loss
def network_loss(G,point_value):
return torch.mean((G-point_value)**2)
self.loss = network_loss
def color_loss(G,point_color,mask):
return torch.mean(((G-point_color)*mask)**2)
self.color_loss = color_loss
#keep everything a power of 2
self.cell_grid_size = 4
self.frame_grid_size = 64
self.real_size = self.cell_grid_size*self.frame_grid_size #=256, output point-value voxel grid size in testing
self.test_size = 32 #related to testing batch_size, adjust according to gpu memory size
self.test_point_batch_size = self.test_size*self.test_size*self.test_size #do not change
self.test_point_batch_size_in_training=4096
#get coords for training
dima = self.test_size
dim = self.frame_grid_size
self.aux_x = np.zeros([dima,dima,dima],np.uint8)
self.aux_y = np.zeros([dima,dima,dima],np.uint8)
self.aux_z = np.zeros([dima,dima,dima],np.uint8)
multiplier = int(dim/dima)
multiplier2 = multiplier*multiplier
multiplier3 = multiplier*multiplier*multiplier
for i in range(dima):
for j in range(dima):
for k in range(dima):
self.aux_x[i,j,k] = i*multiplier
self.aux_y[i,j,k] = j*multiplier
self.aux_z[i,j,k] = k*multiplier
self.coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
self.coords[i*multiplier2+j*multiplier+k,:,:,:,0] = self.aux_x+i
self.coords[i*multiplier2+j*multiplier+k,:,:,:,1] = self.aux_y+j
self.coords[i*multiplier2+j*multiplier+k,:,:,:,2] = self.aux_z+k
self.coords = (self.coords.astype(np.float32)+0.5)/dim-0.5
self.coords = np.reshape(self.coords,[multiplier3,self.test_point_batch_size,3])
self.coords = torch.from_numpy(self.coords)
self.coords = self.coords.to(self.device)
#get coords for testing
dimc = self.cell_grid_size
dimf = self.frame_grid_size
self.cell_x = np.zeros([dimc,dimc,dimc],np.int32)
self.cell_y = np.zeros([dimc,dimc,dimc],np.int32)
self.cell_z = np.zeros([dimc,dimc,dimc],np.int32)
self.cell_coords = np.zeros([dimf,dimf,dimf,dimc,dimc,dimc,3],np.float32)
self.frame_coords = np.zeros([dimf,dimf,dimf,3],np.float32)
self.frame_coords_train = torch.zeros([16,16,16,3]).cuda()
self.frame_x = np.zeros([dimf,dimf,dimf],np.int32) #.long()
self.frame_y = np.zeros([dimf,dimf,dimf],np.int32) #.long()
self.frame_z = np.zeros([dimf,dimf,dimf],np.int32) #.long()
for i in range(dimc):
for j in range(dimc):
for k in range(dimc):
self.cell_x[i,j,k] = i
self.cell_y[i,j,k] = j
self.cell_z[i,j,k] = k
for i in range(dimf):
for j in range(dimf):
for k in range(dimf):
self.cell_coords[i,j,k,:,:,:,0] = self.cell_x+i*dimc
self.cell_coords[i,j,k,:,:,:,1] = self.cell_y+j*dimc
self.cell_coords[i,j,k,:,:,:,2] = self.cell_z+k*dimc
self.frame_coords[i,j,k,0] = i
self.frame_coords[i,j,k,1] = j
self.frame_coords[i,j,k,2] = k
self.frame_x[i,j,k] = i
self.frame_y[i,j,k] = j
self.frame_z[i,j,k] = k
for i in range(16):
for j in range(16):
for k in range(16):
self.frame_coords_train[i,j,k,0] = i
self.frame_coords_train[i,j,k,1] = j
self.frame_coords_train[i,j,k,2] = k
self.cell_coords = (self.cell_coords.astype(np.float32)+0.5)/self.real_size-0.5
self.cell_coords = np.reshape(self.cell_coords,[dimf,dimf,dimf,dimc*dimc*dimc,3])
self.cell_x = np.reshape(self.cell_x,[dimc*dimc*dimc])
self.cell_y = np.reshape(self.cell_y,[dimc*dimc*dimc])
self.cell_z = np.reshape(self.cell_z,[dimc*dimc*dimc])
self.frame_x = np.reshape(self.frame_x,[dimf*dimf*dimf])
self.frame_y = np.reshape(self.frame_y,[dimf*dimf*dimf])
self.frame_z = np.reshape(self.frame_z,[dimf*dimf*dimf])
self.frame_coords = (self.frame_coords+0.5)/dimf-0.5
self.frame_coords = np.reshape(self.frame_coords,[dimf*dimf*dimf,3])
self.frame_coords_train = (self.frame_coords_train+0.5)/16.0-0.5
self.frame_coords_train = torch.reshape(self.frame_coords_train,[16*16*16,3])
#self.conv_edge = nn.Conv3d(3, 3, 3, stride=1, padding=1, groups=3, bias=False)
#self.conv_edge.to(self.device)
self.sampling_threshold = 0.5 #final marching cubes threshold
self.upsample=nn.Upsample(scale_factor=4,mode='trilinear').cuda()
def get_optimizer(self, lr, optimizer_type="lamb"):
# Prepare optimiser and schedule
no_decay = [] #"bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in self.im_network.model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.0, #self.weight_decay,
},
{
"params": [
p
for n, p in self.im_network.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
if optimizer_type == "lamb":
optimizer = Lamb(optimizer_grouped_parameters, lr=lr, eps=1e-8)
elif optimizer_type == "adamw":
optimizer = AdamW(
optimizer_grouped_parameters, lr=lr, eps=1e-8
)
return optimizer
def get_scheduler(self, optimizer, t_total, schedule_type="warmup_cosine"):
SCHEDULES = {
"warmup_cosine": get_cosine_schedule_with_warmup,
}
        if schedule_type is None or schedule_type == "none":
            return None
elif schedule_type == "warmup_constant":
return SCHEDULES[schedule_type](
optimizer, num_warmup_steps=0 #self.warmup_steps
)
else:
return SCHEDULES[schedule_type](
optimizer,
num_warmup_steps=0, #self.warmup_steps,
num_training_steps=t_total,
)
def z2voxel(self, z, z_color, words, masks, config):
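        # Coarse-to-fine decoding of a latent code into voxels:
        #   1. evaluate the implicit decoder on the 64^3 frame grid in batches, recording
        #      per-voxel occupancy (frame_flag) and colour (color_cube);
        #   2. if config.high_resolution, copy frame colours into the padded 256^3 grid,
        #      mark frame cells whose whole 3x3x3 neighbourhood is occupied as solid, and
        #      queue boundary cells (mixed occupancy in their neighbourhood);
        #   3. pop queued cells, evaluate the decoder on their 4^3 sub-cell coordinates,
        #      and keep expanding across newly occupied neighbours until the queue empties.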
color_cube_float = np.zeros([3, self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
model_float = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
conf = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32)
dimc = self.cell_grid_size #4
dimf = self.frame_grid_size #64
frame_flag = np.zeros([dimf+2,dimf+2,dimf+2],np.uint8)
color_cube = np.ones([3,dimf+2,dimf+2,dimf+2]).astype('float32')
queue = []
frame_batch_num = int(dimf**3/self.test_point_batch_size) #8
assert frame_batch_num>0
for i in range(frame_batch_num):
point_coord = self.frame_coords[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
point_coord = np.expand_dims(point_coord, axis=0)
point_coord = torch.from_numpy(point_coord)
point_coord = point_coord.to(self.device)
_,_,_, model_out_, color_out_,_,_ = self.im_network(None,masks,None, z,None, z_color,None, point_coord, words, is_training=False)
model_out = model_out_.detach().cpu().numpy()[0]
color_out_ = color_out_.detach().cpu().numpy()[0]
color_out = np.transpose(color_out_,(1,0))
x_coords = self.frame_x[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
y_coords = self.frame_y[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
z_coords = self.frame_z[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
frame_flag[x_coords+1,y_coords+1,z_coords+1] = np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]) #66,66,66
conf[x_coords+1,y_coords+1,z_coords+1] = np.reshape(model_out.astype(float), [self.test_point_batch_size])
color_cube[:,x_coords+1,y_coords+1,z_coords+1] = np.reshape(color_out, [3, self.test_point_batch_size]) #66,66,66
if config.high_resolution:
for i in range(1,dimf+1):
for j in range(1,dimf+1):
for k in range(1,dimf+1):
x_coords = self.cell_x+(i-1)*dimc
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
color_cube_float[0,x_coords+1,y_coords+1,z_coords+1] = color_cube[0,i,j,k]
color_cube_float[1,x_coords+1,y_coords+1,z_coords+1] = color_cube[1,i,j,k]
color_cube_float[2,x_coords+1,y_coords+1,z_coords+1] = color_cube[2,i,j,k]
maxv = np.max(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
minv = np.min(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
if maxv!=minv:
queue.append((i,j,k))
elif maxv==1:
x_coords = self.cell_x+(i-1)*dimc
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = 1.0
cell_batch_size = dimc**3
cell_batch_num = int(self.test_point_batch_size/cell_batch_size)
assert cell_batch_num>0
#run queue
while len(queue)>0:
batch_num = min(len(queue),cell_batch_num)
point_list = []
cell_coords = []
for i in range(batch_num):
point = queue.pop(0)
point_list.append(point)
cell_coords.append(self.cell_coords[point[0]-1,point[1]-1,point[2]-1])
cell_coords = np.concatenate(cell_coords, axis=0)
cell_coords = np.expand_dims(cell_coords, axis=0)
cell_coords = torch.from_numpy(cell_coords)
cell_coords = cell_coords.to(self.device)
_,_,_, model_out_batch_, color_out_batch_,_,_ = self.im_network(None, masks,None,z,None,z_color,None, cell_coords, words, is_training=False)
model_out_batch = model_out_batch_.detach().cpu().numpy()[0]
color_out_batch = color_out_batch_.detach().cpu().numpy()[0]
for i in range(batch_num):
point = point_list[i]
model_out = model_out_batch[i*cell_batch_size:(i+1)*cell_batch_size,0]
x_coords = self.cell_x+(point[0]-1)*dimc
y_coords = self.cell_y+(point[1]-1)*dimc
z_coords = self.cell_z+(point[2]-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = model_out
if np.max(model_out)>self.sampling_threshold:
for i in range(-1,2):
pi = point[0]+i
if pi<=0 or pi>dimf: continue
for j in range(-1,2):
pj = point[1]+j
if pj<=0 or pj>dimf: continue
for k in range(-1,2):
pk = point[2]+k
if pk<=0 or pk>dimf: continue
if (frame_flag[pi,pj,pk] == 0):
frame_flag[pi,pj,pk] = 1
queue.append((pi,pj,pk))
return model_float, color_cube_float, frame_flag, color_cube
@property
def model_dir(self):
return "{}_ae_{}".format(self.dataset_name, self.input_size)
def train(self, config):
#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
print (checkpoint_txt)
if 1: #os.path.exists(checkpoint_txt):
pass
#fin = open(checkpoint_txt)
#model_dir = fin.readline().strip()
'''
model=torch.load(model_dir)
model2={}
for k in model.keys():
if 'encoder' in k or 'generator' in k:
continue
model2[k]=model[k]
self.im_network.load_state_dict(model2,strict=False)'''
#model_dir='../merge-nocyclic-multi-att-ori/checkpoint/color_all_ae_64/IM_AE.model64-334_raw.pth'
model_dir= config.initialize #'checkpoint/color_all_ae_64/IM_AE.model64-199_raw.pth' #'/mnt/sda/lzz/merge-nocyclic-multi-att-ori/checkpoint/color_all_ae_64/IM_AE.model64-199.pth'
self.im_network.load_state_dict(torch.load(model_dir),strict=False)
#model_dir='init149.pth'
#self.im_network.load_state_dict(torch.load(model_dir),strict=False)
#print(" [*] Load SUCCESS",model_dir)
else:
print(" [!] Load failed...")
shape_num = len(self.datas)
batch_index_list = np.arange(shape_num)
print("\n\n----------net summary----------")
print("training samples ", shape_num)
print("-------------------------------\n\n")
start_time = time.time()
assert config.epoch==0 or config.iteration==0
training_epoch = config.epoch + int(config.iteration/shape_num)
batch_num = int(shape_num/self.shape_batch_size)
point_batch_num = int(self.load_point_batch_size/self.point_batch_size)
#print ('xxxxxxxxxxxxxxxxxxxxxxxxxx')
for epoch in range(0, training_epoch): #int(model_dir.split('/')[-1].split('-')[-1].split('_')[0])
self.im_network.train()
np.random.shuffle(batch_index_list)
avg_loss_sp = 0
avg_loss_kl = 0
avg_loss_color = 0
avg_loss_color2 = 0
avg_loss_value = 0
avg_value_out =0
avg_color_out =0
avg_value_out_std =0
avg_color_out_std =0
avg_loss_value_rec =0
avg_loss_color2_rec =0
avg_num = 0
self.data_points=np.zeros((self.shape_batch_size,self.load_point_batch_size,3))
self.data_values=np.zeros((self.shape_batch_size,self.load_point_batch_size,1))
self.data_colors=np.zeros((self.shape_batch_size,self.load_point_batch_size,3))
self.data_voxels=np.zeros((self.shape_batch_size,1,64,64,64))
self.data_voxels_colors=np.zeros((self.shape_batch_size,3,64,64,64))
#self.pred_voxels=torch.zeros((self.shape_batch_size,1,64,64,64)).to(self.device)
#self.pred_voxels_colors=torch.zeros((self.shape_batch_size,3,64,64,64)).to(self.device)
for idx in range(batch_num):
#print (idx)
dxb = batch_index_list[idx*self.shape_batch_size:(idx+1)*self.shape_batch_size]
#print (dxb)
self.data_points[:]=0
self.data_values[:]=0
self.data_colors[:]=0
self.data_voxels[:]=0
self.data_voxels_colors[:]=0
#self.pred_voxels[:]=0
#self.pred_voxels_colors[:]=0
batch_paths=np.asarray(self.datas)[dxb]
texts=np.zeros((batch_paths.shape[0], 64))
masks=np.zeros((batch_paths.shape[0], 64))
for b in range(batch_paths.shape[0]): #path in batch_paths:
text_list=batch_paths[b][0].split(' ')[:-1] #.astype('int')
text_array = np.asarray(list(map(int, text_list)))
path='../hdf5_train_new/'+batch_paths[b][1]+'.hdf5'
name=batch_paths[b][1]
data_dict = h5py.File(path, 'r')
self.data_points[b,:,:]=((data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5)
self.data_values[b,:,:]=(data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32))
self.data_colors[b,:,:]=(data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0)
texts[b,:min(64,len(text_list))]=text_array[:min(64,len(text_list))]
masks[b,:min(64,len(text_list))]=1
#print (self.data_points.shape,self.data_values.shape, self.data_colors.shape)
tmp_data_voxels_colors = data_dict['voxels_colors'][:]/255.0
tmp_data_voxels_colors = np.transpose(tmp_data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors[b,:,:,:,:]=(np.reshape(tmp_data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels[b,:,:,:,:]=(np.reshape(data_dict['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
#print ('datapoints', data_dict['points_'+str(self.sample_vox_size)].shape, self.data_points.shape)
batch_voxels = self.data_voxels.astype(np.float32) #[dxb].astype(np.float32)
batch_voxels_colors = self.data_voxels_colors.astype(np.float32) # [dxb].astype(np.float32)
if point_batch_num==1:
point_coord = self.data_points#[dxb]
point_value = self.data_values#[dxb]
point_color = self.data_colors#[dxb]
else:
which_batch = 0 #np.random.randint(point_batch_num)
point_coord = self.data_points[which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size] #[dxb][which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]
point_value = self.data_values[which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]#[dxb][which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]
point_color = self.data_colors[which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]#[dxb][which_batch*self.point_batch_size:(which_batch+1)*self.point_batch_size]
batch_voxels = torch.from_numpy(batch_voxels).float()
batch_voxels_colors = torch.from_numpy(batch_voxels_colors).float()
#step=1 #round(batch_voxels_colors.shape[-1]/self.sample_vox_size)
#print (step)
#batch_voxels_colors_16=batch_voxels_colors[:,:,0:64:step,0:64:step,0:64:step].to(self.device)
#print ('voxels color 16',batch_voxels_colors_16.shape)
point_coord = torch.from_numpy(point_coord).float()
point_value = torch.from_numpy(point_value).float()
point_color = torch.from_numpy(point_color).float()
batch_voxels = batch_voxels.to(self.device)
batch_voxels_colors = batch_voxels_colors.to(self.device)
point_coord = point_coord.to(self.device)
point_value = point_value.to(self.device)
point_color = point_color.to(self.device)
texts=torch.from_numpy(texts).to(self.device).long()
masks=torch.from_numpy(masks).to(self.device).bool()
self.im_network.zero_grad()
z_vector,z_vector_color, z_vector_c2, z_vector_std, z_vector_color_std, z_vector_color2_std, net_out, residue_color, words = self.im_network(texts,masks, torch.cat((batch_voxels,batch_voxels_colors),1), None,None,None,None, point_coord, None, is_training=True)
frame_batch_num = 1
point_coord = self.frame_coords_train
point_coord = torch.unsqueeze(point_coord, 0)
point_coord = point_coord.repeat(z_vector.shape[0],1,1)
#_,_,_,model_out, color_out, color_final , color_s1 = self.im_network(None, None, None, z_vector, z_vector_color, z_vector_c2,None,point_coord, is_training=False)
'''_,_,_,model_out,color_final,_,_ = self.im_network(None, masks, None, z_vector, z_vector_color, z_vector_c2, None, point_coord, words, is_training=False)
model_out[torch.where(model_out>self.sampling_threshold)]=1
model_out[torch.where(model_out<=self.sampling_threshold)]=0
model_out=torch.reshape(model_out, (-1,1,16,16,16))
pred_shape=self.upsample(model_out) #self.pred_voxels[:]=
#_,_,_,_, color_out_, color_final , color_s1 = self.im_network(None, None,pred_shape, z_vector, z_vector_color, z_vector_c2,None, point_coord, is_training=False)
#print (color_out_.shape)
color_final=torch.transpose(color_final,1,2)
color_final=torch.reshape(color_final, (-1,3,16,16,16))
pred_color=self.upsample(color_final) #self.pred_voxels_colors[:]
pred_color[:,0,:,:,:][torch.where(pred_shape[:,0,:,:,:]==0)]=0
pred_color[:,1,:,:,:][torch.where(pred_shape[:,0,:,:,:]==0)]=0
pred_color[:,2,:,:,:][torch.where(pred_shape[:,0,:,:,:]==0)]=0
z_vector_rec, z_vector_c2_rec =self.im_network.encoder(torch.cat((pred_shape, pred_color),1), is_training=False)'''
#z_vector_rec=z_vector_rec.detach()
#z_vector_c2_rec=z_vector_c2_rec.detach()
#kl_loss = self.KL_loss(mu, var)*0.01
errSP_value = self.loss(z_vector, z_vector_std)*2
errSP_color2 = self.loss(z_vector_c2, z_vector_color2_std)*1.0
#errSP_value_out = self.loss(net_out, point_value)
#point_value3_2=point_value.repeat(1,1,3)
#errSP_color_out = self.color_loss(residue_color, point_color, point_value3_2)*10.0
'''errSP_value_out_std = self.loss(net_out_std, point_value)
errSP_color_out_std = self.color_loss(residue_color_std, point_color, point_value3_2)*10.0
errSP_value_rec = self.loss(z_vector_rec, z_vector_std)*0.02
errSP_color2_rec = self.loss(z_vector_c2_rec, z_vector_color2_std)*0.01'''
errSP=errSP_value+ errSP_color2 #+kl_loss #+ errSP_value_out_std+errSP_color_out_std + errSP_value_rec + errSP_color2_rec# +errSP_value_rec+errSP_color_rec+errSP_color2_rec +errSP_value_rec_text +errSP_color_rec_text +errSP_color2_rec_text
errSP.backward()
#nn.utils.clip_grad_norm(list(self.im_network.generator_color.parameters())+list(self.im_network.dalle.parameters()) , 0.05)
#torch.nn.utils.clip_grad_norm_(
# self.im_network.parameters(), 1
#)
self.optimizer.step()
#avg_loss_kl += kl_loss.item()
avg_loss_value += errSP_value.item()
avg_loss_color2 += errSP_color2.item()
'''avg_value_out_std += errSP_value_out_std.item()
avg_color_out_std += errSP_color_out_std.item()
avg_loss_value_rec += errSP_value_rec.item()
#avg_loss_color_rec += errSP_color_rec.item()
avg_loss_color2_rec += errSP_color2_rec.item()'''
'''avg_loss_value_rec += errSP_value_rec.item()
avg_loss_color_rec += errSP_color_rec.item()
avg_loss_color2_rec += errSP_color2_rec.item()
avg_loss_value_rec_text += errSP_value_rec_text.item()
avg_loss_color_rec_text += errSP_color_rec_text.item()
avg_loss_color2_rec_text += errSP_color2_rec_text.item()'''
avg_loss_sp += errSP.item()
avg_num += 1
print(str(self.sample_vox_size)+" Epoch: [%2d/%2d] time: %4.4f,loss_value_sp: %.6f, loss_color_sp: %.6f, loss_sp: %.6f" % (epoch, training_epoch, time.time() - start_time,avg_loss_value/avg_num, avg_loss_color2/avg_num, avg_loss_sp/avg_num))
if epoch%20==19:
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
save_dir = os.path.join(self.checkpoint_path,self.checkpoint_name+str(self.sample_vox_size)+"-"+str(epoch)+"_raw.pth")
self.checkpoint_manager_pointer = (self.checkpoint_manager_pointer+1)%self.max_to_keep
#delete checkpoint
#if self.checkpoint_manager_list[self.checkpoint_manager_pointer] is not None:
# if os.path.exists(self.checkpoint_manager_list[self.checkpoint_manager_pointer]):
# os.remove(self.checkpoint_manager_list[self.checkpoint_manager_pointer])
#save checkpoint
torch.save(self.im_network.state_dict(), save_dir)
#update checkpoint manager
self.checkpoint_manager_list[self.checkpoint_manager_pointer] = save_dir
#write file
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
fout = open(checkpoint_txt, 'w')
for i in range(self.max_to_keep):
pointer = (self.checkpoint_manager_pointer+self.max_to_keep-i)%self.max_to_keep
if self.checkpoint_manager_list[pointer] is not None:
fout.write(self.checkpoint_manager_list[pointer]+"\n")
fout.close()
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
save_dir = os.path.join(self.checkpoint_path,self.checkpoint_name+str(self.sample_vox_size)+"-"+str(epoch)+".pth")
self.checkpoint_manager_pointer = (self.checkpoint_manager_pointer+1)%self.max_to_keep
#delete checkpoint
if self.checkpoint_manager_list[self.checkpoint_manager_pointer] is not None:
if os.path.exists(self.checkpoint_manager_list[self.checkpoint_manager_pointer]):
os.remove(self.checkpoint_manager_list[self.checkpoint_manager_pointer])
#save checkpoint
torch.save(self.im_network.state_dict(), save_dir)
#update checkpoint manager
self.checkpoint_manager_list[self.checkpoint_manager_pointer] = save_dir
#write file
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
fout = open(checkpoint_txt, 'w')
for i in range(self.max_to_keep):
pointer = (self.checkpoint_manager_pointer+self.max_to_keep-i)%self.max_to_keep
if self.checkpoint_manager_list[pointer] is not None:
fout.write(self.checkpoint_manager_list[pointer]+"\n")
fout.close()
color_cube_float = np.zeros([3, self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
model_float = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32) #258*258*258
conf = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32)
#print (model_float.shape)
dimc = self.cell_grid_size #4
dimf = self.frame_grid_size #64
frame_flag = np.zeros([dimf+2,dimf+2,dimf+2],np.uint8)
color_cube = np.ones([3,dimf+2,dimf+2,dimf+2]).astype('float32')
queue = []
frame_batch_num = int(dimf**3/self.test_point_batch_size) #8
assert frame_batch_num>0
#print (dimf #64, dimf**3,262144, self.test_point_batch_size, 32768 , frame_batch_num 8)
#get frame grid values
for i in range(frame_batch_num):
point_coord = self.frame_coords[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
point_coord = np.expand_dims(point_coord, axis=0)
point_coord = torch.from_numpy(point_coord)
point_coord = point_coord.to(self.device)
_,_, model_out_, color_out_ = self.im_network(None, z, z_color, point_coord, is_training=False)
#print ('cube 0',torch.unique(color_out_.detach()))
#print ('model out', model_out_.shape, color_out_.shape) torch.Size([1, 32768, 1]) torch.Size([1, 32768, 3])
model_out = model_out_.detach().cpu().numpy()[0]
color_out_ = color_out_.detach().cpu().numpy()[0]
#print (color_out_.shape)
color_out = np.transpose(color_out_,(1,0))
x_coords = self.frame_x[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
y_coords = self.frame_y[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
z_coords = self.frame_z[i*self.test_point_batch_size:(i+1)*self.test_point_batch_size]
#print (frame_flag.shape, x_coords,y_coords,z_coords, x_coords+1, y_coords+1,z_coords+1)
#print (model_out.shape, color_out.shape, self.test_point_batch_size, color_flag[:,x_coords,y_coords,z_coords].shape) (32768, 1) (32768, 3) 32768 (3, 32768)
frame_flag[x_coords+1,y_coords+1,z_coords+1] = np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]) #66,66,66
conf[x_coords+1,y_coords+1,z_coords+1] = np.reshape(model_out.astype(float), [self.test_point_batch_size])
color_cube[:,x_coords+1,y_coords+1,z_coords+1] = np.reshape(color_out, [3, self.test_point_batch_size]) #66,66,66
#print (x_coords,y_coords,z_coords,x_coords.shape,y_coords.shape,z_coords.shape)
#print ('cube 1',color_out.shape, np.reshape((model_out>self.sampling_threshold).astype(np.uint8), [self.test_point_batch_size]).shape, np.reshape(color_out, [3, self.test_point_batch_size]).shape, np.unique(color_cube), color_cube[:,x_coords,y_coords,z_coords].shape, frame_flag[x_coords+1,y_coords+1,z_coords+1].shape)
#get queue and fill up ones
for i in range(1,dimf+1):
for j in range(1,dimf+1):
for k in range(1,dimf+1):
x_coords = self.cell_x+(i-1)*dimc
#print ('xcorrds',x_coords,self.cell_x, i-1, dimc)
#print ('cellx,dimc',self.cell_x, dimc) cellx,dimc [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3] 4
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
#model_float[x_coords+1,y_coords+1,z_coords+1] = 1.0
#print (color_cube[:,i,j,k].shape, color_cube_float[:,x_coords+1,y_coords+1,z_coords+1])
color_cube_float[0,x_coords+1,y_coords+1,z_coords+1] = color_cube[0,i,j,k]
color_cube_float[1,x_coords+1,y_coords+1,z_coords+1] = color_cube[1,i,j,k]
color_cube_float[2,x_coords+1,y_coords+1,z_coords+1] = color_cube[2,i,j,k]
#print (i,j,k,color_cube[0,i,j,k]*255,color_cube[1,i,j,k]*255,color_cube[2,i,j,k]*255)
maxv = np.max(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
minv = np.min(frame_flag[i-1:i+2,j-1:j+2,k-1:k+2])
if maxv!=minv:
queue.append((i,j,k))
elif maxv==1:
x_coords = self.cell_x+(i-1)*dimc
#print ('xcorrds',x_coords,self.cell_x, i-1, dimc)
#print ('cellx,dimc',self.cell_x, dimc) cellx,dimc [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3] 4
y_coords = self.cell_y+(j-1)*dimc
z_coords = self.cell_z+(k-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = 1.0
#print (color_cube[:,i,j,k].shape, color_cube_float[:,x_coords+1,y_coords+1,z_coords+1])
#color_cube_float[0,x_coords+1,y_coords+1,z_coords+1] = color_cube[0,i,j,k]
#color_cube_float[1,x_coords+1,y_coords+1,z_coords+1] = color_cube[1,i,j,k]
#color_cube_float[2,x_coords+1,y_coords+1,z_coords+1] = color_cube[2,i,j,k]
#print ('c',color_cube[:,i,j,k], color_cube[:,i,j,k].shape)
cell_batch_size = dimc**3
cell_batch_num = int(self.test_point_batch_size/cell_batch_size)
assert cell_batch_num>0
#run queue
while len(queue)>0:
batch_num = min(len(queue),cell_batch_num)
point_list = []
cell_coords = []
for i in range(batch_num):
point = queue.pop(0)
point_list.append(point)
cell_coords.append(self.cell_coords[point[0]-1,point[1]-1,point[2]-1])
cell_coords = np.concatenate(cell_coords, axis=0)
cell_coords = np.expand_dims(cell_coords, axis=0)
cell_coords = torch.from_numpy(cell_coords)
cell_coords = cell_coords.to(self.device)
_,_, model_out_batch_, color_out_batch_ = self.im_network(None, z,z_color, cell_coords, is_training=False)
model_out_batch = model_out_batch_.detach().cpu().numpy()[0]
color_out_batch = color_out_batch_.detach().cpu().numpy()[0]
for i in range(batch_num):
point = point_list[i]
#print (model_out_batch.shape, color_out_batch.shape)
model_out = model_out_batch[i*cell_batch_size:(i+1)*cell_batch_size,0]
#color_out = color_out_batch[i*cell_batch_size:(i+1)*cell_batch_size,:]
#print ('color out',color_out.shape)
x_coords = self.cell_x+(point[0]-1)*dimc
y_coords = self.cell_y+(point[1]-1)*dimc
z_coords = self.cell_z+(point[2]-1)*dimc
model_float[x_coords+1,y_coords+1,z_coords+1] = model_out
#for c in range(3):
# color_cube_float[c,x_coords+1,y_coords+1,z_coords+1] = color_out[:,c]
if np.max(model_out)>self.sampling_threshold:
for i in range(-1,2):
pi = point[0]+i
if pi<=0 or pi>dimf: continue
for j in range(-1,2):
pj = point[1]+j
if pj<=0 or pj>dimf: continue
for k in range(-1,2):
pk = point[2]+k
if pk<=0 or pk>dimf: continue
if (frame_flag[pi,pj,pk] == 0):
frame_flag[pi,pj,pk] = 1
queue.append((pi,pj,pk))
return model_float, color_cube_float, color_cube
#output shape as ply and point cloud as ply
def test_mesh_point(self, config):
#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
if 1: #os.path.exists(checkpoint_txt):
#model_dir='checkpoint/color_all_ae_64/IM_AE.model16-29_raw.pth'
model_dir='checkpoint/color_all_ae_64/div.model64-149.pth'
models=torch.load(model_dir)
self.im_network.load_state_dict(torch.load(model_dir),strict=True)
#model_dir='../merge-cyclic-multi-att/checkpoint/color_all_ae_64/IM_AE.model64-209_raw.pth' #IM_AE.model32-199_save_from150.pth'
#self.im_network.load_state_dict(torch.load(model_dir),strict=False)
print(" [*] Load SUCCESS", model_dir)
else:
print(" [!] Load failed...")
return
self.im_network.eval()
#print (self.im_network)
#self.im_network.model.dropout.train()
#for t in range(config.start, min(len(self.data_voxels),config.end)):
idx=0
for data in self.datas[config.start:config.end]:
text_list=data[0].split(' ')[:-1] #.astype('int')
text_array = np.asarray(list(map(int, text_list)))
#print (data[1])
#if '539548' not in data[1]: #c3b6c, ad174 73b369
# continue
path='../hdf5_test_new/'+data[1]+'.hdf5'
data_dict = h5py.File(path, 'r')
#path=glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5_test/cd942*')[0]
name=path.split('/')[-1]
#print (name)
#if os.path.exists("val/"+str(name)+str(data[2][:50])+"_mesh_pred.ply"):
# continue
data_dict = h5py.File(path, 'r')
self.data_points=((data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5)
self.data_values=(data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32))
self.data_colors=(data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0)
tmp_data_voxels_colors = data_dict['voxels_colors'][:]/255.0
tmp_data_voxels_colors = np.transpose(tmp_data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors=(np.reshape(tmp_data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels=(np.reshape(data_dict['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
t=0
batch_voxels_ = self.data_voxels[t:t+1].astype(np.float32)
batch_voxels = torch.from_numpy(batch_voxels_)
batch_voxels = batch_voxels.to(self.device)
batch_voxels_colors = self.data_voxels_colors[t:t+1].astype(np.float32)
batch_voxels_colors = torch.from_numpy(batch_voxels_colors)
batch_voxels_colors = batch_voxels_colors.to(self.device)
#print (torch.unique(batch_voxels_colors))
texts=np.zeros((1, 32))
masks=np.zeros((1, 32))
texts[0,:min(32,len(text_list))]=text_array[:min(32,len(text_list))]
masks[0,:min(32,len(text_list))]=1
texts=torch.from_numpy(texts).to(self.device).long()
masks=torch.from_numpy(masks).to(self.device).bool()
model_zs,_, z_vector_c2s, _,_,_,_, words= self.im_network(texts, masks, None, None,None, None,None, None,None, is_training=False)
for idx in range(model_zs.shape[0]):
#print (idx)
model_z=model_zs[idx,:]
z_vector_c2=z_vector_c2s[idx,:]
#start=time.time()
model_float, color_cube_float, frame_flag, color_cube = self.z2voxel(model_z, z_vector_c2, words, texts, config)
#print (time.time()-start)
from plyfile import PlyData,PlyElement
some_array=[]
size=258
for i in range(1,64):
for j in range(1,64):
for k in range(1,64):
if frame_flag[1:-1,1:-1,1:-1][int(i),int(j),int(k)]>0.5:
some_array.append((i,j,k,color_cube[2,int(i),int(j),int(k)]*255,color_cube[1,int(i),int(j),int(k)]*255,color_cube[0,int(i),int(j),int(k)]*255))
some_array = np.array(some_array, dtype=[('x', 'float32'), ('y', 'float32'), ('z', 'float32'), ('red', 'uint8'), ('green', 'uint8'), ('blue', 'uint8')])
el = PlyElement.describe(some_array, 'vertex')
PlyData([el]).write('result/div/'+name+str(data[2][:50].replace('/',' '))+str(idx)+'test_new_input.ply')
#continue
shape64=torch.unsqueeze(torch.unsqueeze(torch.from_numpy(frame_flag).cuda(),0),0)
color64=torch.unsqueeze(torch.from_numpy(color_cube).cuda(),0)
color64[:,0,:,:,:][torch.where(shape64[:,0,:,:,:]==0)]=0
color64[:,1,:,:,:][torch.where(shape64[:,0,:,:,:]==0)]=0
color64[:,2,:,:,:][torch.where(shape64[:,0,:,:,:]==0)]=0
cube_float64=torch.cat((shape64,color64),1)[:,:,1:-1,1:-1,1:-1] #[:,:,0:256:4,0:256:4,0:256:4]
model_z_shape, _, z_vector_c2_shape,_,_,_,_ = self.im_network(None,None, cube_float64, None,None, None,None, None, words, is_training=False)
#print (model_z.shape, z_vector_c2.shape)
text_feat=torch.cat((model_z.unsqueeze(0),z_vector_c2.unsqueeze(0)),1).detach().cpu().numpy()
shape_feat=torch.cat((model_z_shape,z_vector_c2_shape),1).detach().cpu().numpy()
#print (text_feat.shape, shape_feat.shape)
np.save('result/div/shape_feat/'+data[1]+'_'+str(data[2][:50].replace('/',' '))+str(idx)+'.npy', shape_feat)
np.save('result/div/text_feat/'+data[1]+'_'+str(data[2][:50].replace('/',' '))+str(idx)+'.npy', text_feat)
model_pad=np.zeros((66,66,66))
model_pad[1:-1,1:-1,1:-1]=frame_flag[1:-1,1:-1,1:-1] #model_float[1:-1:4,1:-1:4,1:-1:4]
vertices, triangles = mcubes.marching_cubes(model_pad, self.sampling_threshold)
x = np.linspace(0, 66, 66)
y = np.linspace(0, 66, 66)
z = np.linspace(0, 66, 66)
#color_cube[:,1:-1,1:-1,1:-1]=color_cube
color_cube[:,0,:,:]=color_cube[:,1,:,:]
color_cube[:,:,0,:]=color_cube[:,:,1,:]
color_cube[:,:,:,0]=color_cube[:,:,:,1]
color_cube[:,-1,:,:]=color_cube[:,-2,:,:]
color_cube[:,:,-1,:]=color_cube[:,:,-2,:]
color_cube[:,:,:,-1]=color_cube[:,:,:,-2]
#color_cube[:,1:-1,1:-1,1:-1]=self.data_voxels_colors[0,:,:,:,:]
my_interpolating_function0 = RegularGridInterpolator((x, y, z), color_cube[0,:,:,:],method='nearest') #_float[0,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function1 = RegularGridInterpolator((x, y, z), color_cube[1,:,:,:],method='nearest') #_float[1,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function2 = RegularGridInterpolator((x, y, z), color_cube[2,:,:,:],method='nearest') #_float[2,1:-1:4,1:-1:4,1:-1:4])
color0=my_interpolating_function0(vertices)
color1=my_interpolating_function1(vertices)
color2=my_interpolating_function2(vertices)
colors=np.zeros((color0.shape[0],3))
colors[:,0]=color0
colors[:,1]=color1
colors[:,2]=color2
write_ply_triangle("result/div/"+str(name)+str(data[2][:50].replace('/',' '))+"_mesh_pred"+str(idx)+".ply", vertices, triangles, colors)
sampled_points_normals = sample_points_triangle(vertices, triangles, 2048)
vertices_tensor=torch.from_numpy(vertices.astype(np.float32)).cuda()
sampled_points_normals_int=sampled_points_normals.astype('int')
#print (sampled_points_normals.shape, np.unique(sampled_points_normals_int[:,:3]), np.unique(sampled_points_normals[:,3:] ) )
colors=color_cube[:,sampled_points_normals_int[:,0],sampled_points_normals_int[:,1],sampled_points_normals_int[:,2]]
write_ply_point_normal("result/div/"+str(name)+str(data[2][:50].replace('/',' '))+"_pc"+str(idx)+".ply", sampled_points_normals, colors)
if config.high_resolution:
model_pad=np.zeros((258,258,258))
model_pad[1:-1,1:-1,1:-1]= model_float[1:-1,1:-1,1:-1] #model_float[1:-1:4,1:-1:4,1:-1:4]
vertices, triangles = mcubes.marching_cubes(model_float, self.sampling_threshold)
x = np.linspace(0, 258,258)
y = np.linspace(0, 258,258)
z = np.linspace(0, 258,258)
color_cube=color_cube_float
color_cube[:,0,:,:]=color_cube[:,1,:,:]
color_cube[:,:,0,:]=color_cube[:,:,1,:]
color_cube[:,:,:,0]=color_cube[:,:,:,1]
color_cube[:,-1,:,:]=color_cube[:,-2,:,:]
color_cube[:,:,-1,:]=color_cube[:,:,-2,:]
color_cube[:,:,:,-1]=color_cube[:,:,:,-2]
my_interpolating_function0 = RegularGridInterpolator((x, y, z), color_cube[0,:,:,:],method='nearest') #_float[0,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function1 = RegularGridInterpolator((x, y, z), color_cube[1,:,:,:],method='nearest') #_float[1,1:-1:4,1:-1:4,1:-1:4])
my_interpolating_function2 = RegularGridInterpolator((x, y, z), color_cube[2,:,:,:],method='nearest') #_float[2,1:-1:4,1:-1:4,1:-1:4])
color0=my_interpolating_function0(vertices)
color1=my_interpolating_function1(vertices)
color2=my_interpolating_function2(vertices)
colors=np.zeros((color0.shape[0],3))
colors[:,0]=color0
colors[:,1]=color1
colors[:,2]=color2
write_ply_triangle("result/div/"+str(name)+str(data[2][:50].replace('/',' '))+"_mesh_258_"+str(idx)+".ply", vertices, triangles, colors)
def get_z(self, config):
#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
if os.path.exists(checkpoint_txt):
fin = open(checkpoint_txt)
model_dir = fin.readline().strip()
fin.close()
model_dir='checkpoint/color_all_ae_64/IM_AE.model16-199_raw.pth'
self.im_network.load_state_dict(torch.load(model_dir))
print(" [*] Load SUCCESS", model_dir)
else:
print(" [!] Load failed...")
return
self.im_network.eval()
#for t in range(config.start, min(len(self.data_voxels),config.end)):
for path in glob.glob('/mnt/sdb/lzz/transform/IM-NET-pytorch/point_sampling/hdf5_train/*.hdf5'): #self.data_paths: #[config.start:config.end]:
print (path)
name=path.split('/')[-1]
data_dict = h5py.File(path, 'r')
self.data_points=((data_dict['points_'+str(self.sample_vox_size)][:].astype(np.float32)+0.5)/256-0.5)
self.data_values=(data_dict['values_'+str(self.sample_vox_size)][:].astype(np.float32))
self.data_colors=(data_dict['colors_'+str(self.sample_vox_size)][:].astype(np.float32)/255.0)
tmp_data_voxels_colors = data_dict['voxels_colors'][:]/255.0
tmp_data_voxels_colors = np.transpose(tmp_data_voxels_colors, (0,4,1,2,3))
self.data_voxels_colors=(np.reshape(tmp_data_voxels_colors, [-1,3,self.input_size,self.input_size,self.input_size]))
self.data_voxels=(np.reshape(data_dict['voxels'][:], [-1,1,self.input_size,self.input_size,self.input_size]))
t=0
batch_voxels_ = self.data_voxels[t:t+1].astype(np.float32)
batch_voxels = torch.from_numpy(batch_voxels_)
batch_voxels = batch_voxels.to(self.device)
batch_voxels_colors = self.data_voxels_colors[t:t+1].astype(np.float32)
batch_voxels_colors = torch.from_numpy(batch_voxels_colors)
batch_voxels_colors = batch_voxels_colors.to(self.device)
#print (torch.unique(batch_voxels_colors))
#z_vector, _, _ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None, None, is_training=False)
#model_z,_,_ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None,None, None, is_training=False)
model_z,z_vector_color,_,_ = self.im_network(torch.cat((batch_voxels,batch_voxels_colors),1), None,None, None, is_training=False)
z=model_z.detach().cpu().numpy()
z_vector_color=z_vector_color.detach().cpu().numpy()
#print (z.shape, z_vector_color.shape)
z=np.concatenate((z,z_vector_color),1)
print (z.shape)
np.save('../feat32_color_train/'+name+'.npy',z)
'''#load previous checkpoint
checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
if os.path.exists(checkpoint_txt):
fin = open(checkpoint_txt)
model_dir = fin.readline().strip()
fin.close()
self.im_network.load_state_dict(torch.load(model_dir))
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
hdf5_path = self.checkpoint_dir+'/'+self.model_dir+'/'+self.dataset_name+'_train_z.hdf5'
shape_num = len(self.data_voxels)
hdf5_file = h5py.File(hdf5_path, mode='w')
hdf5_file.create_dataset("zs", [shape_num,self.z_dim], np.float32)
self.im_network.eval()
#print(shape_num)
for t in range(shape_num):
batch_voxels = self.data_voxels[t:t+1].astype(np.float32)
batch_voxels = torch.from_numpy(batch_voxels)
batch_voxels = batch_voxels.to(self.device)
out_z,_ ,_= self.im_network(batch_voxels, None, None, is_training=False)
hdf5_file["zs"][t:t+1,:] = out_z.detach().cpu().numpy()
hdf5_file.close()
print("[z]")'''
def test_z(self, config, batch_z, dim):
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
return
for t in range(batch_z.shape[0]):
model_z = batch_z[t:t+1]
model_z = torch.from_numpy(model_z)
model_z = model_z.to(self.device)
model_float = self.z2voxel(model_z)
#img1 = np.clip(np.amax(model_float, axis=0)*256, 0,255).astype(np.uint8)
#img2 = np.clip(np.amax(model_float, axis=1)*256, 0,255).astype(np.uint8)
#img3 = np.clip(np.amax(model_float, axis=2)*256, 0,255).astype(np.uint8)
#cv2.imwrite(config.sample_dir+"/"+str(t)+"_1t.png",img1)
#cv2.imwrite(config.sample_dir+"/"+str(t)+"_2t.png",img2)
#cv2.imwrite(config.sample_dir+"/"+str(t)+"_3t.png",img3)
#print (model_float)
vertices, triangles = mcubes.marching_cubes(model_float, self.sampling_threshold)
vertices = (vertices.astype(np.float32)-0.5)/self.real_size-0.5
#vertices = self.optimize_mesh(vertices,model_z)
write_ply(config.sample_dir+"/"+"out"+str(t)+".ply", vertices, triangles)
print("[sample Z]")
| [] |
2024-01-10 | ashmeet13/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
  howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | navicstein/resume-checker | resume_checker.py | import os
import logging
import argparse
from datetime import datetime
from dotenv import load_dotenv
from langchain.document_loaders import PyPDFLoader
from langchain_community.chat_models import ChatOpenAI
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.llms.ollama import Ollama
from langchain_community.llms.together import Together
from langchain_core.callbacks import StreamingStdOutCallbackHandler, CallbackManager
from prompts import resume_checker_prompt, check_output_parser, resume_cover_letter_prompt
load_dotenv()
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s]: %(message)s",
handlers=[
logging.FileHandler("resume_logger.log"),
logging.StreamHandler()
]
)
def trim_space(text: str) -> str:
return text.replace("\n", " ").replace("\r", " ").replace("\t", " ")
class ResumeAnalyser:
""" ResumeAnalyser takes in a path to your resume and a job posting URL and lets you know if you're fit for the
job or not"""
llm = None
resume_content: str = ""
job_content: str = ""
def __init__(self, resume_path: str, job_posting_url: str):
self.resume_path = resume_path
self.job_posting_url = job_posting_url
model_name = os.getenv("LLM_MODEL")
if model_name == "openai":
self.llm = ChatOpenAI(model_name="gpt-3.5-turbo-1106")
elif model_name == "ollama":
self.llm = Ollama(
model="mistral",
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
)
elif model_name == "together":
self.llm = Together(
model="mistralai/Mistral-7B-Instruct-v0.2",
temperature=0.7,
top_k=1,
)
else:
raise ValueError("Invalid LLM model name")
self.load_resume(self.resume_path)
self.load_job_site(self.job_posting_url)
def load_resume(self, path):
""" loads the resume text from the resume path """
loader = PyPDFLoader(path)
docs = loader.load()
for doc in docs:
self.resume_content += trim_space(doc.page_content)
def load_job_site(self, url):
""" loads the job text from the job site url """
loader = WebBaseLoader(url)
docs = loader.load()
for doc in docs:
self.job_content += trim_space(doc.page_content)
def run(self):
""" Runs the program """
check_prompt_str = resume_checker_prompt.format(resume=self.resume_content, job_description=self.job_content)
logging.debug(check_prompt_str)
output = self.llm.predict(check_prompt_str)
result = check_output_parser.parse(output)
score, value, explanation, fixes = result["score"], result["value"], result["explanation"], result["fixes"]
print(f"""
Job fit: {value}
Explanation: {explanation}
Your Resume Score is: {score}
""")
if value is True:
self.generate_cover_letter()
else:
print(f"""
You're not fit for this role, but here's a possible way to improve your resume:
{fixes}
""")
def generate_cover_letter(self):
""" generate cover letter for the job"""
cover_letter_prompt_str = resume_cover_letter_prompt.format(resume=self.resume_content,
job_description=self.job_content)
cover_letter = self.llm.predict(cover_letter_prompt_str)
logging.debug(cover_letter_prompt_str)
timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
filename = f"cover_letter_{timestamp}.txt"
filename = os.path.join("cover_letters", filename)
with open(filename, "w") as f:
f.write(cover_letter)
logging.info(f"Cover letter generated at {filename}")
print(f"""
Here's an example cover letter you can use
{cover_letter}
""")
| [] |
2024-01-10 | haneyume/llm-test | chainlit_mrkl.py | import chainlit as cl
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, Tool, AgentExecutor
from langchain import OpenAI, LLMMathChain, SerpAPIWrapper
from dotenv import load_dotenv
load_dotenv()
@cl.on_chat_start
def start():
llm = ChatOpenAI(temperature=0, streaming=True)
llm1 = OpenAI(temperature=0, streaming=True)
search = SerpAPIWrapper()
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events. You should ask targeted questions",
),
Tool(
name="Calculator",
func=llm_math_chain.run,
description="useful for when you need to answer questions about math",
),
]
agent = initialize_agent(
tools, llm1, agent="chat-zero-shot-react-description", verbose=True
)
cl.user_session.set("agent", agent)
@cl.on_message
async def main(message):
agent = cl.user_session.get("agent") # type: AgentExecutor
cb = cl.LangchainCallbackHandler(stream_final_answer=True)
await cl.make_async(agent.run)(message, callbacks=[cb])
| [] |
2024-01-10 | janbanot/ai_devs2 | api_tasks~whisper.py | import os
import openai
import requests # type: ignore
import tempfile
from dotenv import load_dotenv
from ai_devs_task import Task
from typing import Dict, Any
load_dotenv()
ai_devs_api_key: str = os.getenv("AI_DEVS_API_KEY", "")
openai.api_key = os.getenv("OPENAI_API_KEY", "")
whisper: Task = Task(ai_devs_api_key, "whisper")
token: str = whisper.auth()
content: Dict[str, Any] = whisper.get_content(token)
file_url: str = "https://zadania.aidevs.pl/data/mateusz.mp3"
response: requests.Response = requests.get(file_url)
if response.status_code == 200:
# Create a temporary file to save the MP3 content
with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_file:
# Write the MP3 content to the temporary file
temp_file.write(response.content)
temp_file.flush()
temp_file.seek(0) # Reset file pointer to the beginning
with open(temp_file.name, "rb") as audio_file:
transcript: str = openai.Audio.transcribe("whisper-1", audio_file)["text"]
answer_payload: Dict[str, str] = {"answer": transcript}
task_result: Dict[str, Any] = whisper.post_answer(token, answer_payload)
print(task_result)
else:
print("Failed to download the MP3 file")
| [] |
2024-01-10 | janbanot/ai_devs2 | api_tasks~moderation.py | import os
from dotenv import load_dotenv
from ai_devs_task import Task
from openai import OpenAI
from typing import Dict, Any, List
load_dotenv()
ai_devs_api_key: str = os.getenv("AI_DEVS_API_KEY", "")
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", ""))
moderation: Task = Task(ai_devs_api_key, "moderation")
token: str = moderation.auth()
task_content: Dict[str, Any] = moderation.get_content(token)
sentence_list: List[str] = task_content["input"]
moderation_response = client.moderations.create(input=sentence_list)
moderation_results = moderation_response.results
results_list: List[int] = [1 if result.flagged else 0 for result in moderation_results]
answer_payload: Dict[str, List[int]] = {"answer": results_list}
task_result: Dict[str, Any] = moderation.post_answer(token, answer_payload)
print(task_result)
| [] |
2024-01-10 | janbanot/ai_devs2 | api_tasks~gnome.py | import os
from openai import OpenAI
from dotenv import load_dotenv
from ai_devs_task import Task
from typing import Dict, Any
load_dotenv()
ai_devs_api_key: str = os.getenv("AI_DEVS_API_KEY", "")
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", ""))
gnome: Task = Task(ai_devs_api_key, "gnome")
token: str = gnome.auth()
task_content: Dict[str, Any] = gnome.get_content(token)
url: str = task_content["url"]
prompt: str = """
Simply answer the question with just the color name: what is the color of the gnome's hat?
Answer in Polish.
If the image does not show a gnome with a hat, answer with "ERROR".
"""
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": f"{prompt}"},
{
"type": "image_url",
"image_url": {
"url": f"{url}",
},
},
],
}
],
max_tokens=300,
)
answer = response.choices[0].message.content or ""
answer_payload: Dict[str, str] = {"answer": answer}
task_result: Dict[str, Any] = gnome.post_answer(token, answer_payload)
print(task_result)
| [
"\nSimply answer the question with just the color name: what is the color of the gnome's hat?\nAnswer in polish.\nIf the does not show a gnome with a hat, answer with \"ERROR\".\n",
"[{'type': 'text', 'text': '\\nSimply answer the question with just the color name: what is the color of the gnome\\'s hat?\\nAnswer in polish.\\nIf the does not show a gnome with a hat, answer with \"ERROR\".\\n'}, {'type': 'image_url', 'image_url': {'url': 'PLACEHOLDER'}}]"
] |
2024-01-10 | janbanot/ai_devs2 | api_tasks~people.py | import os
import requests # type: ignore
from openai import OpenAI
import uuid
from dotenv import load_dotenv
from ai_devs_task import Task
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams
from langchain.embeddings import OpenAIEmbeddings
from typing import Dict, Any
SOURCE_URL = "https://zadania.aidevs.pl/data/people.json"
load_dotenv()
ai_devs_api_key: str = os.getenv("AI_DEVS_API_KEY", "")
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", ""))
people: Task = Task(ai_devs_api_key, "people")
token: str = people.auth()
task_content: Dict[str, Any] = people.get_content(token)
qdrant = QdrantClient("localhost", port=6333)
embeddings = OpenAIEmbeddings()
COLLECTION_NAME = "people_task"
collections = qdrant.get_collections()
collection_names = [element.name for element in collections.collections]
if not (COLLECTION_NAME in collection_names):
qdrant.create_collection(
collection_name=COLLECTION_NAME,
vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
on_disk_payload=True
)
collection_info = qdrant.get_collection(collection_name=COLLECTION_NAME)
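# Populate the collection only when it is empty: each person from people.json is embedded
# by full name ("imie nazwisko") and the remaining fields are kept in the point payload,
# so a question can later be matched to a person by name similarity.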
if collection_info.points_count == 0:
people_data = requests.get(SOURCE_URL).json()
for element in people_data:
name = f"{element['imie']} {element['nazwisko']}"
element_copy = element.copy()
if "imie" in element_copy:
del element_copy["imie"]
if "nazwisko" in element_copy:
del element_copy["nazwisko"]
metadata = {
"source": COLLECTION_NAME,
"content": element_copy,
"id": uuid.uuid4().hex,
}
point = name
point_id = metadata["id"]
point_vector = embeddings.embed_query(point)
point_struct = {"id": point_id, "payload": metadata, "vector": point_vector}
qdrant.upsert(
collection_name=COLLECTION_NAME,
wait=True,
points=[point_struct]
)
prompt_name: str = """
Extract the name and surname of the person from the text below.
Answer with name and surname only.
Examples:
Ulubiony kolor Agnieszki Rozkaz, to?
expected: Agnieszka Rozkaz
"""
name_surname = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": prompt_name},
{"role": "user", "content": task_content["question"]}
]
)
name_surname_string = name_surname.choices[0].message.content or ""
query_embedding = embeddings.embed_query(name_surname_string)
search_result = qdrant.search(
collection_name=COLLECTION_NAME,
query_vector=query_embedding,
limit=1,
query_filter={
"must": [
{
"key": "source",
"match": {
"value": COLLECTION_NAME
}
}
]
}
)
info = str(search_result[0].payload["content"]) # type: ignore
prompt_info: str = f"""
Answer the question about the person based on the text below
{info}
"""
api_response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": prompt_info},
{"role": "user", "content": task_content["question"]}
]
)
answer = api_response.choices[0].message.content or ""
answer_payload: Dict[str, str] = {"answer": answer}
task_result: Dict[str, Any] = people.post_answer(token, answer_payload)
print(task_result)
| [
"\nAnswer the question about the person based on the text below\nPLACEHOLDER\n",
"question",
"\nExtract the name and surname of the person from the text below.\nAnswer with name and surname only.\nExamples:\nUlubiony kolor Agnieszki Rozkaz, to?\nexpected: Agnieszka Rozkaz\n"
] |
2024-01-10 | janbanot/ai_devs2 | api_tasks~whoami.py | import os
import json
from openai import OpenAI
from dotenv import load_dotenv
from ai_devs_task import Task
from typing import Dict, Any, List, Tuple
# TODO add typing, refactor!!!
load_dotenv()
ai_devs_api_key: str = os.getenv("AI_DEVS_API_KEY", "")
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", ""))
whoami: Task = Task(ai_devs_api_key, "whoami")
context: List[str] = []
prompt: str = """
You are trying to guess the name of the person based on the hints given below.
Answer shortly with name and surname the question only if you are certain.
If you are not certain answer with "HINT"
### Hints:
"""
def guess(task: Task, context: List[str], prompt: str) -> Tuple[str, str]:
token: str = task.auth()
content: Dict[str, Any] = task.get_content(token)
hint: str = content["hint"]
context.append(hint)
enriched_prompt: str = enrich_prompt(prompt, context)
check_answer = client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": enriched_prompt},
{"role": "user", "content": "Who am I?"}
]
)
return (token, check_answer.choices[0].message.content or "")
def enrich_prompt(prompt: str, context: List[str]) -> str:
for hint in context:
prompt += f"- {hint}\n"
return prompt
def function_calling(query: str):
function_descriptions: List[Dict[str, Any]] = [
{
"name": "post_answer",
"description": "If input is a name post answer",
"parameters": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Guessed name of the person",
}
},
"required": ["name"]
},
},
{
"name": "ask_for_hint",
"description": "If input is 'HINT' ask for another hint",
"parameters": {
"type": "object",
"properties": {
"hint": {
"type": "string",
"description": "",
}
}
},
}
]
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[{"role": "user", "content": query}],
functions=function_descriptions # type: ignore
)
response_message = response.choices[0].message.function_call or ""
return response_message
def solve():
response = guess(whoami, context, prompt)
token, guessed_answer = response
response = function_calling(guessed_answer)
return (token, response)
while True:
token, function_call = solve()
if function_call["name"] == "post_answer":
name = json.loads(function_call["arguments"])["name"]
answer_payload: Dict[str, str] = {"answer": name}
response = whoami.post_answer(token, answer_payload={"answer": function_call["arguments"]})
print(response)
break
elif function_call["name"] == "ask_for_hint":
token, function_call = solve()
else:
print("Something went wrong")
break
| [
"- PLACEHOLDER\n",
"Who am I?",
"\nYou are trying to guess the name of the person based on the hints given below.\nAnswer shortly with name and surname the question only if you are certain.\nIf you are not certain answer with \"HINT\"\n\n### Hints:\n"
] |
2024-01-10 | janbanot/ai_devs2 | api_tasks~scraper.py | import os
from openai import OpenAI
from dotenv import load_dotenv
from ai_devs_task import Task
from typing import Dict, Any
from helpers import send_request
load_dotenv()
ai_devs_api_key: str = os.getenv("AI_DEVS_API_KEY", "")
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", ""))
scraper: Task = Task(ai_devs_api_key, "scraper")
token: str = scraper.auth()
task_content: Dict[str, Any] = scraper.get_content(token)
url: str = task_content["input"]
question: str = task_content["question"]
response_text: str = send_request("GET", url)
system: str = f"""
You answer the question concisely, in one sentence.
Answer using the following knowledge:
{response_text}
"""
answer = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system},
{"role": "user", "content": question}
]
)
task_answer: str = answer.choices[0].message.content or ""
answer_payload: Dict[str, str] = {"answer": task_answer}
task_result: Dict[str, Any] = scraper.post_answer(token, answer_payload)
print(task_result)
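
# The helpers.send_request function imported above is not shown in this excerpt. From the
# call send_request("GET", url) -> str it looks like a small retrying HTTP wrapper; the
# function below is only a sketch of such a helper (name, retry count and backoff are
# assumptions), not the actual implementation.
def send_request_sketch(method: str, url: str, retries: int = 5) -> str:
    import time
    import requests  # type: ignore  # local imports keep the sketch self-contained
    last_error = None
    for attempt in range(retries):
        try:
            response = requests.request(method, url, timeout=10)
            response.raise_for_status()
            return response.text
        except requests.RequestException as error:  # retry on transient failures
            last_error = error
            time.sleep(1 + attempt)
    raise RuntimeError(f"Request to {url} kept failing after {retries} attempts") from last_error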
| [
"\nYou answer the question concisely, in one sentence.\nAnswer using the following knowledge:\nPLACEHOLDER\n"
] |
2024-01-10 | janbanot/ai_devs2 | api_tasks~knowledge.py | import os
import json
from openai import OpenAI
import requests # type: ignore
from dotenv import load_dotenv
from ai_devs_task import Task
from typing import Dict, List, Any
def get_exchange_rate(currency: str) -> float:
url: str = f"http://api.nbp.pl/api/exchangerates/rates/a/{currency}/?format=json"
response: Dict[str, Any] = requests.get(url).json()
return response["rates"][0]["mid"]
def get_population(country: str) -> int:
url: str = f"https://restcountries.com/v3.1/name/{country}"
response: List[Dict] = requests.get(url).json()
return response[0]["population"]
def function_calling(query: str):
function_descriptions: List[Dict[str, Any]] = [
{
"name": "get_exchange_rate",
"description": "If question is about exchange rate",
"parameters": {
"type": "object",
"properties": {
"currency": {
"type": "string",
"description": "currency name",
}
},
"required": ["currency"]
},
},
{
"name": "get_population",
"description": "If question is about country's population",
"parameters": {
"type": "object",
"properties": {
"country": {
"type": "string",
"description": "country name",
}
                },
                "required": ["country"]
            }
}
]
response = client.chat.completions.create(
model="gpt-4-0613",
messages=[{"role": "user", "content": query}],
functions=function_descriptions # type: ignore
)
response_message = response.choices[0].message
if response_message.function_call:
return response_message.function_call # type: ignore
else:
return response_message # type: ignore
if __name__ == "__main__":
load_dotenv()
ai_devs_api_key: str = os.getenv("AI_DEVS_API_KEY", "")
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", ""))
knowledge: Task = Task(ai_devs_api_key, "knowledge")
token: str = knowledge.auth()
task_content: Dict[str, Any] = knowledge.get_content(token)
question: str = task_content["question"]
fcall = function_calling(question)
if hasattr(fcall, 'content'):
answer = fcall.content
elif fcall.name == "get_exchange_rate":
arguments = json.loads(fcall.arguments)
currency = arguments["currency"]
answer = get_exchange_rate(currency)
elif fcall.name == "get_population":
arguments = json.loads(fcall.arguments)
country = arguments["country"]
answer = get_population(country)
    answer_payload: Dict[str, Any] = {"answer": answer}
task_result: Dict[str, Any] = knowledge.post_answer(token, answer_payload)
print(task_result)
| [] |
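The knowledge.py task above routes the model's function call through an if/elif chain. A dispatch-table variant is sketched below for clarity; it is illustrative only and assumes `get_exchange_rate`, `get_population` and the `fcall` object returned by `function_calling` are in scope exactly as defined above.

```python
import json
from typing import Any, Callable, Dict

# Dispatch table keyed by the function name announced by the model.
DISPATCH: Dict[str, Callable[..., Any]] = {
    "get_exchange_rate": get_exchange_rate,
    "get_population": get_population,
}

def dispatch_function_call(fcall) -> Any:
    """Route an OpenAI function_call to the matching local function."""
    if not getattr(fcall, "name", None):
        # The model answered directly instead of calling a function.
        return getattr(fcall, "content", "")
    handler = DISPATCH[fcall.name]
    arguments = json.loads(fcall.arguments)  # arguments arrive as a JSON string
    return handler(**arguments)
```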
2024-01-10 | janbanot/ai_devs2 | api_tasks~rodo.py | import os
import openai
from dotenv import load_dotenv
from ai_devs_task import Task
from typing import Dict, Any
load_dotenv()
ai_devs_api_key: str = os.getenv("AI_DEVS_API_KEY", "")
openai.api_key = os.getenv("OPENAI_API_KEY", "")
rodo: Task = Task(ai_devs_api_key, "rodo")
token: str = rodo.auth()
task_content: Dict[str, Any] = rodo.get_content(token)
message: str = task_content["msg"]
user_prompt: str = """
Act security aware, using placeholders instead of real data.
Please tell me about yourself.
Use the following placeholders:
%imie%, %nazwisko%, %zawod%, %miasto%
"""
answer_payload: Dict[str, str] = {"answer": user_prompt}
result: Dict[str, Any] = rodo.post_answer(token, answer_payload)
print(result)
| [
"\nAct secuirty aware, using placeholders instead or real data.\nPlease tell me about yoursefl.\nUse following placeholders:\n%imie%, %nazwisko%, %zawod%, %miasto%\n"
] |
2024-01-10 | janbanot/ai_devs2 | api_tasks~liar.py | import os
from openai import OpenAI
from dotenv import load_dotenv
from ai_devs_task import Task
from typing import Dict, Any
load_dotenv()
ai_devs_api_key: str = os.getenv("AI_DEVS_API_KEY", "")
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", ""))
liar: Task = Task(ai_devs_api_key, "liar")
token: str = liar.auth()
task_content: Dict[str, Any] = liar.get_content(token)
question: Dict[str, str] = {"question": "What is the capital of Poland?"}
answer_json: Dict[str, Any] = liar.post_question(token, question)
answer: str = answer_json["answer"]
prompt: str = """
Answer simply YES or NO
Is it a correct answer to the following question:
"What is the capital of Poland?"
"""
check_answer = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": answer}
]
)
answer_content: str = check_answer.choices[0].message.content or ""
result_payload: Dict[str, str] = {"answer": answer_content}
result: Dict[str, Any] = liar.post_answer(token, result_payload)
print(result)
| [
"\nAnswer simply YES or NO\nIs it a correct answer to the following question:\n\"What is the capital of Poland?\"\n"
] |
2024-01-10 | janbanot/ai_devs2 | api_tasks~blogger.py | import os
from typing import List, Dict, Any
from openai import OpenAI
from dotenv import load_dotenv
from ai_devs_task import Task
load_dotenv()
ai_devs_api_key: str = os.getenv("AI_DEVS_API_KEY", "")
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", ""))
blogger: Task = Task(ai_devs_api_key, "blogger")
token: str = blogger.auth()
task_content: Dict[str, Any] = blogger.get_content(token)
prompt: str = """
You are a pizza master that writes a blog about pizza in polish.
Write a short paragraph about the given topic.
"""
result: List[str] = []
blog_topics: List[str] = task_content["blog"]
for topic in blog_topics:
blog_article = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": topic}
]
)
result.append(blog_article.choices[0].message.content or "")
answer_payload: Dict[str, List[str]] = {"answer": result}
task_result: Dict[str, Any] = blogger.post_answer(token, answer_payload)
print(task_result)
| [
"\nYou are a pizza master that writes a blog about pizza in polish.\nWrite a short paragraph about the given topic.\n"
] |
2024-01-10 | janbanot/ai_devs2 | api_tasks~inprompt.py | import os
from openai import OpenAI
import re
from typing import Dict, Any, List
from dotenv import load_dotenv
from ai_devs_task import Task
load_dotenv()
ai_devs_api_key: str = os.getenv("AI_DEVS_API_KEY", "")
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", ""))
inprompt: Task = Task(ai_devs_api_key, "inprompt")
token: str = inprompt.auth()
task_content: Dict[str, Any] = inprompt.get_content(token)
knowledge_dict: Dict[str, str] = {}
entries: List[str] = task_content["input"]
for entry in entries:
words: List[str] = entry.split()
name: str = words[0]
knowledge_dict[name] = entry
question: str = task_content["question"]
name_pattern: str = r"\b[A-Z][a-z]*\b"
subject: str = re.findall(name_pattern, question)[0]
subject_info: str = knowledge_dict[subject]
prompt: str = f"""
Answer the question shortly using only the information given below:
{subject_info}
"""
model_response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": question}
]
)
task_answer: str = model_response.choices[0].message.content or ""
answer_payload: Dict[str, str] = {"answer": task_answer}
task_result: Dict[str, Any] = inprompt.post_answer(token, answer_payload)
print(task_result)
| [
"inprompt",
"\nAnswer the question shortly using only the information given below:\nPLACEHOLDER\n"
] |
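The inprompt.py task above extracts the subject's name with `re.findall(r"\b[A-Z][a-z]*\b", question)[0]`, i.e. it takes the first capitalized word. A quick illustration (the sentences are made up) of when that works and when it would not:

```python
import re

name_pattern = r"\b[A-Z][a-z]*\b"

# Works when the name is the only capitalized token in the question...
print(re.findall(name_pattern, "what is the favourite colour of Ania?"))   # ['Ania']

# ...but a capitalized sentence start would be matched first instead.
print(re.findall(name_pattern, "What is the favourite colour of Ania?"))   # ['What', 'Ania']
```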
2024-01-10 | onepointconsulting/chainlit-sept-2023 | backend~chainlit~langchain~callbacks.py | import json
from typing import Any, Dict, List, Optional, Union
from chainlit import input_widget
from chainlit.config import config
from chainlit.context import context
from chainlit.message import ErrorMessage, Message
from chainlit.prompt import Prompt, PromptMessage
from chainlit.sync import run_sync
from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult
IGNORE_LIST = [] # type: List[str]
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
def get_llm_settings(invocation_params: Union[Dict, None], serialized: Dict[str, Any]):
if invocation_params is None:
return None, None
provider = invocation_params.pop("_type", "") # type: str
if provider.startswith("openai"):
model_name = invocation_params.pop("model_name")
invocation_params["model"] = model_name
model_kwargs = invocation_params.pop("model_kwargs", {})
merged = {
**invocation_params,
**model_kwargs,
**serialized.get("kwargs", {}),
}
# make sure there is no api key specification
settings = {k: v for k, v in merged.items() if not k.endswith("_api_key")}
return provider, settings
def build_prompt(serialized: Dict[str, Any], inputs: Dict[str, Any]):
prompt_params = serialized.get("kwargs", {}).get("prompt", {}).get("kwargs", {})
_messages = prompt_params.get("messages")
if _messages:
messages = []
for m in _messages:
class_name = m["id"][-1]
# A placeholder holds a variable that itself is a list of messages, like chat_history
if class_name == "MessagesPlaceholder":
variable_name = m.get("kwargs", {}).get("variable_name") # type: str
variable = inputs.get(variable_name, [])
placeholder_size = len(variable)
if placeholder_size:
messages += [PromptMessage(placeholder_size=placeholder_size)]
else:
m_prompt_params = (
m.get("kwargs", {}).get("prompt", {}).get("kwargs", {})
)
m_template = m_prompt_params.get("template")
m_template_format = m_prompt_params.get("template_format")
messages += [
PromptMessage(
template=m_template,
template_format=m_template_format,
role=convert_role(class_name),
)
]
else:
messages = None
template = prompt_params.get("template")
template_format = prompt_params.get("template_format")
stringified_inputs = inputs = {k: str(v) for (k, v) in inputs.items()}
if template:
return Prompt(
template=template,
template_format=template_format,
inputs=stringified_inputs,
)
elif messages:
return Prompt(inputs=stringified_inputs, messages=messages)
def convert_role(role: str):
if role in ["human", "chat", "HumanMessagePromptTemplate"]:
return "user"
elif role in ["system", "SystemMessagePromptTemplate"]:
return "system"
elif role in ["ai", "AIMessagePromptTemplate"]:
return "assistant"
elif role in ["function", "FunctionMessagePromptTemplate"]:
return "function"
else:
raise ValueError(f"Unsupported role {role}")
def convert_message(message: BaseMessage, template: Optional[str] = None):
function_call = message.additional_kwargs.get("function_call")
if function_call:
content = json.dumps(function_call, indent=4)
else:
content = message.content
return PromptMessage(
name=getattr(message, "name", None),
role=convert_role(message.type),
template=template,
formatted=content,
)
class BaseLangchainCallbackHandler(BaseCallbackHandler):
# Keep track of the prompt sequence
prompt_sequence: List[Prompt]
# Keep track of the call sequence, like [AgentExecutor, LLMMathChain, Calculator, ...]
sequence: List[Message]
# Keep track of the currently streamed message for the session
stream: Union[Message, None]
# The stream we can use to stream the final answer from a chain
final_stream: Union[Message, None]
# Message at the root of the chat we should attach child messages to
root_message: Message
# Should we stream the final answer?
stream_final_answer: bool = False
# Token sequence that prefixes the answer
answer_prefix_tokens: List[str]
# Ignore white spaces and new lines when comparing answer_prefix_tokens to last tokens? (to determine if answer has been reached)
strip_tokens: bool
# Should answer prefix itself also be streamed?
stream_prefix: bool
raise_error = True
    # We want the handler to be called on every message
always_verbose: bool = True
def __init__(
self,
*,
answer_prefix_tokens: Optional[List[str]] = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
stream_final_answer: bool = False,
root_message: Optional[Message] = None,
) -> None:
self.sequence = []
self.prompt_sequence = []
self.stream = None
if root_message:
self.root_message = root_message
elif root_message := context.session.root_message:
self.root_message = root_message
else:
self.root_message = Message(author=config.ui.name, content="")
run_sync(self.root_message.send())
# Langchain final answer streaming logic
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
# Our own final answer streaming logic
self.stream_final_answer = stream_final_answer
self.final_stream = None
self.has_streamed_final_answer = False
@property
def current_prompt(self):
if self.prompt_sequence:
return self.prompt_sequence[-1]
else:
return None
def append_to_last_tokens(self, token: str) -> None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def _compare_last_tokens(self, last_tokens: List[str]):
if last_tokens == self.answer_prefix_tokens_stripped:
# If tokens match perfectly we are done
return True
else:
# Some LLMs will consider all the tokens of the final answer as one token
# so we check if any last token contains all answer tokens
return any(
[
all(
answer_token in last_token
for answer_token in self.answer_prefix_tokens_stripped
)
for last_token in last_tokens
]
)
def check_if_answer_reached(self) -> bool:
if self.strip_tokens:
return self._compare_last_tokens(self.last_tokens_stripped)
else:
return self._compare_last_tokens(self.last_tokens)
def start_stream(self):
author = self.get_author()
if author in IGNORE_LIST:
return
parent_id = self.get_last_message().parent_id
self.stream = self.create_message(
prompt=self.current_prompt, author=author, parent_id=parent_id
)
def end_stream(self):
self.stream = None
def add_in_sequence(self, message: Message):
self.sequence.append(message)
def pop_sequence(self):
if self.sequence:
return self.sequence.pop()
def get_author(self):
if self.sequence:
return self.sequence[-1].author
return config.ui.name
def get_last_message(self):
for message in reversed(self.sequence):
if message.author not in IGNORE_LIST:
return message
return self.root_message
def create_error(self, error: Exception):
if isinstance(error, InterruptedError):
return None
return ErrorMessage(content=str(error), author=self.get_author())
def create_message(
self,
content: str = "",
prompt: Optional[Prompt] = None,
author: Optional[str] = None,
parent_id: Optional[str] = None,
):
if parent_id is None:
last_message = self.get_last_message()
parent_id = last_message.id
return Message(
content,
author=author or self.get_author(),
prompt=prompt,
parent_id=parent_id,
)
def _on_chat_model_start(
self: BaseLangchainCallbackHandler,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
):
invocation_params = kwargs.get("invocation_params")
provider, settings = get_llm_settings(invocation_params, serialized)
formatted_messages = messages[0]
if self.current_prompt:
self.current_prompt.provider = provider
self.current_prompt.settings = settings
# Chat mode
if self.current_prompt.messages:
# This is needed to compute the correct message index to read
placeholder_offset = 0
# The final list of messages
prompt_messages = []
# Looping the messages built in build_prompt
# They only contain the template
for templated_index, templated_message in enumerate(
self.current_prompt.messages
):
# If a message has a placeholder size, we need to replace it
# With the N following messages, where N is the placeholder size
if templated_message.placeholder_size:
for _ in range(templated_message.placeholder_size):
formatted_message = formatted_messages[
templated_index + placeholder_offset
]
prompt_messages += [convert_message(formatted_message)]
# Increment the placeholder offset
placeholder_offset += 1
# Finally, decrement the placeholder offset by one
# Because the message representing the placeholder is now consumed
placeholder_offset -= 1
# The current message is not a placeholder
else:
formatted_message = formatted_messages[
templated_index + placeholder_offset
]
# Update the role and formatted value, keep the template
prompt_messages += [
convert_message(
formatted_message, template=templated_message.template
)
]
# Finally set the prompt messages
self.current_prompt.messages = prompt_messages
# Non chat mode
elif self.current_prompt.template:
unique_message = messages[0][0]
prompt_message = convert_message(
unique_message, template=self.current_prompt.template
)
self.current_prompt.messages = [prompt_message]
self.current_prompt.template = None
# No current prompt, create it (formatted only)
else:
prompt_messages = [convert_message(m) for m in messages[0]]
self.prompt_sequence.append(
Prompt(
messages=prompt_messages,
provider=provider,
settings=settings,
)
)
def _on_llm_start(
self: BaseLangchainCallbackHandler,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> None:
invocation_params = kwargs.get("invocation_params")
provider, settings = get_llm_settings(invocation_params, serialized)
if self.current_prompt:
self.current_prompt.formatted = prompts[0]
self.current_prompt.provider = provider
self.current_prompt.settings = settings
else:
self.prompt_sequence.append(
Prompt(
formatted=prompts[0],
provider=provider,
settings=settings,
)
)
class LangchainCallbackHandler(BaseLangchainCallbackHandler, BaseCallbackHandler):
def on_error(self, error, **_):
if error := self.create_error(error):
run_sync(error.send())
self.pop_sequence()
on_tool_error = on_error
on_llm_error = on_error
on_chain_error = on_error
def send_token(self, token: str, final: bool = False):
stream = self.final_stream if final else self.stream
if stream:
run_sync(stream.stream_token(token))
self.has_streamed_final_answer = final
def add_message(self, message: Message):
if message.author in IGNORE_LIST:
return
if self.stream:
run_sync(self.stream.send())
self.end_stream()
else:
run_sync(message.send())
# Callbacks for various events
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
_on_llm_start(self, serialized, prompts, **kwargs)
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> None:
_on_chat_model_start(self, serialized, messages, **kwargs)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
if not self.stream:
self.start_stream()
self.send_token(token)
if not self.stream_final_answer:
return
self.append_to_last_tokens(token)
if self.answer_reached:
if not self.final_stream:
self.final_stream = Message(author=config.ui.name, content="")
self.send_token(token, final=True)
else:
self.answer_reached = self.check_if_answer_reached()
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
if response.llm_output is not None:
if "token_usage" in response.llm_output:
token_usage = response.llm_output["token_usage"]
if "total_tokens" in token_usage:
run_sync(
context.emitter.update_token_count(token_usage["total_tokens"])
)
if self.current_prompt:
self.current_prompt.completion = response.generations[0][0].text
if self.final_stream:
run_sync(self.final_stream.send())
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
prompt = build_prompt(serialized, inputs)
if prompt:
self.prompt_sequence.append(prompt)
message = self.create_message(author=serialized["id"][-1])
self.add_in_sequence(message)
self.add_message(message)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
output_key = list(outputs.keys())[0]
if output_key:
parent_id = self.get_last_message().parent_id
message = self.create_message(
outputs[output_key], self.current_prompt, parent_id=parent_id
)
self.add_message(message)
if self.prompt_sequence:
self.prompt_sequence.pop()
self.pop_sequence()
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
message = self.create_message(author=serialized["name"])
self.add_in_sequence(message)
self.add_message(message)
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
parent_id = self.get_last_message().parent_id
message = self.create_message(output, None, parent_id=parent_id)
self.add_message(message)
self.pop_sequence()
def on_text(self, text: str, **kwargs: Any) -> None:
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on agent end."""
pass
class AsyncLangchainCallbackHandler(BaseLangchainCallbackHandler, AsyncCallbackHandler):
async def on_error(self, error, **_):
if error := self.create_error(error):
await error.send()
self.pop_sequence()
on_tool_error = on_error
on_llm_error = on_error
on_chain_error = on_error
async def send_token(self, token: str, final: bool = False):
stream = self.final_stream if final else self.stream
if stream:
await stream.stream_token(token)
self.has_streamed_final_answer = final
async def add_message(self, message: Message):
if message.author in IGNORE_LIST:
return
if self.stream:
await self.stream.send()
self.end_stream()
else:
await message.send()
# Callbacks for various events
async def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
_on_llm_start(self, serialized, prompts, **kwargs)
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> None:
_on_chat_model_start(self, serialized, messages, **kwargs)
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
if not self.stream:
self.start_stream()
await self.send_token(token)
if not self.stream_final_answer:
return
self.append_to_last_tokens(token)
if self.answer_reached:
if not self.final_stream:
self.final_stream = Message(author=config.ui.name, content="")
await self.send_token(token, final=True)
else:
self.answer_reached = self.check_if_answer_reached()
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
if response.llm_output is not None:
if "token_usage" in response.llm_output:
token_usage = response.llm_output["token_usage"]
if "total_tokens" in token_usage:
await context.emitter.update_token_count(
token_usage["total_tokens"]
)
if self.current_prompt:
self.current_prompt.completion = response.generations[0][0].text
if self.final_stream:
await self.final_stream.send()
async def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
prompt = build_prompt(serialized, inputs)
if prompt:
self.prompt_sequence.append(prompt)
message = self.create_message(author=serialized["id"][-1])
self.add_in_sequence(message)
await self.add_message(message)
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
output_key = list(outputs.keys())[0]
if output_key:
parent_id = self.get_last_message().parent_id
message = self.create_message(
outputs[output_key], prompt=self.current_prompt, parent_id=parent_id
)
await self.add_message(message)
if self.prompt_sequence:
self.prompt_sequence.pop()
self.pop_sequence()
async def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
message = self.create_message(author=serialized["name"])
self.add_in_sequence(message)
await self.add_message(message)
async def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
parent_id = self.get_last_message().parent_id
message = self.create_message(output, parent_id=parent_id)
await self.add_message(message)
self.pop_sequence()
async def on_text(self, text: str, **kwargs: Any) -> None:
pass
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
pass
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on agent end."""
pass
| [
"[]",
"template_format"
] |
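The trickiest part of the Chainlit handler above is the placeholder bookkeeping in `_on_chat_model_start`: a `MessagesPlaceholder` entry stands for N already-formatted messages, so the index into `formatted_messages` must be shifted while placeholders are expanded. The snippet below replays that index arithmetic on plain tuples instead of `PromptMessage` objects; it is purely illustrative.

```python
# Templated prompt: (template_or_None, placeholder_size)
templated = [("You are a helpful bot.", 0),  # system template
             (None, 2),                      # MessagesPlaceholder holding 2 history messages
             ("{question}", 0)]              # human template

# Messages as the chat model actually received them.
formatted = ["You are a helpful bot.",
             "hi",
             "hello, how can I help?",
             "What is napari?"]

prompt_messages = []
placeholder_offset = 0
for templated_index, (template, placeholder_size) in enumerate(templated):
    if placeholder_size:
        # Replace the placeholder with the N formatted history messages.
        for _ in range(placeholder_size):
            prompt_messages.append((formatted[templated_index + placeholder_offset], None))
            placeholder_offset += 1
        placeholder_offset -= 1  # the placeholder entry itself is now consumed
    else:
        prompt_messages.append((formatted[templated_index + placeholder_offset], template))

# prompt_messages pairs each formatted message with its originating template (None for history).
```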
2024-01-10 | AutoResearch/AOS | sweetPeaEnglishTranslator~promt_scrambling~text_paraphrasing.py | import os
import openai
from dotenv import load_dotenv
import random
load_dotenv()
openai.api_key = os.getenv('OPENAI_KEY')
INSTRUCTIONS = ['Rephrase this', 'Reword this']
TEXT_CHUNKS = ['There are two regular factors: color and word.',
               'The color factor consists of four levels: "red", "green", "blue", "brown". The word factor also consists of the four levels: "red", "green", "blue", "brown".',
'We counterbalanced the color factor with the word factor.',
'The experiment sequences were generated subject to the constraint that no congruent trials were included.',
'All experiment sequences contained at least 20 trials and were sampled uniformly from the space of all counterbalanced sequences.']
def gpt3_paraphrase(text, instr):
response = openai.Edit.create(
model='text-davinci-edit-001',
input=text,
instruction=instr,
)
return response.choices[0]['text']
chunk = random.choice(TEXT_CHUNKS)
instruction = random.choice(INSTRUCTIONS)
new_text_prompt = gpt3_paraphrase(chunk, instruction)
f = open('altered_text_prompts.txt', 'a')
f.write('\nInstruction:\t')
f.write(instruction)
f.write('\nOriginal:\t')
f.write(chunk)
f.write('\nAltered:\t')
f.write(new_text_prompt)
f.write('\n*********')
f.close()
| [] |
2024-01-10 | GeorgioFe/solvr-ai | pages~2-PDF_Summarizer.py | '''
Author: Georgio Feghali
Date: July 11 2023
'''
# UI Dependencies.
import streamlit as st
from PIL import Image
# Logic Dependencies.
from langchain.llms import OpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.chains.summarize import load_summarize_chain
import tempfile
import os
# Page Configuration.
favicon = Image.open("./admin/branding/logos/favicon-32x32.png")
st.set_page_config(
page_title="Solvr.ai - PDF Summarizer",
page_icon=favicon,
layout="wide",
initial_sidebar_state="collapsed"
)
# Logic
# Convert file-like object to a temporary file path.
def create_temp_file(file_like_object):
# Step 1: Create a temporary file
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
temp_file_path = temp_file.name
# Step 2: Write the contents of the file-like object to the temporary file
with open(temp_file_path, 'wb') as temp_file_writer:
temp_file_writer.write(file_like_object.read())
# Step 3: Return the path to the temporary file
return temp_file_path
# Loads and processes the document to be used by the chain.
def document_loader(file):
file_path = create_temp_file(file)
loader = PyPDFLoader(file_path)
docs = loader.load()
return docs
# Setting up chain for summarizing.
def chain_setup():
OPENAI_API_KEY = st.secrets["openai_api_key"]
llm = OpenAI(openai_api_key=OPENAI_API_KEY)
chain = load_summarize_chain(llm=llm, chain_type="refine", verbose=False)
return chain
# Gets summary.
def get_summary(document):
chain = chain_setup()
docs = document_loader(document)
answer = chain.run(docs[:])
return answer
# UI
st.markdown("<h1 style='text-align: center;'>PDF Summarizer 📄</h1>", unsafe_allow_html=True)
uploaded_file = st.file_uploader("Choose a PDF file", "pdf")
if uploaded_file is not None:
    if st.button('Summarize!'):
with st.chat_message("assistant"):
with st.spinner('Summarizing PDF...'):
summary = get_summary(uploaded_file)
st.write("Here is the summary of the provided PDF!")
st.markdown(summary) | [] |
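The summarizer above passes the PyPDF pages straight to a `refine` chain via `chain.run(docs[:])`. For long PDFs, a common variation (a sketch, not part of the original app) is to split the pages into smaller chunks first so every refine step stays comfortably inside the model's context window:

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Chunk sizes are illustrative; tune them to the model's context window.
splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200)
split_docs = splitter.split_documents(docs)  # docs as returned by document_loader()
summary = chain.run(split_docs)              # chain as returned by chain_setup()
```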
2024-01-10 | GeorgioFe/solvr-ai | pages~4-Image_Generation.py | '''
Author: Georgio Feghali
Date: July 11 2023
'''
# UI Dependencies
import streamlit as st
from PIL import Image
# Logic Dependencies
import openai
# Page Configuration.
favicon = Image.open("./admin/branding/logos/favicon-32x32.png")
st.set_page_config(
page_title="Solvr.ai - Image Generation",
page_icon=favicon,
layout="wide",
initial_sidebar_state="collapsed"
)
## Logic
def create_image(prompt):
openai.api_key = st.secrets['openai_api_key']
response = openai.Image.create(
prompt=prompt,
n=1,
size="1024x1024"
)
img_url = response['data'][0]['url']
return img_url
# UI
st.markdown("<h1 style='text-align: center; vertical-align: middle;'>Image Generator 🖼️</h1>", unsafe_allow_html=True)
prompt = st.text_input("Prompt", placeholder="Describe the image you want us to generate for you!")
if st.button("Generate!"):
if prompt == "":
st.warning("Please enter a prompt!")
else:
with st.spinner("Generating Image..."):
img_url = create_image(prompt)
st.text(img_url)
| [
"Describe the image you want us to generate for you!"
] |
2024-01-10 | GeorgioFe/solvr-ai | pages~1-Chat_Assistant.py | '''
Author: Georgio Feghali
Date: July 11 2023
'''
# UI Dependencies.
import streamlit as st
from PIL import Image
# Logic Dependencies.
import openai
# Page Configuration.
favicon = Image.open("./admin/branding/logos/favicon-32x32.png")
st.set_page_config(
page_title="Solvr.ai - Chat Assistant",
page_icon=favicon,
layout="wide",
initial_sidebar_state="collapsed"
)
st.markdown("<h1 style='text-align: center;'>Chat Assistant 🤖</h1>", unsafe_allow_html=True)
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "Hello! How can I help you today?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input():
openai.api_key = st.secrets['openai_api_key']
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
msg = response.choices[0].message
st.session_state.messages.append(msg)
st.chat_message("assistant").write(msg.content) | [
"Hello! How can I help you today?"
] |
2024-01-10 | GeorgioFe/solvr-ai | pages~3-PDF_Q%26A.py | '''
Author: Georgio Feghali
Date: July 11 2023
'''
# UI Dependencies
import streamlit as st
from PIL import Image
# Logic Dependencies
from langchain.document_loaders import PyPDFLoader
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
import tempfile
import os
# Page Configuration
favicon = Image.open("./admin/branding/logos/favicon-32x32.png")
st.set_page_config(
page_title="Solvr.ai - PDF Q&A",
page_icon=favicon,
layout="wide",
initial_sidebar_state="collapsed"
)
# Logic
# Convert file-like object to a temporary file path.
def create_temp_file(file_like_object):
# Step 1: Create a temporary file
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
temp_file_path = temp_file.name
# Step 2: Write the contents of the file-like object to the temporary file
with open(temp_file_path, 'wb') as temp_file_writer:
temp_file_writer.write(file_like_object.read())
# Step 3: Return the path to the temporary file
return temp_file_path
# Loads and processes the document to be used by the chain.
def document_loader(file):
file_path = create_temp_file(file)
loader = PyPDFLoader(file_path)
docs = loader.load()
return docs
# Setting up chain for question answering.
def chain_setup():
OPENAI_API_KEY = st.secrets["openai_api_key"]
llm = OpenAI(openai_api_key=OPENAI_API_KEY)
chain = load_qa_chain(llm=llm, chain_type="map_rerank", verbose=False, return_intermediate_steps=False)
return chain
# Gets the answer to the question.
def get_answer(document, question: str):
chain = chain_setup()
docs = document_loader(document)
answer = chain.run(input_documents=docs, question=question)
return answer
# UI
st.markdown("<h1 style='text-align: center; vertical-align: middle;'>PDF Q&A ❓</h1>", unsafe_allow_html=True)
uploaded_file = st.file_uploader("Choose a PDF file", "pdf")
if uploaded_file is not None:
question = st.text_input("Ask a question about the PDF file")
if st.button('Get Answer!'):
with st.spinner('Getting answer...'):
answer = get_answer(uploaded_file, question)
st.write("Here is the answer to your question!")
st.markdown(answer) | [] |
2024-01-10 | jqwhite/napari-chatgpt | src~napari_chatgpt~utils~api_keys~api_key.py | import os
from arbol import asection, aprint
from napari_chatgpt.utils.qt.qt_app import get_or_create_qt_app
__api_key_names = {}
__api_key_names['OpenAI'] = 'OPENAI_API_KEY'
__api_key_names['Anthropic'] = 'ANTHROPIC_API_KEY'
__api_key_names['GoogleBard'] = 'BARD_KEY'
def set_api_key(api_name: str) -> bool:
with asection(f"Setting API key: '{api_name}': "):
# Api key name:
api_key_name = __api_key_names[api_name]
aprint(f"API key name: '{api_key_name}'")
        # If the key is already present, there is no need to do anything:
if is_api_key_available(api_name):
aprint(f"API key is already set as an environment variable!")
return True
# Something technical required for Qt to be happy:
get_or_create_qt_app()
# Get the key from vault or via user, password protected:
from napari_chatgpt.utils.api_keys.api_key_vault_dialog import \
request_if_needed_api_key_dialog
aprint(f"Requesting key from user via user interface...")
api_key = request_if_needed_api_key_dialog(api_name)
# API KEY:
if api_key:
os.environ[api_key_name] = api_key
if api_key_name == 'OPENAI_API_KEY':
import openai
openai.api_key = api_key
else:
return False
def is_api_key_available(api_name: str) -> bool:
# Api key name:
api_key_name = __api_key_names[api_name]
# Check if API key is set:
return api_key_name in dict(os.environ)
| [] |
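A minimal usage sketch for the two helpers above; the 'OpenAI' key name comes from the `__api_key_names` table, everything else is illustrative:

```python
from napari_chatgpt.utils.api_keys.api_key import is_api_key_available, set_api_key

# Ask the user (via the Qt dialog) only if no key is present yet.
if not is_api_key_available('OpenAI'):
    if not set_api_key('OpenAI'):
        raise RuntimeError("No OpenAI API key provided; Omega cannot start.")
```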
2024-01-10 | jqwhite/napari-chatgpt | src~napari_chatgpt~_sandbox~gpt4all_demo.py | import multiprocessing
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from napari_chatgpt.llm.gpt4all import GPT4AllFixed
from napari_chatgpt.utils.download.gpt4all import get_gpt4all_model
llm_model_name = 'ggml-gpt4all-l13b-snoozy'
model_path = get_gpt4all_model(llm_model_name)
n_threads = multiprocessing.cpu_count() - 1
n_ctx = 1024
n_predict = 2048
temperature = 0.1
# Instantiates Main LLM:
model = GPT4AllFixed(
model=model_path,
verbose=True,
streaming=True,
n_ctx=n_ctx,
n_threads=n_threads,
n_predict=n_predict,
f16_kv=False,
temp=temperature
)
callbacks = [StreamingStdOutCallbackHandler()]
# Generate text. Tokens are streamed through the callback manager.
result = model("Who are you?\n ", callbacks=callbacks)
| [] |
2024-01-10 | jqwhite/napari-chatgpt | src~napari_chatgpt~llm~llms.py | import multiprocessing
import os
from arbol import aprint
from langchain.callbacks.manager import AsyncCallbackManager
from langchain.chat_models import ChatOpenAI, ChatAnthropic
from napari_chatgpt.llm.bard import ChatBard
from napari_chatgpt.llm.gpt4all import GPT4AllFixed
from napari_chatgpt.utils.download.gpt4all import get_gpt4all_model
def instantiate_LLMs(llm_model_name: str,
temperature: float,
tool_temperature: float,
chat_callback_handler,
tool_callback_handler,
memory_callback_handler,
verbose: bool = False
):
aprint(f"Instantiating LLMs with model: '{llm_model_name}', t={temperature}, t_tool={tool_temperature}. ")
if 'gpt-' in llm_model_name:
# Instantiates Main LLM:
main_llm = ChatOpenAI(
model_name=llm_model_name,
verbose=verbose,
streaming=True,
temperature=temperature,
callback_manager=AsyncCallbackManager(
[chat_callback_handler])
)
# Instantiates Tool LLM:
tool_llm = ChatOpenAI(
model_name=llm_model_name,
verbose=verbose,
streaming=True,
temperature=tool_temperature,
callback_manager=AsyncCallbackManager([tool_callback_handler])
)
# Instantiates Memory LLM:
memory_llm = ChatOpenAI(
model_name=llm_model_name,
verbose=False,
temperature=temperature,
callback_manager=AsyncCallbackManager([memory_callback_handler])
)
max_token_limit = 8000 if 'gpt-4' in llm_model_name else 2000
if 'bard' in llm_model_name:
# Instantiates Main LLM:
main_llm = ChatBard(
bard_token=os.environ['BARD_KEY'],
verbose=verbose,
streaming=True,
callback_manager=AsyncCallbackManager(
[chat_callback_handler])
)
# Instantiates Tool LLM:
tool_llm = ChatBard(
bard_token=os.environ['BARD_KEY'],
verbose=verbose,
streaming=True,
callback_manager=AsyncCallbackManager([tool_callback_handler])
)
# Instantiates Memory LLM:
memory_llm = ChatBard(
bard_token=os.environ['BARD_KEY'],
verbose=False,
callback_manager=AsyncCallbackManager([memory_callback_handler])
)
max_token_limit = 1000
elif 'claude' in llm_model_name:
# Instantiates Main LLM:
main_llm = ChatAnthropic(
model=llm_model_name,
verbose=verbose,
streaming=True,
temperature=temperature,
max_tokens_to_sample=4096,
callback_manager=AsyncCallbackManager(
[chat_callback_handler])
)
# Instantiates Tool LLM:
tool_llm = ChatAnthropic(
model=llm_model_name,
verbose=verbose,
streaming=True,
temperature=tool_temperature,
max_tokens_to_sample=4096,
callback_manager=AsyncCallbackManager([tool_callback_handler])
)
# Instantiates Memory LLM:
memory_llm = ChatAnthropic(
model=llm_model_name,
verbose=False,
temperature=temperature,
max_tokens_to_sample=4096,
callback_manager=AsyncCallbackManager([memory_callback_handler])
)
max_token_limit = 8000
elif 'ggml' in llm_model_name:
model_path = get_gpt4all_model(llm_model_name)
n_threads = multiprocessing.cpu_count() - 1
n_ctx = 1400
n_predict = 1200
# Instantiates Main LLM:
main_llm = GPT4AllFixed(
model=model_path,
verbose=verbose,
streaming=True,
n_ctx=n_ctx,
n_threads=n_threads,
n_predict=n_predict,
f16_kv=True,
temp=temperature,
callback_manager=AsyncCallbackManager(
[chat_callback_handler])
)
# Too costly to instantiate 3!
memory_llm = main_llm
tool_llm = main_llm
max_token_limit = n_ctx
return main_llm, memory_llm, tool_llm, max_token_limit
| [] |
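For reference, a hedged sketch of how `instantiate_LLMs` above might be wired up; the single streaming handler reused for all three roles is a stand-in (any callback handler accepted by `AsyncCallbackManager` works), and the model name just needs to match one of the branches shown:

```python
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

handler = StreamingStdOutCallbackHandler()  # stand-in for the chat/tool/memory handlers

main_llm, memory_llm, tool_llm, max_token_limit = instantiate_LLMs(
    llm_model_name='gpt-4',
    temperature=0.01,
    tool_temperature=0.001,
    chat_callback_handler=handler,
    tool_callback_handler=handler,
    memory_callback_handler=handler,
    verbose=True,
)
```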
2024-01-10 | jqwhite/napari-chatgpt | src~napari_chatgpt~chat_server~callbacks~callbacks_handle_chat.py | import asyncio
from pprint import pprint
from typing import Any, Dict, Union, List, Optional
from uuid import UUID
from arbol import aprint
from langchain.callbacks.base import AsyncCallbackHandler
from langchain.schema import AgentFinish, AgentAction, LLMResult, BaseMessage
from napari_chatgpt.chat_server.chat_response import ChatResponse
from napari_chatgpt.omega.omega_agent.agent_output_parser import parse_command
from napari_chatgpt.utils.strings.camel_case_to_normal import \
camel_case_to_lower_case
class ChatCallbackHandler(AsyncCallbackHandler):
"""Callback handler for chat responses."""
def __init__(self, websocket, verbose: bool = False):
self.websocket = websocket
self.verbose = verbose
self.last_tool_used = ''
self.last_tool_input = ''
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
"""Run when a chat model starts running."""
if self.verbose:
aprint(
f"CHAT on_chat_model_start: serialized={serialized}, messages={messages}, run_id={run_id}, parent_run_id={parent_run_id}, kwargs={kwargs}")
async def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> Any:
"""Run when LLM starts running."""
pprint(prompts)
resp = ChatResponse(sender="agent", message='', type="typing")
await self.websocket.send_json(resp.dict())
async def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
"""Run on new LLM token. Only available when streaming is enabled."""
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
"""Run when LLM ends running."""
async def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when LLM errors."""
async def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any],
**kwargs: Any
) -> Any:
"""Run when chain starts running."""
if self.verbose:
aprint(
f"CHAT on_chain_start: serialized={serialized}, inputs={inputs}")
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
"""Run when chain ends running."""
if self.verbose:
aprint(f"CHAT on_chain_end: {outputs}")
async def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when chain errors."""
if self.verbose:
aprint(f"CHAT on_chain_error: {error}")
async def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> Any:
"""Run when tool starts running."""
if self.verbose:
aprint(
f"CHAT on_tool_start: serialized={serialized}, input_str={input_str}")
async def on_tool_end(self, output: str, **kwargs: Any) -> Any:
"""Run when tool ends running."""
if self.verbose:
aprint(f"CHAT on_tool_end: output={output}")
async def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when tool errors."""
if self.verbose:
aprint(f"CHAT on_tool_error: {error}")
error_type = type(error).__name__
error_message = ', '.join(error.args)
message = f"Failed because:\n'{error_message}'\nException: '{error_type}'\n"
resp = ChatResponse(sender="agent", message=message, type="error")
        await self.websocket.send_json(resp.dict())
async def on_text(self, text: str, **kwargs: Any) -> Any:
"""Run on arbitrary text."""
if self.verbose:
aprint(f"CHAT on_text: {text}")
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
if self.verbose:
aprint(f"CHAT on_agent_action: {action}")
tool = camel_case_to_lower_case(action.tool)
message = f"I am using the {tool} to tackle your request: '{action.tool_input}'"
self.last_tool_used = tool
self.last_tool_input = action.tool_input
if not parse_command([action.tool],action.log):
message += f"\n {action.log}"
resp = ChatResponse(sender="agent", message=message, type="action")
await self.websocket.send_json(resp.dict())
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run on agent end."""
if self.verbose:
aprint(f"CHAT on_agent_finish: {finish}")
# message = finish.return_values['output']
# resp = ChatResponse(sender="agent", message=message, type="finish")
# await self.websocket.send_json(resp.dict())
| [] |
2024-01-10 | jqwhite/napari-chatgpt | src~napari_chatgpt~llm~gpt4all.py | """Wrapper for the GPT4All model."""
import asyncio
from concurrent.futures import ThreadPoolExecutor
from typing import List, Optional
from langchain.callbacks.manager import AsyncCallbackManagerForLLMRun
from langchain.llms import GPT4All
_async_gpt4all_thread_pool = ThreadPoolExecutor()
class GPT4AllFixed(GPT4All):
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> str:
"""Run the LLM on the given prompt and input."""
result = await asyncio.get_running_loop().run_in_executor(
            _async_gpt4all_thread_pool,
self._call,
prompt,
stop,
run_manager
)
return result
| [] |
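The `_acall` override above relies on the standard trick of pushing a blocking call onto a thread pool so it can be awaited without blocking the event loop. The same pattern in isolation (illustrative only):

```python
import asyncio
from concurrent.futures import ThreadPoolExecutor

_pool = ThreadPoolExecutor()

def blocking_generate(prompt: str) -> str:
    # Stand-in for a blocking, CPU-bound model call.
    return prompt.upper()

async def agenerate(prompt: str) -> str:
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(_pool, blocking_generate, prompt)

# asyncio.run(agenerate("hello"))  ->  "HELLO"
```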
2024-01-10 | jqwhite/napari-chatgpt | src~napari_chatgpt~llm~bard.py | import asyncio
import re
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import Dict, List, Optional
from Bard import Chatbot
from langchain.callbacks.manager import (
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from pydantic import Field
from pydantic import root_validator
_async_bard_thread_pool = ThreadPoolExecutor()
class ChatBard(LLM):
r"""Wrapper around Google's large language model Bard.
"""
streaming: bool = False
"""Whether to stream the results or not."""
bard_token: str = Field(default=None)
bard: Chatbot = None
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
        return values
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "google-bard"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
r"""Call out to Bard's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token,
verbose=self.verbose)
text = ""
for token in self._bard(prompt):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> str:
"""Run the LLM on the given prompt and input."""
result = await asyncio.get_running_loop().run_in_executor(
            _async_bard_thread_pool,
self._call,
prompt,
stop,
run_manager
)
return result
def _bard(self,
prompt: str):
if not self.bard:
self.bard = Chatbot(self.bard_token)
response = self.bard.ask(prompt)
text = response['content']
words = re.split(r"(\s+)", text)
for word in words:
yield word
| [] |
2024-01-10 | jqwhite/napari-chatgpt | src~napari_chatgpt~_sandbox~conv_agent_demo.py | from langchain import WikipediaAPIWrapper
from langchain.agents import AgentType
from langchain.agents import initialize_agent
from langchain.agents.load_tools import _get_llm_math
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.tools.python.tool import PythonREPLTool
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from napari_chatgpt.omega.tools.search.web_search_tool import WebSearchTool
llm = ChatOpenAI(temperature=0)
memory = ConversationBufferMemory(memory_key="chat_history",
return_messages=True)
wiki = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
tools = [wiki, WebSearchTool(), _get_llm_math(llm), PythonREPLTool()]
system_message = """Omega is a large language model trained by OpenAI.
Omega is designed to be able to assist with a wide range of tasks,
from answering simple questions to providing in-depth explanations and
discussions on a wide range of topics. As a language model, Omega is
able to generate human-like text based on the input it receives, allowing
it to engage in natural-sounding conversations and provide responses that
are coherent and relevant to the topic at hand.
Omega is constantly learning and improving, and its capabilities are
constantly evolving. It is able to process and understand large amounts of text,
and can use this knowledge to provide accurate and informative responses to a wide
range of questions. Additionally, Omega is able to generate its own text based
on the input it receives, allowing it to engage in discussions and provide explanations
and descriptions on a wide range of topics.
Overall, Omega is a powerful system that can help with a wide range of tasks
and provide valuable insights and information on a wide range of topics.
Whether you need help with a specific question or just want to have a conversation
about a particular topic, Omega is here to assist."""
human_message = """TOOLS
------
Omega can ask the user to use tools to look up information that may be helpful
in answering the user's original question. The tools the human can use are:
{{tools}}
{format_instructions}
USER'S INPUT
--------------------
Here is the user's input (remember to respond with a markdown code snippet of a
json blob with a single action, and NOTHING else):
{{{{input}}}}"""
agent_kwargs = {'system_message': system_message,
'human_message': human_message}
agent_chain = initialize_agent(tools,
llm,
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
verbose=True,
memory=memory,
agent_kwargs=agent_kwargs
)
while True:
query = input()
if query == 'quit':
break
print(agent_chain.run(input=query))
| [] |
2024-01-10 | jqwhite/napari-chatgpt | src~napari_chatgpt~omega~tools~search~wikipedia_query_tool.py | import traceback
from arbol import asection, aprint
from langchain import WikipediaAPIWrapper
from napari_chatgpt.omega.tools.async_base_tool import AsyncBaseTool
_api_wrapper = WikipediaAPIWrapper()
class WikipediaQueryTool(AsyncBaseTool):
"""Tool that adds the capability to search using the Wikipedia API."""
name = "WikipediaQueryTool"
description = (
"Use this tool to answer general questions on topics covered by an encyclopedia "
"such as historical events, scientific concepts, geography... "
"for which you don't already have the answer. "
"Input must be a plain text wikipedia search query. "
"Do NOT use this tool if you already have the answer."
)
def _run(self, query: str) -> str:
"""Use the Wikipedia tool."""
try:
with asection(f"WikipediaQueryTool: query= {query} "):
# Run wikipedia query:
result = _api_wrapper.run(query)
with asection(f"Result:"):
aprint(result)
return result
except Exception as e:
traceback.print_exc()
return f"Error: {type(e).__name__} with message: '{str(e)}' occured while trying to query wikipedia for: '{query}'."
| [] |
2024-01-10 | jqwhite/napari-chatgpt | src~napari_chatgpt~_widget.py | """
This module is an example of a barebones QWidget plugin for napari
It implements the Widget specification.
see: https://napari.org/stable/plugins/guides.html?#widgets
Replace code below according to your needs.
"""
import sys
from typing import TYPE_CHECKING
from napari_chatgpt.chat_server.chat_server import NapariChatServer
from napari_chatgpt.utils.api_keys.api_key import set_api_key
from napari_chatgpt.utils.python.installed_packages import \
is_package_installed
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QLabel, QCheckBox
from PyQt5.QtWidgets import QVBoxLayout, QComboBox
from napari.viewer import Viewer
from qtpy.QtWidgets import QPushButton, QWidget
if TYPE_CHECKING:
pass
from arbol import aprint, asection
_creativity_mapping = {}
_creativity_mapping['normal'] = 0.01
_creativity_mapping['slightly creative'] = 0.05
_creativity_mapping['moderately creative'] = 0.1
_creativity_mapping['creative'] = 0.2
class OmegaQWidget(QWidget):
# your QWidget.__init__ can optionally request the napari viewer instance
# in one of two ways:
# 1. use a parameter called `napari_viewer`, as done here
# 2. use a type annotation of 'napari.viewer.Viewer' for any parameter
def __init__(self, napari_viewer):
super().__init__()
aprint("OmegaQWidget instantiated!")
# Napari viewer instance:
self.viewer = napari_viewer
# Napari chat server instance:
self.server: NapariChatServer = None
# Create a QVBoxLayout instance
self.layout = QVBoxLayout()
# Set layout alignment:
self.layout.setAlignment(Qt.AlignTop)
# Add elements to UI:
self._model_selection()
self._creativity_level()
self._memory_type_selection()
self._personality_selection()
self._fix_imports()
self._fix_bad_version_calls()
self._install_missing_packages()
self._autofix_mistakes()
self._autofix_widgets()
self._verbose()
self._start_omega_button()
# Set the layout on the application's window
self.setLayout(self.layout)
def _model_selection(self):
aprint("Setting up model selection UI.")
# Create a QLabel instance
self.model_label = QLabel("Select a model:")
# Add the label to the layout
self.layout.addWidget(self.model_label)
# Create a QComboBox instance
self.model_combo_box = QComboBox()
# Set tooltip for the combo box
self.model_combo_box.setToolTip(
"Choose an LLM model. Best models are GPT4 and GPT3.5, \n"
"with Claude a bit behind, other models are experimental\n"
"and unfortunately barely usable.")
# Add OpenAI models to the combo box:
with asection(f"Enumerating all OpenAI ChatGPT models:"):
import openai
set_api_key('OpenAI')
for model in openai.Model.list().data:
model_id = model.openai_id
if 'gpt' in model_id:
aprint(model_id)
self.model_combo_box.addItem(model_id)
# if is_package_installed('googlebard'):
self.model_combo_box.addItem('bard')
if is_package_installed('anthropic'):
# Add Anthropic models to the combo box:
self.model_combo_box.addItem('claude-2')
self.model_combo_box.addItem('claude-instant-1')
if is_package_installed('pygpt4all'):
self.model_combo_box.addItem('ggml-mpt-7b-chat')
self.model_combo_box.addItem('ggml-gpt4all-j-v1.3-groovy')
self.model_combo_box.addItem('ggml-gpt4all-l13b-snoozy')
# Connect the activated signal to a slot
# self.model_combo_box.activated[str].connect(self.onActivated)
# Add the combo box to the layout
self.layout.addWidget(self.model_combo_box)
def _creativity_level(self):
aprint("Setting up creativity level UI.")
# Create a QLabel instance
        self.creativity_label = QLabel("Choose the level of creativity:")
# Add the label to the layout
self.layout.addWidget(self.creativity_label)
# Creativity combobox:
self.creativity_combo_box = QComboBox()
self.creativity_combo_box.setToolTip(
"Choose the level of creativity of Omega\n"
"The less creative the more deterministic\n"
"and accurate the results.\n"
"Teh more creative, the more fantasy and\n"
"the less competent it is at code generation\n"
"and precise reasoning.")
# Add values:
self.creativity_combo_box.addItem('normal')
self.creativity_combo_box.addItem('slightly creative')
self.creativity_combo_box.addItem('moderately creative')
self.creativity_combo_box.addItem('creative')
self.creativity_combo_box.setCurrentIndex(0)
# Add the creativity combobox to the layout:
self.layout.addWidget(self.creativity_combo_box)
def _memory_type_selection(self):
aprint("Setting up memory type UI.")
# Create a QLabel instance
self.memory_type_label = QLabel("Select a memory type:")
# Add the label to the layout
self.layout.addWidget(self.memory_type_label)
# Create a QComboBox instance
self.memory_type_combo_box = QComboBox()
self.memory_type_combo_box.setToolTip(
"'hybrid' is best as it combines accurate short-term memory \n"
"with summarised long term memory. 'bounded' only remembers \n"
"the last few messages. 'infinite' remembers everything.")
# Add memory types:
self.memory_type_combo_box.addItem('hybrid')
self.memory_type_combo_box.addItem('bounded')
self.memory_type_combo_box.addItem('infinite')
# Add the combo box to the layout
self.layout.addWidget(self.memory_type_combo_box)
def _personality_selection(self):
aprint("Setting up personality UI.")
# Create a QLabel instance
self.agent_personality_label = QLabel("Select a personality:")
# Add the label to the layout
self.layout.addWidget(self.agent_personality_label)
# Create a QComboBox instance
self.agent_personality_combo_box = QComboBox()
self.agent_personality_combo_box.setToolTip(
"Personalities affect the style of the answers\n"
"but (hopefully) not their quality")
# Add characters:
self.agent_personality_combo_box.addItem('coder')
self.agent_personality_combo_box.addItem('neutral')
self.agent_personality_combo_box.addItem('prof')
self.agent_personality_combo_box.addItem('mobster')
self.agent_personality_combo_box.addItem('yoda')
# Add the combo box to the layout
self.layout.addWidget(self.agent_personality_combo_box)
def _fix_imports(self):
aprint("Setting up fix imports UI.")
# Create a QLabel instance
self.fix_imports_checkbox = QCheckBox("Fix missing imports")
self.fix_imports_checkbox.setChecked(True)
self.fix_imports_checkbox.setToolTip(
"Uses LLM to check for missing imports.\n"
"This involves a LLM call which can incur additional\n"
"cost in time and possibly money."
)
# Add the fix_imports checkbox to the layout:
self.layout.addWidget(self.fix_imports_checkbox)
def _fix_bad_version_calls(self):
aprint("Setting up bad version imports UI.")
# Create a QLabel instance
self.fix_bad_calls_checkbox = QCheckBox("Fix bad function calls")
self.fix_bad_calls_checkbox.setChecked(True)
self.fix_bad_calls_checkbox.setToolTip("Uses LLM to fix function calls.\n"
"When turned on, this detects wrong function calls, \n"
"possibly because of library version mismatch and fixes,"
"replaces the offending code with the right version! "
"This involves a LLM call which can incurr additional\n"
"cost in time and possibly money."
)
# Add the fix_code checkbox to the layout:
self.layout.addWidget(self.fix_bad_calls_checkbox)
def _install_missing_packages(self):
aprint("Setting up install missing packages UI.")
# Create a QLabel instance
self.install_missing_packages_checkbox = QCheckBox(
"Install missing packages")
self.install_missing_packages_checkbox.setChecked(True)
self.install_missing_packages_checkbox.setToolTip(
"Uses LLM to figure out which packages to install.\n"
"This involves a LLM call which can incur additional\n"
"cost in time and possibly money.")
# Add the install_missing_packages checkbox to the layout:
self.layout.addWidget(self.install_missing_packages_checkbox)
def _autofix_mistakes(self):
aprint("Setting up autofix mistakes UI.")
# Create a QLabel instance
self.autofix_mistakes_checkbox = QCheckBox(
"Autofix coding mistakes")
self.autofix_mistakes_checkbox.setChecked(False)
self.autofix_mistakes_checkbox.setToolTip(
"When checked Omega will try to fix on its own coding mistakes\n"
"when processing data and interacting with the napari viewer.\n"
"This does not include making widgets!\n"
"Works so-so with ChatGPT 3.5, but works well with ChatGPT 4.\n"
"This involves a LLM call which can incur additional\n"
"cost in time and possibly money.")
# Add the install_missing_packages checkbox to the layout:
self.layout.addWidget(self.autofix_mistakes_checkbox)
def _autofix_widgets(self):
aprint("Setting up autofix widgets UI.")
# Create a QLabel instance
self.autofix_widgets_checkbox = QCheckBox(
"Autofix widget coding mistakes")
self.autofix_widgets_checkbox.setChecked(False)
self.autofix_widgets_checkbox.setToolTip(
"When checked Omega will try to fix its own \n"
"coding mistakes when making widgets. \n"
"Works so-so with ChatGPT 3.5, but works well with ChatGPT 4.\n"
"This involves a LLM call which can incur additional\n"
"cost in time and possibly money.")
# Add the install_missing_packages checkbox to the layout:
self.layout.addWidget(self.autofix_widgets_checkbox)
def _verbose(self):
aprint("Setting up verbose UI.")
# Create a QLabel instance
self.verbose_checkbox = QCheckBox(
"High console verbosity")
self.verbose_checkbox.setChecked(False)
self.verbose_checkbox.setToolTip(
"High level of verbosity in the console\n"
"This includes a lot of internal logging\n"
"from the langchain library.\n"
"Nearly incomprehensible, but usefull\n"
"if you are interested to see the prompts\n"
"in action...")
# Add the install_missing_packages checkbox to the layout:
self.layout.addWidget(self.verbose_checkbox)
def _start_omega_button(self):
aprint("Setting up start Omega button UI.")
# Start Omega button:
self.start_omega_button = QPushButton("Start Omega")
self.start_omega_button.clicked.connect(self._on_click)
self.start_omega_button.setToolTip(
"Start Omega, this will open a browser window.\n"
"You can restart Omega with new settings by\n"
"clicking again this button. This closes the\n"
"previous session.")
# Omega button:
self.layout.addWidget(self.start_omega_button)
def _on_click(self):
aprint("Starting Omega now!")
# Stop previous instance if it exists:
if self.server:
self.server.stop()
# Temperature:
temperature = float(_creativity_mapping[
self.creativity_combo_box.currentText()])
tool_temperature = 0.1*temperature
from napari_chatgpt.chat_server.chat_server import start_chat_server
self.server = start_chat_server(self.viewer,
llm_model_name=self.model_combo_box.currentText(),
temperature=temperature,
tool_temperature=tool_temperature,
memory_type=self.memory_type_combo_box.currentText(),
agent_personality=self.agent_personality_combo_box.currentText(),
fix_imports=self.fix_imports_checkbox.isChecked(),
install_missing_packages=self.install_missing_packages_checkbox.isChecked(),
fix_bad_calls=self.fix_bad_calls_checkbox.isChecked(),
autofix_mistakes=self.autofix_mistakes_checkbox.isChecked(),
autofix_widget=self.autofix_widgets_checkbox.isChecked(),
verbose=self.verbose_checkbox.isChecked()
)
def main():
app = QApplication(sys.argv)
    # A napari.viewer.Viewer instance is required;
    # a standalone viewer is created here, replace it with an existing instance if needed.
viewer = Viewer()
widget = OmegaQWidget(viewer)
widget.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| [] |
2024-01-10 | jqwhite/napari-chatgpt | src~napari_chatgpt~omega~omega_init.py | from queue import Queue
import langchain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.manager import AsyncCallbackManager, CallbackManager
from langchain.schema import BaseMemory
from napari_chatgpt.omega.omega_agent.agent import OmegaAgent
from napari_chatgpt.omega.omega_agent.agent_executor import \
OmegaAgentExecutor
from napari_chatgpt.omega.omega_agent.prompts import PREFIX, SUFFIX, PERSONALITY
from napari_chatgpt.omega.tools.napari.cell_nuclei_segmentation import \
CellNucleiSegmentationTool
from napari_chatgpt.omega.tools.napari.file_open_tool import NapariFileOpenTool
from napari_chatgpt.omega.tools.napari.image_denoising import ImageDenoisingTool
from napari_chatgpt.omega.tools.napari.viewer_control_tool import \
NapariViewerControlTool
from napari_chatgpt.omega.tools.napari.viewer_query_tool import \
NapariViewerQueryTool
from napari_chatgpt.omega.tools.napari.widget_maker_tool import \
NapariWidgetMakerTool
from napari_chatgpt.omega.tools.napari_plugin_tool import \
NapariPluginTool
from napari_chatgpt.omega.tools.search.web_image_search_tool import \
WebImageSearchTool
from napari_chatgpt.omega.tools.search.web_search_tool import WebSearchTool
from napari_chatgpt.omega.tools.search.wikipedia_query_tool import \
WikipediaQueryTool
from napari_chatgpt.omega.tools.special.exception_catcher_tool import \
ExceptionCatcherTool
from napari_chatgpt.omega.tools.special.functions_info_tool import \
PythonFunctionsInfoTool
from napari_chatgpt.omega.tools.special.human_input_tool import HumanInputTool
from napari_chatgpt.omega.tools.special.python_repl import PythonCodeExecutionTool
from napari_chatgpt.utils.omega_plugins.discover_omega_plugins import \
discover_omega_tools
# Default verbosity to False:
langchain.verbose = False
def initialize_omega_agent(to_napari_queue: Queue = None,
from_napari_queue: Queue = None,
main_llm: BaseLanguageModel = None,
tool_llm: BaseLanguageModel = None,
is_async: bool = False,
chat_callback_handler: BaseCallbackHandler = None,
tool_callback_handler: BaseCallbackHandler = None,
has_human_input_tool: bool = True,
memory: BaseMemory = None,
agent_personality: str = 'neutral',
fix_imports: bool = True,
install_missing_packages: bool = True,
fix_bad_calls: bool = True,
autofix_mistakes: bool = False,
autofix_widget: bool = False,
verbose: bool = False
) -> OmegaAgentExecutor:
chat_callback_manager = (AsyncCallbackManager(
[chat_callback_handler]) if is_async else CallbackManager(
[chat_callback_handler])) if chat_callback_handler else None
    tool_callback_manager = (CallbackManager(
        [tool_callback_handler])) if tool_callback_handler else None
tools = [WikipediaQueryTool(callback_manager=tool_callback_manager),
WebSearchTool(callback_manager=tool_callback_manager),
PythonFunctionsInfoTool(callback_manager=tool_callback_manager),
ExceptionCatcherTool(callback_manager=tool_callback_manager),
# FileDownloadTool(),
PythonCodeExecutionTool(callback_manager=tool_callback_manager)
]
if has_human_input_tool:
tools.append(HumanInputTool(callback_manager=tool_callback_manager))
if to_napari_queue:
kwargs = {'llm': tool_llm,
'to_napari_queue': to_napari_queue,
'from_napari_queue': from_napari_queue,
'callback_manager': tool_callback_manager,
'fix_imports': fix_imports,
'install_missing_packages': install_missing_packages,
'fix_bad_calls': fix_bad_calls,
'verbose': verbose
}
tools.append(NapariViewerControlTool(**kwargs, return_direct=not autofix_mistakes))
tools.append(NapariViewerQueryTool(**kwargs, return_direct=not autofix_mistakes))
tools.append(NapariWidgetMakerTool(**kwargs, return_direct=not autofix_widget))
tools.append(NapariFileOpenTool(**kwargs))
tools.append(WebImageSearchTool(**kwargs))
tools.append(CellNucleiSegmentationTool(**kwargs))
tools.append(ImageDenoisingTool(**kwargs))
tool_classes = discover_omega_tools()
for tool_class in tool_classes:
if 'ExampleOmegaTool' in tool_class.__name__:
            # This is just an example/template!
# Avoids having to test this with a separate repo!
continue
tools.append(NapariPluginTool(
plugin_tool_instance=tool_class(),
name=tool_class.name,
type=tool_class.type,
description=tool_class.description,
prompt=tool_class.description,
return_direct=tool_class.return_direct,
            llm=tool_llm,
to_napari_queue=to_napari_queue,
from_napari_queue=from_napari_queue,
callback_manager=tool_callback_manager))
# prepend the personality:
PREFIX_ = PREFIX + PERSONALITY[agent_personality]
agent = OmegaAgent.from_llm_and_tools(
llm=main_llm,
tools=tools,
system_message=PREFIX_,
human_message=SUFFIX,
verbose=verbose,
callback_manager=chat_callback_manager,
)
executor = OmegaAgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
memory=memory,
verbose=verbose,
callback_manager=chat_callback_manager
)
return executor
| [] |
2024-01-10 | jqwhite/napari-chatgpt | src~napari_chatgpt~omega~tools~napari~napari_base_tool.py | """A tool for running python code in a REPL."""
import sys
from pathlib import Path
from queue import Queue
from typing import Union, Optional
import napari
import numpy
from arbol import aprint, asection
from langchain import LLMChain, PromptTemplate
from langchain.chat_models.base import BaseChatModel
from langchain.llms.base import LLM
from napari import Viewer
from pydantic import Field
from napari_chatgpt.omega.tools.async_base_tool import AsyncBaseTool
from napari_chatgpt.utils.python.exception_guard import ExceptionGuard
from napari_chatgpt.utils.python.fix_bad_fun_calls import \
fix_all_bad_function_calls
from napari_chatgpt.utils.python.installed_packages import \
installed_package_list
from napari_chatgpt.omega.tools.instructions import \
omega_generic_codegen_instructions
from napari_chatgpt.utils.python.missing_packages import required_packages
from napari_chatgpt.utils.python.pip_utils import pip_install
from napari_chatgpt.utils.python.required_imports import required_imports
from napari_chatgpt.utils.strings.extract_code import extract_code_from_markdown
from napari_chatgpt.utils.strings.filter_lines import filter_lines
def _get_delegated_code(name: str, signature: bool = False):
with asection(f"Getting delegated code: '{name}' (signature={signature})"):
# Get current package folder:
current_package_folder = Path(__file__).parent
# Get package folder:
package_folder = Path.joinpath(current_package_folder, f"delegated_code")
# file path:
file_path = Path.joinpath(package_folder, f"{name}.py")
aprint(f'Filepath: {file_path}')
# code:
code = file_path.read_text()
# extract signature:
if signature:
aprint('Extracting signature!')
splitted_code = code.split('### SIGNATURE')
code = splitted_code[1]
return code
class NapariBaseTool(AsyncBaseTool):
"""A base tool for that delegates to execution to a sub-LLM and communicates with napari via queues."""
name: str = "<NAME>"
description: str = (
"Enter"
"Description"
"Here"
)
code_prefix: str = ''
instructions: str = omega_generic_codegen_instructions
prompt: str = None
to_napari_queue: Queue = Field(default=None)
from_napari_queue: Queue = Field(default=None)
llm: Union[BaseChatModel, LLM] = Field(default=None)
return_direct: bool = False
save_last_generated_code: bool = True
fix_imports = True
install_missing_packages = True
fix_bad_calls = False
verbose = False
last_generated_code: Optional[str] = None
def _run(self, query: str) -> str:
"""Use the tool."""
if self.prompt:
# Instantiate chain:
chain = LLMChain(
prompt=self._get_prompt_template(),
llm=self.llm,
verbose=self.verbose,
callbacks=self.callbacks
)
# chain.callback_manager.add_handler(ToolCallbackHandler(type(self).__name__))
# chain.callbacks.add_handler(ArbolCallbackHandler())
# List of installed packages:
package_list = installed_package_list()
if self.last_generated_code:
last_generated_code = "**Previously Generated Code:**\n",
last_generated_code += ("Use this code for reference, usefull if you need to modify or fix the code. ",
"IMPORTANT: This code might not be relevant to the current request or task! "
"You should ignore it, unless you are explicitely asked to fix or modify the last generated widget!",
"```python\n",
self.last_generated_code + '\n',
"```\n"
)
else:
last_generated_code = ''
# Adding information about packages and Python version to instructions:
filled_generic_instructions = omega_generic_codegen_instructions.format(
python_version=str(sys.version.split()[0]),
packages=', '.join(package_list))
# Prepend generic instructions to tool specific instructions:
instructions = filled_generic_instructions + self.instructions
# Variable for prompt:
variables = {"input": query,
"instructions": instructions,
"last_generated_code": last_generated_code,
}
# call LLM:
code = chain(variables)['text']
aprint(f"code:\n{code}")
else:
            # No code generated because no sub-LLM delegation, delegated_function has the business logic.
code = None
# Update last generated code:
if self.save_last_generated_code:
self.last_generated_code = code
        # Setting up delegated function:
delegated_function = lambda v: self._run_code(query, code, v)
# Send code to napari:
self.to_napari_queue.put(delegated_function)
# Get response:
response = self.from_napari_queue.get()
if isinstance(response, ExceptionGuard):
exception_guard = response
# raise exception_guard.exception
return f"Error: {exception_guard.exception_type_name} with message: '{str(exception_guard.exception)}' while using tool: {self.__class__.__name__} ."
return response
def _run_code(self, query: str, code: str, viewer: Viewer) -> str:
"""
This is the code that is executed, see implementations for details,
must return 'Success: ...' if things went well, otherwise it is failure!
"""
raise NotImplementedError("This method must be implemented")
def _get_prompt_template(self):
prompt_template = PromptTemplate(template=self.prompt,
input_variables=["input",
"last_generated_code",
"instructions"])
return prompt_template
def _prepare_code(self,
code: str,
markdown: bool = True,
do_fix_bad_calls: bool = True):
with asection(f"NapariBaseTool: _prepare_code(markdown={markdown}) "):
with asection(f"code to prepare:"):
aprint(code)
# extract code from markdown:
if markdown:
code = extract_code_from_markdown(code)
# Prepend prefix:
code = self.code_prefix + code
# Add spaces around code:
code = '\n\n' + code + '\n\n'
if self.fix_imports:
# Are there any missing imports?
imports = required_imports(code, llm=self.llm)
# prepend missing imports:
code = '\n'.join(imports) + '\n\n' + code
# Fix code, this takes care of wrong function calls and more:
if self.fix_bad_calls and do_fix_bad_calls:
code, _ = fix_all_bad_function_calls(code)
# Remove any offending lines:
code = filter_lines(code,
['napari.Viewer(', '= Viewer(', 'gui_qt(', 'viewer.window.add_dock_widget('])
with asection(f"code after all preparations and fixes:"):
aprint(code)
if self.install_missing_packages:
# Are there missing libraries that need to be installed?
packages = required_packages(code, llm=self.llm)
# Install them:
pip_install(packages)
# Return fully prepared and fixed code:
return code
def _generate_viewer_info(viewer):
    layer_info = '**Napari viewer information:**\n'
layer_info += "| Layer Type | Properties |\n| --- | --- |\n"
for layer in viewer.layers:
properties = ""
# Layer type
properties += f"| {layer.__class__.__name__} | "
# Image layer
if isinstance(layer, napari.layers.Image):
properties += f"dtype: {layer.data.dtype}, "
properties += f"shape: {layer.data.shape}, "
properties += f"min: {numpy.min(layer.data)}, "
properties += f"max: {numpy.max(layer.data)} "
# Label layer
elif isinstance(layer, napari.layers.Labels):
properties += f"Number of labels: {len(layer.data)} "
# # Other layer types
# else:
# # Add relevant and similarly useful information for other layer types
# properties += "Additional information goes here "
properties += "|\n"
layer_info += properties
return layer_info
| [
"input",
"instructions",
"last_generated_code",
"None"
] |
2024-01-10 | jqwhite/napari-chatgpt | src~napari_chatgpt~chat_server~chat_server.py | """Main entrypoint for the app."""
import os
import traceback
import webbrowser
from threading import Thread
from time import sleep
import napari
from PyQt5.QtCore import QTimer
from arbol import aprint, asection
from fastapi import FastAPI, Request, WebSocket, WebSocketDisconnect
from fastapi.templating import Jinja2Templates
from langchain.memory import ConversationTokenBufferMemory, \
ConversationBufferMemory
from langchain.schema import get_buffer_string, BaseMemory
from starlette.staticfiles import StaticFiles
from uvicorn import Config, Server
from napari_chatgpt.chat_server.callbacks.callbacks_handle_chat import \
ChatCallbackHandler
from napari_chatgpt.chat_server.callbacks.callbacks_handler_tool import \
ToolCallbackHandler
from napari_chatgpt.chat_server.callbacks.callbacks_stdout import \
ArbolCallbackHandler
from napari_chatgpt.chat_server.chat_response import ChatResponse
from napari_chatgpt.llm.llms import instantiate_LLMs
from napari_chatgpt.omega.memory.memory import OmegaMemory
from napari_chatgpt.omega.napari_bridge import NapariBridge
from napari_chatgpt.omega.omega_init import initialize_omega_agent
from napari_chatgpt.utils.api_keys.api_key import set_api_key
from napari_chatgpt.utils.download.gpt4all import get_gpt4all_model
from napari_chatgpt.utils.python.installed_packages import is_package_installed
class NapariChatServer:
def __init__(self,
napari_bridge: NapariBridge,
llm_model_name: str = 'gpt-3.5-turbo',
temperature: float = 0.01,
tool_temperature: float = 0.01,
memory_type: str = 'standard',
agent_personality: str = 'neutral',
fix_imports: bool = True,
install_missing_packages: bool = True,
fix_bad_calls: bool = True,
autofix_mistakes: bool = False,
autofix_widget: bool = False,
verbose: bool = False
):
# Flag to keep server running, or stop it:
self.running = True
self.uvicorn_server = None
# Napari bridge:
self.napari_bridge = napari_bridge
# Instantiate FastAPI:
self.app = FastAPI()
# Mount static files:
static_files_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'static')
self.app.mount("/static", StaticFiles(directory=static_files_path),
name="static")
templates_files_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'templates')
# Load Jinja2 templates:
templates = Jinja2Templates(directory=templates_files_path)
# Server startup event:
@self.app.on_event("startup")
async def startup_event():
pass
# Default path:
@self.app.get("/")
async def get(request: Request):
return templates.TemplateResponse("index.html",
{"request": request})
# Chat path:
@self.app.websocket("/chat")
async def websocket_endpoint(websocket: WebSocket):
await websocket.accept()
# Chat callback handler:
chat_callback_handler = ChatCallbackHandler(websocket,
verbose=verbose)
# Tool callback handler:
tool_callback_handler = ToolCallbackHandler(websocket,
verbose=verbose)
# Memory callback handler:
memory_callback_handler = ArbolCallbackHandler('Memory')
main_llm, memory_llm, tool_llm, max_token_limit = instantiate_LLMs(
llm_model_name=llm_model_name,
temperature=temperature,
tool_temperature=tool_temperature,
chat_callback_handler=chat_callback_handler,
tool_callback_handler=tool_callback_handler,
memory_callback_handler=memory_callback_handler
)
# Instantiate Memory:
memory: BaseMemory = None
if memory_type == 'bounded':
memory = ConversationTokenBufferMemory(
memory_key="chat_history",
return_messages=True,
max_token_limit=max_token_limit)
elif memory_type == 'infinite':
memory = ConversationBufferMemory(
memory_key="chat_history",
return_messages=True)
elif memory_type == 'hybrid':
memory = OmegaMemory(
llm=memory_llm,
memory_key="chat_history",
return_messages=True,
max_token_limit=max_token_limit)
else:
raise ValueError('Invalid Memory parameter!')
# Agent
agent_chain = initialize_omega_agent(
to_napari_queue=napari_bridge.to_napari_queue,
from_napari_queue=napari_bridge.from_napari_queue,
main_llm=main_llm,
tool_llm=tool_llm,
is_async=True,
chat_callback_handler=chat_callback_handler,
tool_callback_handler=tool_callback_handler,
has_human_input_tool=False,
memory=memory,
agent_personality=agent_personality,
fix_imports=fix_imports,
install_missing_packages=install_missing_packages,
fix_bad_calls=fix_bad_calls,
autofix_mistakes=autofix_mistakes,
autofix_widget=autofix_widget,
verbose=verbose
)
dialog_counter = 0
# Dialog Loop:
while True:
with asection(f"Dialog iteration {dialog_counter}:"):
try:
# Receive and send back the client message
question = await websocket.receive_text()
resp = ChatResponse(sender="user",
message=question)
await websocket.send_json(resp.dict())
aprint(f"Human Question/Request:\n{question}\n\n")
# Initiates a response -- empty for now:
start_resp = ChatResponse(sender="agent",
type="start")
await websocket.send_json(start_resp.dict())
# call LLM:
result = await agent_chain.acall(inputs=question)
aprint(
f"Agent response:\n{result['chat_history'][-1]}\n\n")
# finalise agent response:
end_resp = ChatResponse(sender="agent",
message=result['output'],
type="final")
await websocket.send_json(end_resp.dict())
current_chat_history = get_buffer_string(
result['chat_history'])
with asection(
f"Current chat history of {len(result['chat_history'])} messages:"):
aprint(current_chat_history)
except WebSocketDisconnect:
aprint("websocket disconnect")
break
except Exception as e:
traceback.print_exc()
resp = ChatResponse(
sender="agent",
message=f"Sorry, something went wrong ({type(e).__name__}: {str(e)}).",
type="error",
)
await websocket.send_json(resp.dict())
dialog_counter += 1
def _start_uvicorn_server(self, app):
config = Config(app, port=9000)
self.uvicorn_server = Server(config=config)
self.uvicorn_server.run()
def run(self):
self._start_uvicorn_server(self.app)
def stop(self):
self.running = False
if self.uvicorn_server:
self.uvicorn_server.should_exit = True
sleep(2)
def start_chat_server(viewer: napari.Viewer = None,
llm_model_name: str = 'gpt-3.5-turbo',
temperature: float = 0.01,
tool_temperature: float = 0.01,
memory_type: str = 'standard',
agent_personality: str = 'neutral',
fix_imports: bool = True,
install_missing_packages: bool = True,
fix_bad_calls: bool = True,
autofix_mistakes: bool = False,
autofix_widget: bool = False,
verbose: bool = False
):
# Set OpenAI key if necessary:
if 'gpt' in llm_model_name and '4all' not in llm_model_name and is_package_installed(
'openai'):
set_api_key('OpenAI')
if 'bard' in llm_model_name:
set_api_key('GoogleBard')
# Set Anthropic key if necessary:
if 'claude' in llm_model_name and is_package_installed('anthropic'):
set_api_key('Anthropic')
# Download GPT4All model if necessary:
if 'ggml' in llm_model_name and is_package_installed('pygpt4all'):
        # The first time this is run it will download the file; afterwards
# it uses the downloaded file in ~/.gpt4all
get_gpt4all_model(llm_model_name)
# Instantiates napari viewer:
if not viewer:
viewer = napari.Viewer()
# Instantiates a napari bridge:
bridge = NapariBridge(viewer)
# Instantiates server:
chat_server = NapariChatServer(bridge,
llm_model_name=llm_model_name,
temperature=temperature,
tool_temperature=tool_temperature,
memory_type=memory_type,
agent_personality=agent_personality,
fix_imports=fix_imports,
install_missing_packages=install_missing_packages,
fix_bad_calls=fix_bad_calls,
autofix_mistakes=autofix_mistakes,
autofix_widget=autofix_widget,
verbose=verbose
)
# Define server thread code:
def server_thread_function():
# Start Chat server:
chat_server.run()
# Create and start the thread that will run Omega:
server_thread = Thread(target=server_thread_function, args=())
server_thread.start()
# function to open browser on page:
def _open_browser():
url = "http://127.0.0.1:9000"
webbrowser.open(url, new=0, autoraise=True)
# open browser after delay of a few seconds:
QTimer.singleShot(2000, _open_browser)
# Return the server:
return chat_server
if __name__ == "__main__":
start_chat_server()
# Start qt event loop and wait for it to stop:
napari.run()
| [] |
2024-01-10 | donhardman/diffusers | src~diffusers~pipelines~stable_diffusion_xl~pipeline_stable_diffusion_xl_inpaint.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import PIL.Image
import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from ...image_processor import PipelineImageInput, VaeImageProcessor
from ...loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, UNet2DConditionModel
from ...models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from ...models.lora import adjust_lora_scale_text_encoder
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
deprecate,
is_invisible_watermark_available,
is_torch_xla_available,
logging,
replace_example_docstring,
scale_lora_layers,
unscale_lora_layers,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline
from .pipeline_output import StableDiffusionXLPipelineOutput
if is_invisible_watermark_available():
from .watermark import StableDiffusionXLWatermarker
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
XLA_AVAILABLE = True
else:
XLA_AVAILABLE = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionXLInpaintPipeline
>>> from diffusers.utils import load_image
>>> pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
... "stabilityai/stable-diffusion-xl-base-1.0",
... torch_dtype=torch.float16,
... variant="fp16",
... use_safetensors=True,
... )
>>> pipe.to("cuda")
>>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
>>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
>>> init_image = load_image(img_url).convert("RGB")
>>> mask_image = load_image(mask_url).convert("RGB")
>>> prompt = "A majestic tiger sitting on a bench"
>>> image = pipe(
... prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80
... ).images[0]
```
"""
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
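# Illustrative usage sketch (not part of this function): in a denoising loop one would
# typically combine classifier-free guidance first and then rescale, e.g.
#   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
#   noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)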
def mask_pil_to_torch(mask, height, width):
# preprocess mask
if isinstance(mask, (PIL.Image.Image, np.ndarray)):
mask = [mask]
if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
mask = mask.astype(np.float32) / 255.0
elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
mask = torch.from_numpy(mask)
return mask
def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
"""
Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
``image`` and ``1`` for the ``mask``.
The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
Args:
image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
Raises:
ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
            (or the other way around).
Returns:
tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
dimensions: ``batch x channels x height x width``.
"""
    # TODO (Yiyi): need to clean this up later
deprecation_message = "The prepare_mask_and_masked_image method is deprecated and will be removed in a future version. Please use VaeImageProcessor.preprocess instead"
deprecate(
"prepare_mask_and_masked_image",
"0.30.0",
deprecation_message,
)
if image is None:
raise ValueError("`image` input cannot be undefined.")
if mask is None:
raise ValueError("`mask_image` input cannot be undefined.")
if isinstance(image, torch.Tensor):
if not isinstance(mask, torch.Tensor):
mask = mask_pil_to_torch(mask, height, width)
if image.ndim == 3:
image = image.unsqueeze(0)
# Batch and add channel dim for single mask
if mask.ndim == 2:
mask = mask.unsqueeze(0).unsqueeze(0)
# Batch single mask or add channel dim
if mask.ndim == 3:
# Single batched mask, no channel dim or single mask not batched but channel dim
if mask.shape[0] == 1:
mask = mask.unsqueeze(0)
# Batched masks no channel dim
else:
mask = mask.unsqueeze(1)
assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
# assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
# Check image is in [-1, 1]
# if image.min() < -1 or image.max() > 1:
# raise ValueError("Image should be in [-1, 1] range")
# Check mask is in [0, 1]
if mask.min() < 0 or mask.max() > 1:
raise ValueError("Mask should be in [0, 1] range")
# Binarize mask
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
# Image as float32
image = image.to(dtype=torch.float32)
elif isinstance(mask, torch.Tensor):
raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
else:
# preprocess image
if isinstance(image, (PIL.Image.Image, np.ndarray)):
image = [image]
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
            # resize all images w.r.t. passed height and width
image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
image = [np.array(i.convert("RGB"))[None, :] for i in image]
image = np.concatenate(image, axis=0)
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
image = np.concatenate([i[None, :] for i in image], axis=0)
image = image.transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
mask = mask_pil_to_torch(mask, height, width)
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
if image.shape[1] == 4:
        # images are in latent space and thus can't
        # be masked, so set masked_image to None;
        # we assume that the checkpoint is not an inpainting
        # checkpoint. TODO (Yiyi): need to clean this up later
masked_image = None
else:
masked_image = image * (mask < 0.5)
# n.b. ensure backwards compatibility as old function does not return image
if return_image:
return mask, masked_image, image
return mask, masked_image
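# Shape sketch for the common PIL case (illustrative, assuming a 512x512 RGB image and a
# matching single-channel mask):
#   mask, masked_image, image = prepare_mask_and_masked_image(pil_image, pil_mask, 512, 512, return_image=True)
#   mask.shape         -> (1, 1, 512, 512), binarized to {0.0, 1.0}
#   masked_image.shape -> (1, 3, 512, 512), values in [-1.0, 1.0], zeroed where mask >= 0.5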
class StableDiffusionXLInpaintPipeline(
DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin
):
r"""
Pipeline for text-to-image generation using Stable Diffusion XL.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion XL uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
text_encoder_2 ([` CLIPTextModelWithProjection`]):
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
specifically the
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
tokenizer_2 (`CLIPTokenizer`):
Second Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
            Whether the `unet` requires an aesthetic_score condition to be passed during inference. Also see the config
of `stabilityai/stable-diffusion-xl-refiner-1-0`.
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
`stabilityai/stable-diffusion-xl-base-1-0`.
add_watermarker (`bool`, *optional*):
Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
watermarker will be used.
"""
model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
_optional_components = ["tokenizer", "text_encoder"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
tokenizer_2: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
requires_aesthetics_score: bool = False,
force_zeros_for_empty_prompt: bool = True,
add_watermarker: Optional[bool] = None,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
text_encoder_2=text_encoder_2,
tokenizer=tokenizer,
tokenizer_2=tokenizer_2,
unet=unet,
scheduler=scheduler,
)
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.mask_processor = VaeImageProcessor(
vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
)
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
if add_watermarker:
self.watermark = StableDiffusionXLWatermarker()
else:
self.watermark = None
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
def encode_prompt(
self,
prompt: str,
prompt_2: Optional[str] = None,
device: Optional[torch.device] = None,
num_images_per_prompt: int = 1,
do_classifier_free_guidance: bool = True,
negative_prompt: Optional[str] = None,
negative_prompt_2: Optional[str] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
clip_skip: Optional[int] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
"""
device = device or self._execution_device
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
self._lora_scale = lora_scale
# dynamically adjust the LoRA scale
if not self.use_peft_backend:
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
else:
scale_lora_layers(self.text_encoder, lora_scale)
scale_lora_layers(self.text_encoder_2, lora_scale)
prompt = [prompt] if isinstance(prompt, str) else prompt
if prompt is not None:
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# Define tokenizers and text encoders
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
text_encoders = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
if prompt_embeds is None:
prompt_2 = prompt_2 or prompt
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
            # textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = []
prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, tokenizer)
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {tokenizer.model_max_length} tokens: {removed_text}"
)
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
                # We are only ever interested in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
if clip_skip is None:
prompt_embeds = prompt_embeds.hidden_states[-2]
else:
# "2" because SDXL always indexes from the penultimate layer.
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
# get unconditional embeddings for classifier free guidance
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
elif do_classifier_free_guidance and negative_prompt_embeds is None:
negative_prompt = negative_prompt or ""
negative_prompt_2 = negative_prompt_2 or negative_prompt
# normalize str to list
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
negative_prompt_2 = (
batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
)
uncond_tokens: List[str]
if prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = [negative_prompt, negative_prompt_2]
negative_prompt_embeds_list = []
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = tokenizer(
negative_prompt,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
negative_prompt_embeds = text_encoder(
uncond_input.input_ids.to(device),
output_hidden_states=True,
)
                # We are only ever interested in the pooled output of the final text encoder
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
negative_prompt_embeds_list.append(negative_prompt_embeds)
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
if do_classifier_free_guidance:
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and self.use_peft_backend:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder_2)
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
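    # Return-shape sketch (illustrative, SDXL base): for a single prompt with
    # num_images_per_prompt=1, prompt_embeds is (1, 77, 2048) (the concatenated hidden
    # states of the two text encoders) and pooled_prompt_embeds is (1, 1280); the
    # negative variants match these shapes when classifier-free guidance is enabled.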
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
prompt_2,
height,
width,
strength,
callback_steps,
negative_prompt=None,
negative_prompt_2=None,
prompt_embeds=None,
negative_prompt_embeds=None,
):
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt_2 is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def prepare_latents(
self,
batch_size,
num_channels_latents,
height,
width,
dtype,
device,
generator,
latents=None,
image=None,
timestep=None,
is_strength_max=True,
add_noise=True,
return_noise=False,
return_image_latents=False,
):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if (image is None or timestep is None) and not is_strength_max:
raise ValueError(
"Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
"However, either the image or the noise timestep has not been provided."
)
if image.shape[1] == 4:
image_latents = image.to(device=device, dtype=dtype)
elif return_image_latents or (latents is None and not is_strength_max):
image = image.to(device=device, dtype=dtype)
image_latents = self._encode_vae_image(image=image, generator=generator)
image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
if latents is None and add_noise:
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            # if strength is 1. then initialise the latents to noise, else initialise to image + noise
latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
# if pure noise then scale the initial latents by the Scheduler's init sigma
latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
elif add_noise:
noise = latents.to(device)
latents = noise * self.scheduler.init_noise_sigma
else:
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
latents = image_latents.to(device)
outputs = (latents,)
if return_noise:
outputs += (noise,)
if return_image_latents:
outputs += (image_latents,)
return outputs
def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
dtype = image.dtype
if self.vae.config.force_upcast:
image = image.float()
self.vae.to(dtype=torch.float32)
if isinstance(generator, list):
image_latents = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
for i in range(image.shape[0])
]
image_latents = torch.cat(image_latents, dim=0)
else:
image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
if self.vae.config.force_upcast:
self.vae.to(dtype)
image_latents = image_latents.to(dtype)
image_latents = self.vae.config.scaling_factor * image_latents
return image_latents
def prepare_mask_latents(
self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
):
# resize the mask to latents shape as we concatenate the mask to the latents
# we do that before converting to dtype to avoid breaking in case we're using cpu_offload
# and half precision
mask = torch.nn.functional.interpolate(
mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
)
mask = mask.to(device=device, dtype=dtype)
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
if mask.shape[0] < batch_size:
if not batch_size % mask.shape[0] == 0:
raise ValueError(
"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
" of masks that you pass is divisible by the total requested batch size."
)
mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
if masked_image is not None and masked_image.shape[1] == 4:
masked_image_latents = masked_image
else:
masked_image_latents = None
if masked_image is not None:
if masked_image_latents is None:
masked_image = masked_image.to(device=device, dtype=dtype)
masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
if masked_image_latents.shape[0] < batch_size:
if not batch_size % masked_image_latents.shape[0] == 0:
raise ValueError(
"The passed images and the required batch size don't match. Images are supposed to be duplicated"
f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
" Make sure the number of images that you pass is divisible by the total requested batch size."
)
masked_image_latents = masked_image_latents.repeat(
batch_size // masked_image_latents.shape[0], 1, 1, 1
)
masked_image_latents = (
torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
)
            # aligning device to prevent device errors when concatenating it with the latent model input
masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
return mask, masked_image_latents
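    # Shape sketch (illustrative): for a 1024x1024 input with vae_scale_factor == 8 and
    # classifier-free guidance enabled, the returned mask is (2 * batch_size, 1, 128, 128)
    # and masked_image_latents (when available) is (2 * batch_size, 4, 128, 128).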
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps
def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
# get the original timestep using init_timestep
if denoising_start is None:
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
else:
t_start = 0
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
# Strength is irrelevant if we directly request a timestep to start at;
# that is, strength is determined by the denoising_start instead.
if denoising_start is not None:
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (denoising_start * self.scheduler.config.num_train_timesteps)
)
)
timesteps = list(filter(lambda ts: ts < discrete_timestep_cutoff, timesteps))
return torch.tensor(timesteps), len(timesteps)
return timesteps, num_inference_steps - t_start
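    # Worked example (illustrative): with num_inference_steps=50, strength=0.8 and no
    # denoising_start, init_timestep = min(int(50 * 0.8), 50) = 40 and t_start = 50 - 40 = 10,
    # so only the last 40 scheduler timesteps are used for denoising.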
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
def _get_add_time_ids(
self,
original_size,
crops_coords_top_left,
target_size,
aesthetic_score,
negative_aesthetic_score,
negative_original_size,
negative_crops_coords_top_left,
negative_target_size,
dtype,
):
if self.config.requires_aesthetics_score:
add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
add_neg_time_ids = list(
negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
)
else:
add_time_ids = list(original_size + crops_coords_top_left + target_size)
            add_neg_time_ids = list(negative_original_size + negative_crops_coords_top_left + negative_target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
if (
expected_add_embed_dim > passed_add_embed_dim
and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
):
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
)
elif (
expected_add_embed_dim < passed_add_embed_dim
and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
):
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
)
elif expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
return add_time_ids, add_neg_time_ids
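# Illustrative example (assumed values): with requires_aesthetics_score=True,
# original_size=(1024, 1024), crops_coords_top_left=(0, 0) and aesthetic_score=6.0 the positive
# conditioning becomes add_time_ids = [[1024, 1024, 0, 0, 6.0]] (5 values per image), and with
# negative_aesthetic_score=2.5 the negative branch becomes [[1024, 1024, 0, 0, 2.5]]. The checks
# above then require addition_time_embed_dim * 5 + text_encoder_2.config.projection_dim to equal
# unet.add_embedding.linear_1.in_features.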
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
def upcast_vae(self):
dtype = self.vae.dtype
self.vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch_2_0 is used attention block does not need
# to be in float32 which can save lots of memory
if use_torch_2_0_or_xformers:
self.vae.post_quant_conv.to(dtype)
self.vae.decoder.conv_in.to(dtype)
self.vae.decoder.mid_block.to(dtype)
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
image: PipelineImageInput = None,
mask_image: PipelineImageInput = None,
masked_image_latents: torch.FloatTensor = None,
height: Optional[int] = None,
width: Optional[int] = None,
strength: float = 0.9999,
num_inference_steps: int = 50,
denoising_start: Optional[float] = None,
denoising_end: Optional[float] = None,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Tuple[int, int] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Tuple[int, int] = None,
negative_original_size: Optional[Tuple[int, int]] = None,
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
negative_target_size: Optional[Tuple[int, int]] = None,
aesthetic_score: float = 6.0,
negative_aesthetic_score: float = 2.5,
clip_skip: Optional[int] = None,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
instead.
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
image (`PIL.Image.Image`):
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
be masked out with `mask_image` and repainted according to `prompt`.
mask_image (`PIL.Image.Image`):
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
instead of 3, so the expected shape would be `(B, H, W, 1)`.
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image. This is set to 1024 by default for the best results.
Anything below 512 pixels won't work well for
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
and checkpoints that are not specifically fine-tuned on low resolutions.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image. This is set to 1024 by default for the best results.
Anything below 512 pixels won't work well for
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
and checkpoints that are not specifically fine-tuned on low resolutions.
strength (`float`, *optional*, defaults to 0.9999):
Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be
between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the
`strength`. The number of denoising steps depends on the amount of noise initially added. When
`strength` is 1, added noise will be maximum and the denoising process will run for the full number of
iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked
portion of the reference `image`. Note that in the case of `denoising_start` being declared as an
integer, the value of `strength` will be ignored.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
denoising_start (`float`, *optional*):
When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
denoising_end (`float`, *optional*):
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
For most cases, `target_size` should be set to the desired height and width of the generated image. If
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
To negatively condition the generation process based on a target image resolution. It should be the same
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
aesthetic_score (`float`, *optional*, defaults to 6.0):
Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
simulate an aesthetic score of the generated image by influencing the negative text condition.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
`tuple`. When returning a tuple, the first element is a list with the generated images.
"""
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# 1. Check inputs
self.check_inputs(
prompt,
prompt_2,
height,
width,
strength,
callback_steps,
negative_prompt,
negative_prompt_2,
prompt_embeds,
negative_prompt_embeds,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = self.encode_prompt(
prompt=prompt,
prompt_2=prompt_2,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
negative_prompt=negative_prompt,
negative_prompt_2=negative_prompt_2,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
lora_scale=text_encoder_lora_scale,
clip_skip=clip_skip,
)
# 4. set timesteps
def denoising_value_valid(dnv):
return isinstance(dnv, float) and 0 < dnv < 1
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps(
num_inference_steps, strength, device, denoising_start=denoising_start if denoising_value_valid(denoising_start) else None
)
# check that number of inference steps is not < 1 - as this doesn't make sense
if num_inference_steps < 1:
raise ValueError(
f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
)
# at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
# create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
is_strength_max = strength == 1.0
# 5. Preprocess mask and image
init_image = self.image_processor.preprocess(image, height=height, width=width)
init_image = init_image.to(dtype=torch.float32)
mask = self.mask_processor.preprocess(mask_image, height=height, width=width)
if masked_image_latents is not None:
masked_image = masked_image_latents
elif init_image.shape[1] == 4:
# if images are in latent space, we can't mask it
masked_image = None
else:
masked_image = init_image * (mask < 0.5)
# 6. Prepare latent variables
num_channels_latents = self.vae.config.latent_channels
num_channels_unet = self.unet.config.in_channels
return_image_latents = num_channels_unet == 4
add_noise = True if denoising_start is None else False
latents_outputs = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
image=init_image,
timestep=latent_timestep,
is_strength_max=is_strength_max,
add_noise=add_noise,
return_noise=True,
return_image_latents=return_image_latents,
)
if return_image_latents:
latents, noise, image_latents = latents_outputs
else:
latents, noise = latents_outputs
# 7. Prepare mask latent variables
mask, masked_image_latents = self.prepare_mask_latents(
mask,
masked_image,
batch_size * num_images_per_prompt,
height,
width,
prompt_embeds.dtype,
device,
generator,
do_classifier_free_guidance,
)
# 8. Check that sizes of mask, masked image and latents match
if num_channels_unet == 9:
# default case for runwayml/stable-diffusion-inpainting
num_channels_mask = mask.shape[1]
num_channels_masked_image = masked_image_latents.shape[1]
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
raise ValueError(
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
" `pipeline.unet` or your `mask_image` or `image` input."
)
elif num_channels_unet != 4:
raise ValueError(
f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
)
# 8.1 Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
height, width = latents.shape[-2:]
height = height * self.vae_scale_factor
width = width * self.vae_scale_factor
original_size = original_size or (height, width)
target_size = target_size or (height, width)
# 10. Prepare added time ids & embeddings
if negative_original_size is None:
negative_original_size = original_size
if negative_target_size is None:
negative_target_size = target_size
add_text_embeds = pooled_prompt_embeds
add_time_ids, add_neg_time_ids = self._get_add_time_ids(
original_size,
crops_coords_top_left,
target_size,
aesthetic_score,
negative_aesthetic_score,
negative_original_size,
negative_crops_coords_top_left,
negative_target_size,
dtype=prompt_embeds.dtype,
)
add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
if do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device)
# 11. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
if (
denoising_end is not None
and denoising_start is not None
and denoising_value_valid(denoising_end)
and denoising_value_valid(denoising_start)
and denoising_start >= denoising_end
):
raise ValueError(
f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
+ f" {denoising_end} when using type float."
)
elif denoising_end is not None and denoising_value_valid(denoising_end):
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (denoising_end * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
timesteps = timesteps[:num_inference_steps]
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
# concat latents, mask, masked_image_latents in the channel dimension
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
if num_channels_unet == 9:
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
# predict the noise residual
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
if num_channels_unet == 4:
init_latents_proper = image_latents
if do_classifier_free_guidance:
init_mask, _ = mask.chunk(2)
else:
init_mask = mask
if i < len(timesteps) - 1:
noise_timestep = timesteps[i + 1]
init_latents_proper = self.scheduler.add_noise(
init_latents_proper, noise, torch.tensor([noise_timestep])
)
latents = (1 - init_mask) * init_latents_proper + init_mask * latents
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if XLA_AVAILABLE:
xm.mark_step()
if not output_type == "latent":
# make sure the VAE is in float32 mode, as it overflows in float16
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
if needs_upcasting:
self.upcast_vae()
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
# cast back to fp16 if needed
if needs_upcasting:
self.vae.to(dtype=torch.float16)
else:
return StableDiffusionXLPipelineOutput(images=latents)
# apply watermark if available
if self.watermark is not None:
image = self.watermark.apply_watermark(image)
image = self.image_processor.postprocess(image, output_type=output_type)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image,)
return StableDiffusionXLPipelineOutput(images=image)
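# Minimal usage sketch (illustrative only): the checkpoint id below is an assumption, and the class
# name refers to the equivalent upstream `diffusers` SDXL inpainting pipeline, which exposes largely
# the same `__call__` arguments as this implementation; substitute the pipeline class exported by
# this module if it differs.
#
#   import torch
#   from diffusers import StableDiffusionXLInpaintPipeline
#   from diffusers.utils import load_image
#
#   pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
#   ).to("cuda")
#   init_image = load_image("input.png")   # placeholder paths
#   mask_image = load_image("mask.png")
#   result = pipe(
#       prompt="a photo of a cat sitting on a bench",
#       image=init_image,
#       mask_image=mask_image,
#       strength=0.85,
#       num_inference_steps=50,
#       guidance_scale=7.5,
#   ).images[0]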
| [
"[PLACEHOLDER, PLACEHOLDER]",
"[]",
"False"
] |
2024-01-10 | rukhinov/privateGPT | private_gpt~components~llm~llm_component.py | from injector import inject, singleton
from llama_index.llms import MockLLM
from llama_index.llms.base import LLM
from llama_index.llms.llama_utils import completion_to_prompt, messages_to_prompt
from private_gpt.paths import models_path
from private_gpt.settings.settings import settings
@singleton
class LLMComponent:
llm: LLM
@inject
def __init__(self) -> None:
match settings.llm.mode:
case "local":
from llama_index.llms import LlamaCPP
self.llm = LlamaCPP(
model_path=str(models_path / settings.local.llm_hf_model_file),
temperature=0.1,
# llama2 has a context window of 4096 tokens,
# but we set it lower to allow for some wiggle room
context_window=3900,
generate_kwargs={},
# All to GPU
model_kwargs={"n_gpu_layers": -1},
# transform inputs into Llama2 format
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
verbose=True,
)
case "sagemaker":
from private_gpt.components.llm.custom.sagemaker import SagemakerLLM
self.llm = SagemakerLLM(
endpoint_name=settings.sagemaker.endpoint_name,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
)
case "openai":
from llama_index.llms import OpenAI
openai_settings = settings.openai.api_key
self.llm = OpenAI(api_key=openai_settings)
case "mock":
self.llm = MockLLM()
| [] |
2024-01-10 | moniljhaveri/DeepLearningProject | DDPG%20Codes~DDPG_Mountain_Car_Final.py | """
The below script implements an agent that learns using the Deep Deterministic Policy Gradient algorithm. The script
uses four classes: random noise generator, replay buffer, and actor and critic network classes. After defining these
classes, the main loop runs the two-step learning cycle in which the agent (i) experiments with new actions and
evaluates them and then (ii) improves behavior based on the success of the experimentation.
The script is currently set to create a Mountain Car agent.
"""
import tensorflow as tf
import numpy as np
# Import random noise generator class
from DDPG_Noise import OUNoise
# Import Replay Buffer class and deque data structure
import random
from memory import ReplayBuffer
# Import Actor and Critic network classes
from Actor import ActorNetwork
from Critic import CriticNetwork
#Import OpenAI gym
import gym
from gym import wrappers
import matplotlib.pyplot as plt
import math
# Learning Parameters
# Restore Variable used to load weights
RESTORE = False
# Number of episodes to be run
MAX_EPISODES = 700
# Max number of steps in each episode
MAX_EP_STEPS = 100000
# Learning rates
ACTOR_LEARNING_RATE = 0.0001
CRITIC_LEARNING_RATE = 0.001
# Size of replay buffer reflect how many transitions can be stored at once
BUFFER_SIZE = 10000
# Minibatch size is the number of transitions that are used to update the Q and policy functions
MINIBATCH_SIZE = 64
# Actor/Critical Neural Network Architecture
LAYER_1_SIZE = 400
LAYER_2_SIZE = 300
# Discount factor reflects the agents preference for short-term rewards over long-term rewards
GAMMA = 0.99
# Tau reflects how quickly target networks should be updated
TAU = 0.001
# Environment Variables
# Environment Name
# ENV_NAME = 'BipedalWalker-v2'
ENV_NAME = 'MountainCarContinuous-v0'
# Result storage locations
MONITOR_DIR = './results/biped_restart_8/gym_ddpg'
SUMMARY_DIR = './results/biped_restart_8/tf_ddpg'
RANDOM_SEED = 25
# The train function implements the two-step learning cycle.
def train(sess, env, actor, critic,RESTORE):
sess.run(tf.global_variables_initializer())
# Initialize random noise generator
exploration_noise = OUNoise(env.action_space.shape[0])
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
# Initialize replay buffer
replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)
# Store q values for illustration purposes
q_max_array = []
for i in xrange(MAX_EPISODES):
s = env.reset()
ep_reward = 0
ep_ave_max_q = 0
for j in xrange(MAX_EP_STEPS):
env.render()
# Begin "Experimentation and Evaluation Phase"
# Select next experimental action by adding noise to action prescribed by policy
a = actor.predict(np.reshape(s, (1, actor.s_dim)))
# If in a testing episode, do not add noise
if i % 100 != 49 and i % 100 != 99:
noise = exploration_noise.noise()
a = a + noise
# Take step with experimental action
s2, r, terminal, info = env.step(np.reshape(a.T,newshape=(env.action_space.shape[0],)))
# Add transition to replay buffer if not testing episode
if i % 100 != 49 and i % 100 != 99:
replay_buffer.add(np.reshape(s, (actor.s_dim,)), np.reshape(a, (actor.a_dim,)), r,
terminal, np.reshape(s2, (actor.s_dim,)))
# Keep adding experience to the memory until
# there are at least minibatch size samples
if replay_buffer.size() > MINIBATCH_SIZE:
s_batch, a_batch, r_batch, t_batch, s2_batch = replay_buffer.sample_batch(MINIBATCH_SIZE)
# Find target estimate to use for updating the Q-function
# predict_target function determines Q-value of next state
target_q = critic.predict_target(s2_batch, actor.predict_target(s2_batch))
# Complete target estimate (R(t+1) + Q(s(t+1),a(t+1)))
y_i = []
for k in xrange(MINIBATCH_SIZE):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + GAMMA * target_q[k])
# Perform gradient descent to update critic
predicted_q_value, _ = critic.train(s_batch, a_batch, np.reshape(y_i, (MINIBATCH_SIZE, 1)))
ep_ave_max_q += np.amax(predicted_q_value, axis = 0)
# Perform "Learning" phase by moving policy parameters in direction of deterministic policy gradient
a_outs = actor.predict(s_batch)
grads = critic.action_gradients(s_batch, a_outs)
actor.train(s_batch, grads[0])
# Update target networks
actor.update_target_network()
critic.update_target_network()
s = s2
ep_reward += r
# If episode is finished, print results
if terminal:
if i % 100 == 49 or i % 100 == 99:
print("Testing")
else:
print("Training")
print '| Reward: %.2i' % int(ep_reward), " | Episode", i, '| Qmax: %.4f' % (ep_ave_max_q / float(j))
q_max_array.append(ep_ave_max_q / float(j))
break
plt.plot(q_max_array)
plt.xlabel('Episode Number')
plt.ylabel('Max Q-Value')
plt.show()
# Begin program
def main():
with tf.Session() as sess:
env = gym.make(ENV_NAME)
np.random.seed(RANDOM_SEED)
tf.set_random_seed(RANDOM_SEED)
env.seed(RANDOM_SEED)
# Check environment dimensions
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
action_bound = env.action_space.high
# print("Sample Action: ")
# print(env.action_space.sample())
# print("Sample Shape")
# print(np.shape(env.action_space.sample()))
# print("Valid Action")
# val_act = np.array([[1.05],[0.5],[-1.3],[0.2]])
# print(env.action_space.contains(val_act))
# Ensure action bound is symmetric
# assert (env.action_space.high == -env.action_space.low)
# Build actor and critic networks
actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
ACTOR_LEARNING_RATE, TAU)
critic = CriticNetwork(sess, state_dim, action_dim,
CRITIC_LEARNING_RATE, TAU, actor.get_num_trainable_vars())
# Film training videos if applicable
env = wrappers.Monitor(env, MONITOR_DIR, force=True, video_callable=lambda episode_id: episode_id%49==0)
train(sess, env, actor, critic,RESTORE)
main()
# In[ ]:
# In[ ]:
| [] |
2024-01-10 | ser-ge/sly_llama | sly_llama~agents~mrkl.py | """
Using sly and sly_llama to implement MRKL
"""
from typing import Callable, ContextManager, List, Optional
from pydantic import BaseModel, root_validator
from sly_llama import llm_call, LlmException
from sly_llama.models import LlmOutput
from langchain import OpenAI
from sly_llama import RetryException
# TODO set model name in .env
# TODO factor out langchain llm, call OpenAI directly
llm = OpenAI(model_name="gpt-4")
class MrklOutput(LlmOutput):
"""
Model to validate the output of the Mrkl llm call
Attributes:
action: The action taken by the Mrkl llm
action_input: The input to the action
thought: The thought process of the Mrkl llm
final_answer: The final answer of the Mrkl llm
"""
action: Optional[str] = None
action_input: Optional[str] = None
thought: Optional[str] = None
final_answer: Optional[str] = None
@staticmethod
@_(r"Thought:((.|\n)*?)(?=Action|Final)")
def THOUGHT(matches: List):
return matches[0][1].strip()
@_(r"Action Input:((.|\n)*?)(?=Observation)")
def ACTION_INPUT(matches: List):
return matches[0][1].strip()
@_(r"Action:((.|\n)*?)(?=Action Input)")
def ACTION(matches):
return matches[0][1].strip()
@_(r"Final Answer:((.|\n |\s)*)")
def FINAL_ANSWER(matches):
return matches[0][1].strip()
@root_validator(pre=True)
def check_action_or_answer(cls, values):
"""
Ensure that either an action or a final answer is given.
"""
action = "action_input" in values and "action" in values
answer = "final_answer" in values
if not any([action, answer]):
raise LlmException(
"You must either choose an action or give the final answer"
)
return values
@llm_call(
llm,
stop_sequence="Observation",
verbose=False,
return_prompt=True,
return_llm_output=True,
)
def mrkl_start(tools, tool_names, request) -> MrklOutput:
"""
You are a helpful assistant designed to answer questions.
You have access to the following tools:
{tools}
Use the following format:
Request: the question you must answer if you can
Thought: you should always think about what to do
Action: name of the tool to use, should be one of [{tool_names}]
Action Input: the input to the tool
Observation: the result of the tool
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: [
"is_possible": <boolean indicating if the request has been successfully answered>,
"explanation": <a description of why and how the request can or cannot be answered>,
"answer" : <the answer directly addressing the request>
]
Begin!
Request: {request}
"""
@llm_call(
llm,
stop_sequence="\nObservation",
verbose=False,
return_prompt=True,
return_llm_output=True,
)
def mrkl_step(history:str, current_observation:str) -> MrklOutput:
"""
{history}
{current_observation}
"""
def insert_newline_after_match(string: str, pattern: str = "Action Input:"):
"""
Inserts a newline after the given pattern in the given string.
"""
return string.replace(pattern, pattern + "\n")
def mrkl_agent(
query: str, tools_list: List, max_iters: int, max_retries: int
) -> str | None:
"""
Runs the MRKL agent with the given query, tools list, and maximum number of iterations.
Parameters
----------
query : str
The query to be used by the MRKL agent.
tools_list : List[Tool]
A list of tools to be used by the MRKL agent.
max_iters : int
The maximum number of iterations to run the MRKL agent for.
max_retries : int
The maximum number of retries to attempt before giving up if LlmException is thrown
"""
tools = {tool.name: tool for tool in tools_list}
tool_info = str(({t.name: t.description for t in tools.values()}))
tool_names = str(tools.keys())
# Start the MRKL agent with the initial conditions
for _ in range(max_retries):
try:
mrkl_output, first_prompt, raw_output = mrkl_start(tool_info, tool_names, query)
break
except LlmException as e:
query = query + '\n' + e.message
error_message = e
else:
raise RetryException(error_message)
last_output = insert_newline_after_match(raw_output, "Action Input:")
history = first_prompt + last_output
print(history)
for _ in range(max_iters):
# if the chosen action is a known tool, run it and set the observation
if mrkl_output.action in tools:
current_observation = tools[mrkl_output.action](mrkl_output.action_input)
else:
current_observation = (
f"{mrkl_output.action} not a valid tool, try another one"
)
# run a single mrkl step until the output can be parsed correctly or max_retries is reached
for _ in range(max_retries):
try:
mrkl_output, last_prompt, raw_output = mrkl_step(
history, current_observation
)
break
# add error message to observation for the next retry loop
except LlmException as e:
current_observation = current_observation + e.message
error_message = e
else:
raise RetryException(f"mrkl_step exceeeded retries, last error: {error_message}")
# the llm one shot learns better if it can see last action separated by new line, esp code indent
last_output = insert_newline_after_match(raw_output, "Action Input:")
history = last_prompt + last_output
print(last_prompt)
print(last_output)
if mrkl_output.final_answer:
return mrkl_output.final_answer
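# The block below is a minimal, illustrative usage sketch of mrkl_agent; the EchoTool class and the
# example query are assumptions added for demonstration, not part of the original module.
if __name__ == "__main__":
    # mrkl_agent only requires that each tool exposes `.name`, `.description` and is callable.
    # Note that running this performs real LLM calls through the module-level `llm` client.
    class EchoTool:
        name = "echo"
        description = "Returns its input unchanged; useful only as a smoke test."

        def __call__(self, text: str) -> str:
            return text

    answer = mrkl_agent(
        query="What is the capital of France?",
        tools_list=[EchoTool()],
        max_iters=5,
        max_retries=3,
    )
    print(answer)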
| [] |
2024-01-10 | ser-ge/sly_llama | sly_llama~examples~simple_example.py | # ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# formats: py,ipynb
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.5
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
from typing import List, Optional
from sly_llama import llm_call
from langchain.llms import OpenAI
from pydantic import BaseModel
llm = OpenAI()
# + [markdown]
"""
#### Let's define what the add function does and wrap it in an llm call
"""
# -
@llm_call(llm)
def add(x: str, y: str) -> str:
"""
calculate {x} + {y}
only return the number and nothing else
"""
add(1, 2)
add(1, 3) + add(1, 1)
# #### Problem: strings don't add, let's try again but with ints
#
# +
@llm_call(llm)
def add(x: str, y: str) -> int:
"""
calculate {x} + {y}
only return the number and nothing else
"""
add(1, 3) + add(1, 1)
# + [markdown]
"""
Let's make a recipe
"""
# +
@llm_call(llm)
def get_recipe(dish: str, units: str) -> str:
"""
Write a recipe for this {dish}
Be sure to include all the ingridients in {units} units.
ingridients: < necessary ingridients>
instructions: < the instructions for making the dish>
vegan : <this value must be one of [True, False] indicating whether the recipe is vegan>
"""
# -
print(get_recipe("jank", "metric"))
# + [markdown]
"""
#### That's great but what if we want to parse the output to a pydantic class
#### Let's define the output class and how we want to parse the llm output
"""
# +
from pydantic import BaseModel
class Recipe(BaseModel):
ingridients: str | List[str]
instructions: str | List[str]
vegan: bool
@classmethod
def from_llm_output(cls, llm_output: str):
recipe = {}
parts = llm_output.casefold().partition("instructions")
recipe["ingridients"] = (
parts[0].replace("ingridients", "").replace('[],"', "").strip().split("\n")
)
recipe["instructions"] = (
parts[2].partition("vegan")[0].replace('[],"', "").strip().split("\n")
)
recipe["vegan"] = bool(
parts[2].partition("vegan")[1].replace('[],"\n', "").strip()
)
return cls.parse_obj(recipe)
# + [markdown]
"""
#### And amend the return type
"""
# -
@llm_call(llm)
def get_recipe(dish: str, units: str) -> Recipe:
"""
Write a recipe for this {dish}
Be sure to include all the ingridients in {units} units.
ingridients: < necessary ingridients>
instructions: < the instructions for making the dish>
vegan : <this value must be one of [True, False] indicating whether the recipe is vegan>
"""
recipe = get_recipe("kchapuri", "metric")
recipe.instructions
# + [markdown]
"""
#### Hmm, that was a lot of work and it looks like we did not do a good job, let's ask it to give us some juicy JSON
"""
# +
from sly_llama import JsonBaseModel
class Recipe(JsonBaseModel):
ingridients: str | List[str]
instructions: str | List[str]
vegan: bool
# + [markdown]
"""
#### Llamas are not so good at JSON so maybe let it learn from its mistakes
"""
# -
@llm_call(llm)
def get_recipe(dish: str, units: str, error_message: str) -> Recipe:
"""
Write a recipe for this {dish}
Be sure to include all the ingridients in {units} units.
You should provide your response in JSON format
ingridients: < necessary ingridients>
instructions: < the instructions for making the dish>
vegan : <this value must be one of [True, False] indicating whether the recipe is vegan>
{error_message}
"""
# +
from sly_llama import LlmException
recipe = None
error_message = ""
while not recipe:
try:
recipe = get_recipe("kchapuri", "metric", error_message)
except LlmException as e:
error_message = e.message
print(error_message)
recipe
# -
recipe.ingridients
| [] |
2024-01-10 | Nadav-Nesher/entity_aspect_analysis | helper_funcs.py | """
This module contains helper functions and configurations for interacting with the OpenAI API.
It includes:
- Setting the API key for OpenAI.
- Defining an enumeration for specifying response formats.
- Function to get completions from the OpenAI model.
Imports:
- openai: OpenAI's API client.
- Enum: The Python standard library class for creating enumerations.
- OPENAI_API_KEY from secret: The OpenAI API key, imported from the local `secret` module.
"""
from openai import OpenAI
from enum import Enum
from typing import Dict, List
# TODO: move API key to env var
from secret import OPENAI_API_KEY
# Initialize an OpenAI client
client = OpenAI(api_key=OPENAI_API_KEY)
# Define the ResponseFormat enum
class ResponseFormat(Enum):
"""
An enumeration class to define response format types for OpenAI API requests.
Attributes:
TEXT: Specifies a response format of type text.
JSON_OBJECT: Specifies a response format as a JSON object.
"""
TEXT = {"type": "text"}
JSON_OBJECT = {"type": "json_object"}
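# Usage note (illustrative): passing ResponseFormat.JSON_OBJECT to get_completion_from_messages()
# below sends {"type": "json_object"} to the API; the OpenAI endpoint then expects the word "JSON"
# to appear somewhere in the messages, e.g.
#   messages = [{"role": "user", "content": "Return a JSON object with keys 'entity' and 'aspect'."}]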
def get_completion_from_messages(messages: List[Dict[str, str]],
model: str = "gpt-3.5-turbo-1106",
frequency_penalty: float = 0,
n: int = 1,
temperature: float = 0,
response_format: ResponseFormat = ResponseFormat.TEXT) -> str:
"""
Retrieves a response from the OpenAI API based on the provided messages and parameters.
Parameters:
messages (List[Dict[str, str]]): A list of messages (dicts) to send to the OpenAI API for generating completions.
model (str): The model to use for generating completions. Default is "gpt-3.5-turbo-1106".
frequency_penalty (float): Penalty applied to tokens based on how often they have already appeared, so positive values reduce repetition in the output. Default is 0.
n (int): The number of completions to generate. Default is 1.
temperature (float): Controls randomness in the response generation. Lower values mean less random responses. Default is 0.
response_format (ResponseFormat): The format of the response from the OpenAI API. Default is ResponseFormat.TEXT.
Returns:
str: The content of the response message from the OpenAI API.
"""
response = client.chat.completions.create(
messages=messages,
model=model,
frequency_penalty=frequency_penalty,
n=n,
temperature=temperature,
response_format=response_format.value
)
return response.choices[0].message.content
| [] |
2024-01-10 | ual-cci/Text2Analysis | lda_tests_1prep.py | # following the nicely written https://www.machinelearningplus.com/nlp/topic-modeling-gensim-python/
DATASET = "_jobsv1_all"
DATASET = "_jobsv1_goodq"
import numpy as np
data = np.load("data/documents"+DATASET+".npz")['a']
import re
import pandas as pd
from pprint import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# spacy for lemmatization
import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# NLTK Stop words
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
# CUSTOM STOP WORDS?
stoplist = set(', . : / ( ) [ ] - _ ; * & ? ! – a b c d e t i p an us on 000 if it ll to as are then '
'they our the you we s in if a m I x re to this at ref do and'.split())
stop_words.extend(stoplist)
stoplist = set('experience job ensure able working join key apply strong recruitment work team successful '
'paid contact email role skills company day good high time required want right success '
'ideal needs feel send yes no arisen arise title true'.split())
stop_words.extend(stoplist)
stoplist = set('work experience role application process contract interested touch'.split())
stop_words.extend(stoplist)
print(len(data[:1][0]))
pprint(data[:1])
# Remove Emails
data = [re.sub('\S*@\S*\s?', '', doc) for doc in data]
# Remove new line characters
data = [re.sub('\s+', ' ', doc) for doc in data]
# Remove distracting single quotes
data = [re.sub("\'", "", doc) for doc in data]
print(len(data[:1][0]))
pprint(data[:1][0])
def sent_to_words(sentences):
for sentence in sentences:
# remove accent, remove too short and too long words
yield(gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations
data_words = list(sent_to_words(data))
print(data_words[:1])
# Build the bigram and trigram models
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.
trigram = gensim.models.Phrases(bigram[data_words], threshold=100)
# Faster way to get a sentence clubbed as a trigram/bigram
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
# See trigram example
print(len(trigram_mod[bigram_mod[data_words[0]]]))
print(trigram_mod[bigram_mod[data_words[0]]])
# Define functions for stopwords, bigrams, trigrams and lemmatization
def remove_stopwords(texts):
return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
def make_bigrams(texts):
return [bigram_mod[doc] for doc in texts]
def make_trigrams(texts):
return [trigram_mod[bigram_mod[doc]] for doc in texts]
def lemmatization(texts, nlp, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
# Remove Stop Words
data_words_nostops = remove_stopwords(data_words)
# Form Bigrams
data_words_bigrams = make_bigrams(data_words_nostops)
# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# python3 -m spacy download en
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
# Do lemmatization keeping only noun, adj, vb, adv
data_lemmatized = lemmatization(data_words_bigrams, nlp, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
print(data_lemmatized[:1])
### Data prepared, save
texts = data_lemmatized
np.savez_compressed("data/texts"+DATASET+".npz", a=texts)
| [] |
2024-01-10 | ual-cci/Text2Analysis | nlp_functions.py | import re
import pandas as pd
from pprint import pprint
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
import spacy
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
from tqdm import tqdm
# Meta-optimization - selection of the best number of topics for a model:
def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3, mallet_lda = False):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
limit : Max num of topics
Returns:
-------
model_list : List of LDA topic models
coherence_values : Coherence values corresponding to the LDA model with respective number of topics
"""
coherence_values = []
model_list = []
for num_topics in range(start, limit, step):
print("Trying LDA model with",num_topics,"topics.")
mallet_path = '../mallet-2.0.8/bin/mallet' # update this path
if mallet_lda:
model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=num_topics, id2word=dictionary)
else:
model = gensim.models.ldamodel.LdaModel(corpus=corpus,id2word=dictionary,num_topics=num_topics,
random_state=100,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True)
model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
return model_list, coherence_values
def LDA_best_number_of_topics(id2word, corpus, texts, topics_start, topics_end, topics_step, mallet_lda, plot_name):
model_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=texts,
start=topics_start, limit=topics_end, step=topics_step, mallet_lda=mallet_lda)
# Show graph
x = range(topics_start, topics_end, topics_step)
print("x (Num Topics)",x)
print("coherence_values",coherence_values)
plt.plot(x, coherence_values)
plt.xlabel("Num Topics")
plt.ylabel("Coherence score")
plt.legend(("coherence_values"), loc='best')
plt.savefig(plot_name)
#plt.show()
plt.close()
# Print the coherence scores
for m, cv in zip(x, coherence_values):
print("Num Topics =", m, " has Coherence Value of", round(cv, 4))
# Statistics:
def format_topics_sentences(ldamodel, corpus, texts):
# Init output
sent_topics_df = pd.DataFrame()
dominant_topics_as_arr = []
# Get main topic in each document
for i, document in enumerate(ldamodel[corpus]):
belonging_to_topic = document[0]
belonging_to_topic = sorted(belonging_to_topic, key=lambda x: (x[1]), reverse=True)
topic_num, prop_topic = belonging_to_topic[0]
dominant_topics_as_arr.append(topic_num)
wp = ldamodel.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic, 4), topic_keywords]),
ignore_index=True)
sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']
# Add original text to the end of the output
contents = pd.Series(texts)
sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)
return sent_topics_df, dominant_topics_as_arr
# Helper functions:
def sentences_to_words(sentences):
for sentence in sentences:
# remove accent, remove too short and too long words
yield (gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations
# Define functions for stopwords, bigrams, trigrams and lemmatization
def remove_stopwords(texts, stop_words):
return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
def make_bigrams(bigram_model, texts):
return [bigram_model[doc] for doc in texts]
def make_trigrams(trigram_model, bigram_model, texts):
return [trigram_model[bigram_model[doc]] for doc in texts]
def lemmatization(texts, nlp, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in tqdm(texts):
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
| [] |
2024-01-10 | ual-cci/Text2Analysis | nlp_tools.py | import matplotlib
matplotlib.use('Agg')
from nlp_functions import *
import numpy as np
import os
import re
import shutil
from zipfile import ZipFile
import random
from pprint import pprint
import pandas as pd
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# spacy for lemmatization
import spacy
# Plotting tools
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from nltk.corpus import stopwords
import matplotlib.colors as mcolors
import imageio
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
class NLPTools(object):
"""
Main class for NLP related functions ...
"""
# TODO:
# - function that goes from text into pyLDA visualization = RAW TEXT
# - pyLDA obv
# - wordclouds
# - function which processes data as list of articles (for example) = LIST OF TEXTS
# - [optional] caption for each of the articles = CAPTIONS
# - this one allows tsne (color by topics), histogram distribution over topics
# (optional caption as mouseover)
# - and (optionally) also the other analysis on pure joined text
# * def prepare - loads input(s)
# * def pyLDA
# def wordclouds
# def list_tsne
# def list_histograms
# def optimal_number_of_topics(min, max) -> returns scores over coherences + best coherence
def __init__(self, settings):
self.settings = settings
self.verbose = 1 # 1 normal, 2 print everything including examples ... 0 silent
self.colors_topics = [color for name, color in mcolors.TABLEAU_COLORS.items()] + [color for name, color in
mcolors.XKCD_COLORS.items()]
self.processing_mode = -1 # -1 = no data loaded, 1 = raw text loaded, 2 = texts loaded in list,
# 3 = texts loaded in list including captions
self.stop_words = self.load_stopwords()
self.id2word = None
self.corpus = None
self.lda_model = None
def cleanup(self):
del self.list_of_texts_data
del self.list_of_captions_data
del self.id2word
del self.corpus
del self.lda_model
### Helper functions:
def load_splitting_by_sentences(self, raw_text_input):
V = 1
#V = 2 faster yes, but i want to keep original terms only limited to some types - another func maybe?
sentences = gensim.summarization.textcleaner.split_sentences(raw_text_input)
sentences_cleaned, sentences_lemmatized = self.preprocessing(sentences, self.stop_words)
"""
elif V==2: #TODO TEST IF ITS THE SAME, BUT FASTER!
sentences_as_syntactic_units = gensim.summarization.textcleaner.clean_text_by_sentences(raw_text_input)
sentences_lemmatized = [str(s.token).split(" ") for s in sentences_as_syntactic_units]
"""
print("Raw input of",len(raw_text_input),"was split into",len(sentences_lemmatized),"sentences.")
self.list_of_texts_data = sentences_lemmatized
self.list_of_captions_data = None
self.stats_n_chars = len(raw_text_input)
self.stats_n_documents = len(self.list_of_texts_data)
# Shuffle after loading!
self.shuffle()
def load_list_of_data(self, list_of_texts_input, list_of_captions_input=None):
print("Loaded",len(list_of_texts_input),"documents.")
documents_cleaned, documents_lemmatized = self.preprocessing(list_of_texts_input, self.stop_words)
self.list_of_texts_data = documents_lemmatized
self.list_of_captions_data = list_of_captions_input
self.stats_n_chars = sum([len(doc) for doc in list_of_texts_input])
self.stats_n_documents = len(list_of_texts_input)
# Shuffle after loading!
self.shuffle()
def shuffle(self):
print("Shuffling data!")
if self.list_of_captions_data is not None:
c = list(zip(self.list_of_texts_data, self.list_of_captions_data))
random.shuffle(c)
self.list_of_texts_data, self.list_of_captions_data = zip(*c)
else:
random.shuffle(self.list_of_texts_data)
def restart_workspace(self):
# restart / file cleanup!:
files = ["save.zip", "templates/plots/LDA_Visualization.html"]
for i in range(self.LDA_number_of_topics):
files.append("static/wordclouds_"+str(i).zfill(2)+".png")
for file in files:
if os.path.exists(file):
os.remove(file)
print("deleted", file)
def prepare_workspace(self, folder_name):
plot_dir = "templates/plots/" + folder_name
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
if not os.path.exists("data"):
os.makedirs("data")
plot_dir = "static/"+folder_name+"/"
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
def zip_files(self, archive_name, files_to_add):
print("zipping")
#output_filename = "save"
#dir_name = "templates/plots"
#shutil.make_archive(output_filename, 'zip', dir_name)
# create a ZipFile object
zipObj = ZipFile(archive_name, 'w')
for file in files_to_add:
arcname = file.split("/")[-1]
zipObj.write(file, arcname)
print("-added", file,"to zip as", arcname)
zipObj.close()
def load_stopwords(self):
# NLTK Stop words
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
# CUSTOM STOP WORDS?
stoplist = set(', . : / ( ) [ ] - _ ; * & ? ! – a b c d e t i p an us on 000 if it ll to as are then '
'they our the you we s in if a m I x re to this at ref do and'.split())
stop_words.extend(stoplist)
stoplist = set('experience job ensure able working join key apply strong recruitment work team successful '
'paid contact email role skills company day good high time required want right success '
'ideal needs feel send yes no arisen arise title true work role application process contract '
'interested touch'.split())
stop_words.extend(stoplist)
return stop_words
def preprocessing(self, data, stop_words):
# data contains a list of either sentences or documents (list of strings)
if self.verbose > 1:
print("loaded text")
print(len(data[:1][0]))
pprint(data[:1])
# Remove Emails
print("-removing emails")
data = [re.sub('\S*@\S*\s?', '', doc) for doc in data]
# Remove new line characters
print("-removing new lines")
data = [re.sub('\s+', ' ', doc) for doc in data]
# Remove distracting single quotes
print("-removing single quotes")
data = [re.sub("\'", "", doc) for doc in data]
if self.verbose > 1:
print("removed special chars text")
print(len(data[:1][0]))
pprint(data[:1][0])
print("-sentences to words")
data_words = list(sentences_to_words(data))
if self.verbose > 1:
print(data_words[:1])
# Build the bigram and trigram models
print("-bigrams")
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.
# Faster way to get a sentence clubbed as a trigram/bigram
bigram_mod = gensim.models.phrases.Phraser(bigram)
# Remove Stop Words
data_words_nostops = remove_stopwords(data_words, stop_words)
# Form Bigrams
data_words_bigrams = make_bigrams(bigram_mod, data_words_nostops)
# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# python3 -m spacy download en
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
nlp.max_length = 9000000
# Do lemmatization keeping only noun, adj, vb, adv
print("-lemmatization")
data_lemmatized = lemmatization(data_words_bigrams, nlp, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
count = 0
for l in data_lemmatized:
count += len(l)
print("number of lemmatized words:", count)
if self.verbose > 1:
print(data_lemmatized[:1])
return data, data_lemmatized
def prep_data_lemmatized(self):
data_cleaned, data_lemmatized = self.preprocessing(self.list_of_texts_data, self.stop_words)
return data_lemmatized
def prepare_lda_model(self, data_lemmatized):
# Build LDA model
self.id2word = corpora.Dictionary(data_lemmatized)
self.corpus = [self.id2word.doc2bow(text) for text in data_lemmatized]
#if self.verbose > 0:
print("Building/Loading LDA model (this takes time) with", self.LDA_number_of_topics, "topics")
self.lda_model = gensim.models.ldamodel.LdaModel(corpus=self.corpus, id2word=self.id2word, num_topics=self.LDA_number_of_topics,
random_state=100, update_every=1, chunksize=100, passes=10,
alpha='auto', per_word_topics=True)
#self.lda_model.save('data/model_LDAEXAMPLE.lda')
#self.lda_model = gensim.models.LdaModel.load('data/model_LDAEXAMPLE.lda')
### Specific analysis functions
def analyze_pyLDA(self, pyLDAviz_name):
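        """Render the trained LDA model with pyLDAvis and save the interactive view as HTML."""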
if (self.lda_model is None or self.corpus is None or self.id2word is None):
print("LDA model is not ready, call that first!")
assert False
print("Saving pyLDA visualization into > ", pyLDAviz_name)
import pyLDAvis
import pyLDAvis.gensim # don't skip this
vis = pyLDAvis.gensim.prepare(self.lda_model, self.corpus, self.id2word)
pyLDAvis.save_html(vis, pyLDAviz_name)
del vis
print("-done")
def analyze_wordclouds(self, NAME_wordclouds):
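        """Save one word-cloud PNG per LDA topic, colored with the topic's assigned color."""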
if (self.lda_model is None):
print("LDA model is not ready, call that first!")
assert False
print("Saving wordclouds into >", NAME_wordclouds)
topics = self.lda_model.show_topics(num_topics=self.LDA_number_of_topics, formatted=False)
#""" # assuming that the plt.figure() opening and closing made problems with gcloud concurrecy...
for topic in topics:
topic_i = topic[0]
topic_words = topic[1]
print("topic", topic_i, "===", topic_words)
topic_words = dict(topic_words)
cloud = WordCloud(stopwords=self.stop_words, background_color='white', width=2500, height=1800,
max_words=10, colormap='tab10',
color_func=lambda *args, **kwargs: self.colors_topics[topic_i],
prefer_horizontal=1.0)
cloud.generate_from_frequencies(topic_words, max_font_size=300)
imageio.imwrite(NAME_wordclouds + str(topic_i).zfill(2) + ".png", cloud)
""" # this alternative was causing issues ...
fig = plt.figure()
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(topic_i), fontdict=dict(size=16))
plt.axis('off')
plt.margins(x=0, y=0)
plt.tight_layout()
plt.savefig(self.NAME_wordclouds + str(topic_i).zfill(2) + ".png")
plt.close()
"""
del cloud
print("-done")
    def analyze_tsne(self, NAME_tsne):
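        """Reduce per-document topic weights to 2D with t-SNE and save an interactive Bokeh scatter plot."""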
print("Saving TSNE visualization into >", NAME_tsne)
from sklearn.manifold import TSNE
from bokeh.plotting import figure, output_file, show, save, ColumnDataSource
from bokeh.models import HoverTool, WheelZoomTool, PanTool, BoxZoomTool, ResetTool, SaveTool
# Get topic weights
doc_features = []
doc_titles = []
doc_dominanttopics = []
for i, row_list in enumerate(self.lda_model[self.corpus]):
# What we have in the encoding:
# row_list[0] = Document topics: [(0, 0.87507219282484316), (1, 0.12492780717515681)]
# row_list[1] = Word topics: [(0, [0, 1]), (3, [0, 1]), (4, [0, 1]), (7, [0, 1])]
# row_list[2] = Phi values: [(0, [(0, 0.9783234200583657), (1, 0.021676579941634355)]), (3, [(0, 0.93272653621872503), (1, 0.067273463781275009)]), (4, [(0, 0.98919912227661466), (1, 0.010800877723385368)]), (7, [(0, 0.97541896333079636), (1, 0.024581036669203641)])]
# row_list[0] has the weights to topics
# This means that one document was encoded into the LDA_number_of_topics topics we chose
tmp = np.zeros(self.LDA_number_of_topics)
max_w = -1
max_w_idx = -1
for j, w in row_list[0]:
tmp[j] = w
if max_w < w:
max_w_idx = j
max_w = w
doc_features.append(tmp)
doc_dominanttopics.append(max_w_idx)
doc_titles.append(self.list_of_captions_data[i])
arr = pd.DataFrame(doc_features).fillna(0).values
# tSNE Dimension Reduction
tsne_model = TSNE(n_components=2, verbose=1, random_state=0, angle=.99, init='pca')
tsne_lda = tsne_model.fit_transform(arr)
TOOLTIPS = [
("index", "$index"),
("(x,y)", "($x, $y)"),
("desc", "@desc"),
]
mycolors = np.array(self.colors_topics)
hover = HoverTool(tooltips=TOOLTIPS)
tools = [hover, WheelZoomTool(), PanTool(), BoxZoomTool(), ResetTool(), SaveTool()]
plot = figure(title="t-SNE Clustering of {} LDA Topics".format(self.LDA_number_of_topics),
tools=tools, plot_width=900, plot_height=700)
source = ColumnDataSource(data=dict(
x=tsne_lda[:, 0],
y=tsne_lda[:, 1],
desc=doc_titles,
color=mycolors[doc_dominanttopics],
))
plot.scatter(x='x', y='y', source=source, color='color')
output_file(NAME_tsne)
save(plot)
# show(plot)
# clean
del tsne_model, tsne_lda, arr, plot, source, hover, tools, doc_features, doc_titles, doc_dominanttopics
### Main called functions
def analyze_raw_text(self, number_of_topics=5, folder_name = "demo-folder"):
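        """Run the full pipeline: fit the LDA model, save the pyLDAvis page and word clouds (plus t-SNE when captions exist), then zip the outputs."""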
# Load input data
data_lemmatized = self.list_of_texts_data
self.LDA_number_of_topics = number_of_topics
# Prepare the workspace folders
self.restart_workspace()
plot_dir = "templates/plots/" + folder_name
self.prepare_workspace(folder_name)
# Prepare the model
self.prepare_lda_model(data_lemmatized)
# Complete analysis
pyLDAviz_name = plot_dir+"/LDA_Visualization.html"
self.analyze_pyLDA(pyLDAviz_name)
NAME_wordclouds = "static/"+folder_name+"/wordclouds_" # +i+.png
self.analyze_wordclouds(NAME_wordclouds)
files_to_zip = [pyLDAviz_name]
# Additionally we can also call the
# list_tsne
# list_histograms
if self.list_of_captions_data is not None:
NAME_tsne = plot_dir+"/tsne.html"
self.analyze_tsne(NAME_tsne)
files_to_zip.append(NAME_tsne)
for i in range(self.LDA_number_of_topics):
files_to_zip.append("static/"+folder_name+"/wordclouds_"+str(i).zfill(2)+".png")
archive_name = "templates/plots/" + folder_name + "/analysis.zip"
self.zip_files(archive_name, files_to_zip)
return "Analysis ready!"
###################################################
###################################################
###################################################
###################################################
###################################################
# Bak:
def analyze_full_bak(self):
self.restart_workspace()
sentences_lemmatized = self.list_of_texts_data
DATASET = ""
plot_dir = "templates/plots" + DATASET + "/"
self.prepare_workspace(plot_dir)
### Settings:
METAOPT_num_of_topics = False
NAME_METAOPT_plot = plot_dir + "LDA_best_number_of_topics_"
LOAD_lda_model = False
#LDA_number_of_topics = 13 # Wait for metaopt!
LDA_number_of_topics = 4 # Wait for metaopt!
CALC_coherence = True
VIZ_wordclouds = True
VIZ_html_interactive = True # SLOW and last
NAME_wordclouds = plot_dir + "wordclouds_" # +i+.png
NAME_html_interactive = plot_dir + "LDA_Visualization.html" # "vis.html"
DEBUG_print_docs = False
DEBUG_print_topics = False
# GENSIM analysis
id2word = corpora.Dictionary(sentences_lemmatized)
corpus = [id2word.doc2bow(text) for text in sentences_lemmatized]
"""
if DEBUG_print_docs:
print("document in vector format:", corpus[:1])
print("readable form:", [[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]])
print()
if METAOPT_num_of_topics:
# Can take a long time to run.
topics_start = 5
topics_end = 15 # non exclusive
topics_step = 1
plot_name = NAME_METAOPT_plot + str(topics_start) + "TO" + str(topics_end) + "BY" + str(
topics_step) + ".png"
LDA_best_number_of_topics(id2word, corpus, sentences_lemmatized, topics_start, topics_end, topics_step,
mallet_lda=LDA_tryMallet, plot_name=plot_name)
print("Analyze the results of the meta-optimalization ^^^")
assert False
"""
# Build LDA model
if self.verbose > 0:
print("Building/Loading LDA model (takes time)")
if LOAD_lda_model:
lda_model = gensim.models.LdaModel.load('data/model_LDAEXAMPLE.lda')
else:
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=id2word, num_topics=LDA_number_of_topics,
random_state=100, update_every=1, chunksize=100, passes=10,
alpha='auto', per_word_topics=True)
lda_model.save('data/model_LDAEXAMPLE.lda')
if DEBUG_print_topics:
print("Topics:")
pprint(lda_model.print_topics(num_topics=LDA_number_of_topics, num_words=5))
# Evaluation - Perplexity, Coherence Score
print('\nPerplexity: ',
lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.
if CALC_coherence:
coherence_model_lda = CoherenceModel(model=lda_model, texts=sentences_lemmatized, dictionary=id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
# Dominant topics per document
"""
if SAVE_doc2topic or SAVE_topic2docs or SAVE_topic2num or VIZ_hist:
df_topic_sents_keywords, dominant_topics_as_arr = format_topics_sentences(ldamodel=lda_model, corpus=corpus,
texts=sentences)
df_dominant_topic = df_topic_sents_keywords.reset_index()
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
"""
# Wordclouds of Top N words in each topic
if VIZ_wordclouds:
print("Saving wordclouds into >", NAME_wordclouds, "...")
# topics = lda_model.show_topics(formatted=False)
topics = lda_model.show_topics(num_topics=LDA_number_of_topics, formatted=False)
for i_t, topic in enumerate(topics):
topic_i = topic[0]
topic_words = topic[1]
print("topic", topic_i, "===", topic_words)
fig = plt.figure()
topic_words = dict(topic_words)
cloud = WordCloud(stopwords=self.stop_words, background_color='white', width=2500, height=1800,
max_words=10, colormap='tab10',
color_func=lambda *args, **kwargs: self.colors_topics[topic_i],
prefer_horizontal=1.0)
cloud.generate_from_frequencies(topic_words, max_font_size=300)
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(topic_i), fontdict=dict(size=16))
plt.axis('off')
plt.margins(x=0, y=0)
plt.tight_layout()
plt.savefig(NAME_wordclouds + str(topic_i).zfill(2) + ".png")
plt.close()
if VIZ_html_interactive:
# Takes forever!
print("creating visualization...")
vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)
print("saving it...")
pyLDAvis.save_html(vis, NAME_html_interactive)
print("done")
output_filename = "save"
dir_name = "templates/plots"
shutil.make_archive(output_filename, 'zip', dir_name)
return NAME_html_interactive
def viz_tsne(self, titles, plot_dir, lda_model, corpus, LDA_number_of_topics):
NAME_tsne = plot_dir + "tsne.html"
print("Saving TSNE visualization into >", NAME_tsne)
from sklearn.manifold import TSNE
from bokeh.plotting import figure, output_file, show, save, ColumnDataSource
# Get topic weights
doc_features = []
doc_titles = []
doc_dominanttopics = []
for i, row_list in enumerate(lda_model[corpus]):
# What we have in the encoding:
# row_list[0] = Document topics: [(0, 0.87507219282484316), (1, 0.12492780717515681)]
# row_list[1] = Word topics: [(0, [0, 1]), (3, [0, 1]), (4, [0, 1]), (7, [0, 1])]
# row_list[2] = Phi values: [(0, [(0, 0.9783234200583657), (1, 0.021676579941634355)]), (3, [(0, 0.93272653621872503), (1, 0.067273463781275009)]), (4, [(0, 0.98919912227661466), (1, 0.010800877723385368)]), (7, [(0, 0.97541896333079636), (1, 0.024581036669203641)])]
# row_list[0] has the weights to topics
# This means that one document was encoded into the LDA_number_of_topics topics we chose
tmp = np.zeros(LDA_number_of_topics)
max_w = -1
max_w_idx = -1
for j, w in row_list[0]:
tmp[j] = w
if max_w < w:
max_w_idx = j
max_w = w
doc_features.append(tmp)
doc_dominanttopics.append(max_w_idx)
doc_titles.append(titles[i])
arr = pd.DataFrame(doc_features).fillna(0).values
# tSNE Dimension Reduction
tsne_model = TSNE(n_components=2, verbose=1, random_state=0, angle=.99, init='pca')
tsne_lda = tsne_model.fit_transform(arr)
TOOLTIPS = [
("index", "$index"),
("(x,y)", "($x, $y)"),
("desc", "@desc"),
]
mycolors = np.array(self.colors_topics)
from bokeh.models import HoverTool, WheelZoomTool, PanTool, BoxZoomTool, ResetTool, SaveTool
hover = HoverTool(tooltips=TOOLTIPS)
tools = [hover, WheelZoomTool(), PanTool(), BoxZoomTool(), ResetTool(), SaveTool()]
# plot = figure(title="t-SNE Clustering of {} LDA Topics".format(LDA_number_of_topics),
# tooltips=TOOLTIPS, plot_width=900, plot_height=700)
plot = figure(title="t-SNE Clustering of {} LDA Topics".format(LDA_number_of_topics),
tools=tools, plot_width=900, plot_height=700)
source = ColumnDataSource(data=dict(
x=tsne_lda[:, 0],
y=tsne_lda[:, 1],
desc=doc_titles,
color=mycolors[doc_dominanttopics],
))
plot.scatter(x='x', y='y', source=source, color='color')
output_file(NAME_tsne)
save(plot)
# show(plot)
def viz_hist(self, plot_dir, dominant_topics_as_arr, LDA_number_of_topics, lda_model):
NAME_hist = plot_dir + "hist_topics.png"
print("Saving histogram into >", NAME_hist)
hist_tmp = {}
for val in dominant_topics_as_arr:
if val not in hist_tmp:
                hist_tmp[val] = 1
else:
hist_tmp[val] += 1
print("My own hist:", hist_tmp)
xs = list(range(LDA_number_of_topics))
ys = [0] * LDA_number_of_topics
for topic_num, val in hist_tmp.items():
wp = lda_model.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp]) # removed probabilities
print("topic", topic_num, ":", val, "time(s) , keywords = ", topic_keywords)
ys[topic_num] = val
plt.bar(xs, ys)
plt.xticks(np.arange(LDA_number_of_topics), np.arange(LDA_number_of_topics))
plt.savefig(NAME_hist)
plt.close()
def save_csv_documents(self, plot_dir, df_dominant_topic, df_topic_sents_keywords):
SAVE_doc2topic = False # don't really need
NAME_doc2topic = plot_dir + "doc2topic.csv"
SAVE_topic2docs = True
NAME_topic2docs = plot_dir + "topic2docs.csv"
SAVE_topic2num = True
NAME_topic2num = plot_dir + "topic2num.csv"
if SAVE_doc2topic:
df_dominant_topic.to_csv(NAME_doc2topic, index=True)
# Topic 2 representative documents
if SAVE_topic2docs:
sent_topics_sorteddf_mallet = pd.DataFrame()
sent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')
for i, grp in sent_topics_outdf_grpd:
sent_topics_sorteddf_mallet = pd.concat(
[sent_topics_sorteddf_mallet, grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)],
axis=0)
sent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True)
sent_topics_sorteddf_mallet.columns = ['Topic_Num', "Topic_Perc_Contrib", "Keywords", "Text"]
sent_topics_sorteddf_mallet.to_csv(NAME_topic2docs, index=True)
# Topic 2 number of docs
if SAVE_topic2num:
topic_counts = df_topic_sents_keywords['Dominant_Topic'].value_counts()
topic_contribution = round(topic_counts / topic_counts.sum(), 4)
topic_num_keywords = df_topic_sents_keywords[['Dominant_Topic', 'Topic_Keywords']]
df_dominant_topics = pd.concat([topic_num_keywords, topic_counts, topic_contribution], axis=1)
df_dominant_topics.columns = ['Dominant_Topic', 'Topic_Keywords', 'Num_Documents', 'Perc_Documents']
df_dominant_topics.to_csv(NAME_topic2num, index=True)
| [] |
2024-01-10 | ual-cci/Text2Analysis | lda_tests_2analyze.py | # following the nicely written https://www.machinelearningplus.com/nlp/topic-modeling-gensim-python/
from nlp_functions import *
def main():
DATASET = "_jobsv1_all" # 10k jobs, but also some low quality ones
# OPT number of topics = ?
#DATASET = "_jobsv1_goodq" # 5k jobs, selecting only higher quality ones
# OPT number of topics = ?
#DATASET = "_stackv1_graphicdesignqa_sub20k" # 20k subsets of q'n'a from graphic design
# OPT number of topics = ?
import numpy as np
# < HAX numpy version
# save np.load
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
texts = np.load("data/texts"+DATASET+".npz")['a']
data = np.load("data/documents"+DATASET+".npz")['a']
titles = np.load("data/titles"+DATASET+".npz")['a']
# restore np.load for future normal usage
np.load = np_load_old
# HAX numpy version />
plot_dir = "plots"+DATASET+"/"
import os
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
import pandas as pd
from pprint import pprint
import gensim
import gensim.corpora as corpora
from gensim.models import CoherenceModel
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
### Settings:
METAOPT_num_of_topics = True
NAME_METAOPT_plot = plot_dir+"LDA_best_number_of_topics_"
LOAD_lda_model = False
LDA_tryMallet = False # doesn't have the same support as generic LDA tho - convert differently?
LDA_number_of_topics = 13 # Wait for metaopt!
CALC_coherence = True
VIZ_hist = True
VIZ_wordclouds = True
VIZ_TSNE = True
VIZ_html_interactive = True #SLOW and last
NAME_hist = plot_dir+"hist_topics.png"
NAME_tsne = plot_dir+"tsne.html"
NAME_wordclouds = plot_dir+"wordclouds_" # +i+.png
NAME_html_interactive = plot_dir+"LDA_Visualization.html" #"vis.html"
DEBUG_print_docs = False
DEBUG_print_topics = False
SAVE_doc2topic = False # don't really need
NAME_doc2topic = plot_dir+"doc2topic.csv"
SAVE_topic2docs = True
NAME_topic2docs = plot_dir+"topic2docs.csv"
SAVE_topic2num = True
NAME_topic2num = plot_dir+"topic2num.csv"
# GENSIM analysis
id2word = corpora.Dictionary(texts)
corpus = [id2word.doc2bow(text) for text in texts]
if DEBUG_print_docs:
print("document in vector format:", corpus[:1])
print("readable form:", [[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]])
print()
if METAOPT_num_of_topics:
# Can take a long time to run.
topics_start = 5
topics_end = 15 # non exclusive
topics_step = 1
plot_name = NAME_METAOPT_plot + str(topics_start) + "TO" + str(topics_end) + "BY" + str(topics_step) + ".png"
LDA_best_number_of_topics(id2word, corpus, texts, topics_start, topics_end, topics_step, mallet_lda=LDA_tryMallet, plot_name=plot_name)
print("Analyze the results of the meta-optimalization ^^^")
assert False
# Build LDA model
print("Building/Loading LDA model (takes time)")
if LOAD_lda_model:
lda_model = gensim.models.LdaModel.load('data/model_LDAEXAMPLE.lda')
else:
if LDA_tryMallet:
# mallet LDA
mallet_path = '../mallet-2.0.8/bin/mallet' # update this path
lda_model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=LDA_number_of_topics, id2word=id2word)
# convert
lda_model = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_model)
else:
# normal LDA
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=LDA_number_of_topics,
random_state=100,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True)
lda_model.save('data/model_LDAEXAMPLE.lda')
if DEBUG_print_topics:
print("Topics:")
pprint(lda_model.print_topics(num_topics=LDA_number_of_topics, num_words=5))
doc_lda = lda_model[corpus]
# Evaluation - Perplexity, Coherence Score
print('\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.
if CALC_coherence:
coherence_model_lda = CoherenceModel(model=lda_model, texts=texts, dictionary=id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
######################################################################################################
# Dominant topics per document
if SAVE_doc2topic or SAVE_topic2docs or SAVE_topic2num or VIZ_hist:
df_topic_sents_keywords, dominant_topics_as_arr = format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data)
df_dominant_topic = df_topic_sents_keywords.reset_index()
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
if SAVE_doc2topic:
df_dominant_topic.to_csv(NAME_doc2topic, index=True)
# Topic 2 representative documents
if SAVE_topic2docs:
sent_topics_sorteddf_mallet = pd.DataFrame()
sent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')
for i, grp in sent_topics_outdf_grpd:
sent_topics_sorteddf_mallet = pd.concat(
[sent_topics_sorteddf_mallet, grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)], axis=0)
sent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True)
sent_topics_sorteddf_mallet.columns = ['Topic_Num', "Topic_Perc_Contrib", "Keywords", "Text"]
sent_topics_sorteddf_mallet.to_csv(NAME_topic2docs, index=True)
# Topic 2 number of docs
if SAVE_topic2num:
topic_counts = df_topic_sents_keywords['Dominant_Topic'].value_counts()
topic_contribution = round(topic_counts / topic_counts.sum(), 4)
topic_num_keywords = df_topic_sents_keywords[['Dominant_Topic', 'Topic_Keywords']]
df_dominant_topics = pd.concat([topic_num_keywords, topic_counts, topic_contribution], axis=1)
df_dominant_topics.columns = ['Dominant_Topic', 'Topic_Keywords', 'Num_Documents', 'Perc_Documents']
df_dominant_topics.to_csv(NAME_topic2num, index=True)
# As a plot:
if VIZ_hist:
print("Saving histogram into >", NAME_hist)
hist_tmp = {}
for val in dominant_topics_as_arr:
if val not in hist_tmp:
                hist_tmp[val] = 1
else:
hist_tmp[val] += 1
print("My own hist:", hist_tmp)
xs = list(range(LDA_number_of_topics))
ys = [0]*LDA_number_of_topics
for topic_num,val in hist_tmp.items():
wp = lda_model.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp]) # removed probabilities
print("topic", topic_num, ":", val, "time(s) , keywords = ", topic_keywords)
ys[topic_num] = val
plt.bar(xs, ys)
plt.xticks(np.arange(LDA_number_of_topics), np.arange(LDA_number_of_topics))
plt.savefig(NAME_hist)
plt.close()
import matplotlib.colors as mcolors
colors_topics = [color for name, color in mcolors.TABLEAU_COLORS.items()] + [color for name, color in mcolors.XKCD_COLORS.items()]
# Wordclouds of Top N words in each topic
if VIZ_wordclouds:
print("Saving wordclouds into >", NAME_wordclouds, "...")
from matplotlib import pyplot as plt
from wordcloud import WordCloud
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
stoplist = set(', . : / ( ) [ ] - _ ; * & ? ! – a b c d e t i p an us on 000 if it ll to as are then '
'they our the you we s in if a m I x re to this at ref do and'.split())
stop_words.extend(stoplist)
stoplist = set('experience job ensure able working join key apply strong recruitment work team successful '
                       'paid contact email role skills company day good high time required want right success '
'ideal needs feel send yes no arisen arise title true'.split())
stop_words.extend(stoplist)
stoplist = set('work experience role application process contract interested touch'.split())
stop_words.extend(stoplist)
#topics = lda_model.show_topics(formatted=False)
topics = lda_model.show_topics(num_topics=LDA_number_of_topics, formatted=False)
for i_t,topic in enumerate(topics):
topic_i = topic[0]
topic_words = topic[1]
print("topic", topic_i, "===", topic_words)
fig = plt.figure()
topic_words = dict(topic_words)
cloud = WordCloud(stopwords=stop_words,background_color='white',width=2500,height=1800,
max_words=10,colormap='tab10', color_func=lambda *args, **kwargs: colors_topics[topic_i],
prefer_horizontal=1.0)
cloud.generate_from_frequencies(topic_words, max_font_size=300)
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(topic_i), fontdict=dict(size=16))
plt.axis('off')
plt.margins(x=0, y=0)
plt.tight_layout()
plt.savefig(NAME_wordclouds + str(topic_i).zfill(2) + ".png")
plt.close()
# T-SNE
if VIZ_TSNE:
print("Saving TSNE visualization into >", NAME_tsne)
from sklearn.manifold import TSNE
from bokeh.plotting import figure, output_file, show, save, ColumnDataSource
# Get topic weights
doc_features = []
doc_titles = []
doc_dominanttopics = []
for i, row_list in enumerate(lda_model[corpus]):
# What we have in the encoding:
# row_list[0] = Document topics: [(0, 0.87507219282484316), (1, 0.12492780717515681)]
# row_list[1] = Word topics: [(0, [0, 1]), (3, [0, 1]), (4, [0, 1]), (7, [0, 1])]
# row_list[2] = Phi values: [(0, [(0, 0.9783234200583657), (1, 0.021676579941634355)]), (3, [(0, 0.93272653621872503), (1, 0.067273463781275009)]), (4, [(0, 0.98919912227661466), (1, 0.010800877723385368)]), (7, [(0, 0.97541896333079636), (1, 0.024581036669203641)])]
# row_list[0] has the weights to topics
# This means that one document was encoded into the LDA_number_of_topics topics we chose
tmp = np.zeros(LDA_number_of_topics)
max_w = -1
max_w_idx = -1
for j, w in row_list[0]:
tmp[j] = w
if max_w < w:
max_w_idx = j
max_w = w
doc_features.append(tmp)
doc_dominanttopics.append(max_w_idx)
doc_titles.append(titles[i])
arr = pd.DataFrame(doc_features).fillna(0).values
# tSNE Dimension Reduction
tsne_model = TSNE(n_components=2, verbose=1, random_state=0, angle=.99, init='pca')
tsne_lda = tsne_model.fit_transform(arr)
TOOLTIPS = [
("index", "$index"),
("(x,y)", "($x, $y)"),
("desc", "@desc"),
]
mycolors = np.array(colors_topics)
from bokeh.models import HoverTool, WheelZoomTool, PanTool, BoxZoomTool, ResetTool, SaveTool
hover = HoverTool(tooltips=TOOLTIPS)
tools = [hover, WheelZoomTool(), PanTool(), BoxZoomTool(), ResetTool(), SaveTool()]
#plot = figure(title="t-SNE Clustering of {} LDA Topics".format(LDA_number_of_topics),
# tooltips=TOOLTIPS, plot_width=900, plot_height=700)
plot = figure(title="t-SNE Clustering of {} LDA Topics".format(LDA_number_of_topics),
tools=tools, plot_width=900, plot_height=700)
source = ColumnDataSource(data=dict(
x=tsne_lda[:,0],
y=tsne_lda[:,1],
desc=doc_titles,
color=mycolors[doc_dominanttopics],
))
plot.scatter(x='x', y='y', source=source, color='color')
output_file(NAME_tsne)
save(plot)
#show(plot)
if VIZ_html_interactive:
# Takes forever!
print("creating visualization...")
vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)
print("saving it...")
pyLDAvis.save_html(vis, NAME_html_interactive)
print("done")
if __name__ == "__main__": # has to be in main function for multicore support to work - https://github.com/RaRe-Technologies/gensim/issues/940
main() | [] |
2024-01-10 | pytorch-tpu/transformers | src~transformers~models~clip~configuration_clip.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" CLIP model configuration"""
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/config.json",
# See all CLIP models at https://huggingface.co/models?filter=clip
}
class CLIPTextConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`CLIPTextModel`]. It is used to instantiate a CLIP
text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the text encoder of the CLIP
[openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`CLIPModel`].
hidden_size (`int`, *optional*, defaults to 512):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 77):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 49406):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 49407):
End of stream token id.
Example:
```python
>>> from transformers import CLIPTextConfig, CLIPTextModel
>>> # Initializing a CLIPTextConfig with openai/clip-vit-base-patch32 style configuration
>>> configuration = CLIPTextConfig()
>>> # Initializing a CLIPTextModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
>>> model = CLIPTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "clip_text_model"
def __init__(
self,
vocab_size=49408,
hidden_size=512,
intermediate_size=2048,
projection_dim=512,
num_hidden_layers=12,
num_attention_heads=8,
max_position_embeddings=77,
hidden_act="quick_gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
# This differs from `CLIPTokenizer`'s default and from openai/clip
# See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538
pad_token_id=1,
bos_token_id=49406,
eos_token_id=49407,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_position_embeddings = max_position_embeddings
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
cls._set_token_in_kwargs(kwargs)
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
# get the text config dict if we are loading from CLIPConfig
if config_dict.get("model_type") == "clip":
config_dict = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
class CLIPVisionConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`CLIPVisionModel`]. It is used to instantiate a
CLIP vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the vision encoder of the CLIP
[openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import CLIPVisionConfig, CLIPVisionModel
>>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration
>>> configuration = CLIPVisionConfig()
>>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
>>> model = CLIPVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "clip_vision_model"
def __init__(
self,
hidden_size=768,
intermediate_size=3072,
projection_dim=512,
num_hidden_layers=12,
num_attention_heads=12,
num_channels=3,
image_size=224,
patch_size=32,
hidden_act="quick_gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
cls._set_token_in_kwargs(kwargs)
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
# get the vision config dict if we are loading from CLIPConfig
if config_dict.get("model_type") == "clip":
config_dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
class CLIPConfig(PretrainedConfig):
r"""
[`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. It is used to instantiate
a CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating
a configuration with the defaults will yield a similar configuration to that of the CLIP
[openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`CLIPTextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`CLIPVisionConfig`].
projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import CLIPConfig, CLIPModel
>>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration
>>> configuration = CLIPConfig()
>>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
>>> model = CLIPModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig
>>> from transformers import CLIPTextConfig, CLIPVisionConfig
>>> # Initializing a CLIPText and CLIPVision configuration
>>> config_text = CLIPTextConfig()
>>> config_vision = CLIPVisionConfig()
>>> config = CLIPConfig.from_text_vision_configs(config_text, config_vision)
```"""
model_type = "clip"
def __init__(
self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
text_config_dict = kwargs.pop("text_config_dict", None)
vision_config_dict = kwargs.pop("vision_config_dict", None)
super().__init__(**kwargs)
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
text_config = {}
# This is the complete result when using `text_config_dict`.
_text_config_dict = CLIPTextConfig(**text_config_dict).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
message = (
f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
f'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
message = (
f"`text_config_dict` is provided which will be used to initialize `CLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overridden.'
)
logger.warning(message)
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict)
if vision_config_dict is not None:
if vision_config is None:
vision_config = {}
# This is the complete result when using `vision_config_dict`.
_vision_config_dict = CLIPVisionConfig(**vision_config_dict).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_vision_config_dict["id2label"] = {
str(key): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
message = (
f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
f'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
message = (
f"`vision_config_dict` is provided which will be used to initialize `CLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overridden.'
)
logger.warning(message)
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict)
if text_config is None:
text_config = {}
logger.info("`text_config` is `None`. Initializing the `CLIPTextConfig` with default values.")
if vision_config is None:
vision_config = {}
logger.info("`vision_config` is `None`. initializing the `CLIPVisionConfig` with default values.")
self.text_config = CLIPTextConfig(**text_config)
self.vision_config = CLIPVisionConfig(**vision_config)
self.projection_dim = projection_dim
self.logit_scale_init_value = logit_scale_init_value
self.initializer_factor = 1.0
@classmethod
def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs):
r"""
Instantiate a [`CLIPConfig`] (or a derived class) from clip text model configuration and clip vision model
configuration.
Returns:
[`CLIPConfig`]: An instance of a configuration object
"""
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
class CLIPOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
]
)
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
]
)
@property
def atol_for_validation(self) -> float:
return 1e-4
def generate_dummy_inputs(
self,
processor: "ProcessorMixin",
batch_size: int = -1,
seq_length: int = -1,
framework: Optional["TensorType"] = None,
) -> Mapping[str, Any]:
text_input_dict = super().generate_dummy_inputs(
processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
)
image_input_dict = super().generate_dummy_inputs(
processor.image_processor, batch_size=batch_size, framework=framework
)
return {**text_input_dict, **image_input_dict}
@property
def default_onnx_opset(self) -> int:
return 14
| [] |
2024-01-10 | taradepan/Nebula_bot | rag.py | import cohere
import chromadb
import os
import dotenv
import PyPDF2
import numpy as np
import google.generativeai as genai
dotenv.load_dotenv()
genai.configure(api_key=os.environ.get('GEMINI'))
def pdfchat(prompt, data):
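    """Ask Gemini to answer the prompt using the retrieved data, citing the source file name."""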
model = genai.GenerativeModel('gemini-pro')
response=model.generate_content(prompt+""" Use the given data to answer the question,
Also add the file name from where the answer was found
in the format inside triple singe quotes '''Source: filename'''.
Remember that your output will be sent as a message on a chat app.
Data: """+str(data))
response.resolve()
return response.text
cohere_api=os.environ.get('COHERE_API')
co = cohere.Client(cohere_api)
client = chromadb.Client()
collection = client.get_or_create_collection(name="main")
def db(text, embed, file_name, num):
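    """Add one text chunk, its embedding and its source metadata to the Chroma collection."""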
collection.add(
documents=[text+" Source: "+file_name],
embeddings=[embed],
metadatas=[{"file_name": file_name}],
ids=[file_name+" "+num]
)
def embed(file, file_name):
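    """Embed every page of the PDF with Cohere and store each page in the Chroma collection."""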
with open(file, 'rb') as f:
pdf_reader = PyPDF2.PdfReader(f)
for page_num in range(len(pdf_reader.pages)):
page = pdf_reader.pages[page_num]
text = page.extract_text()
response = co.embed(texts=[text], model='embed-multilingual-v3.0', input_type="search_document")
embeddings = response.embeddings[0]
embeddings = np.array(response.embeddings[0]).astype(np.float64).tolist()
db(text, embeddings, file_name, str(page_num))
def query_search(text):
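    """Embed the query, fetch the closest chunks from Chroma and rerank them with Cohere."""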
embedding=co.embed(texts=[text], model='embed-multilingual-v3.0', input_type="search_document")
embedding=embedding.embeddings[0]
embedding=np.array(embedding).astype(np.float64).tolist()
res=collection.query(
query_embeddings=[embedding],
n_results=5,
)
results = co.rerank(model="rerank-english-v2.0", query=text, documents=[str(res["documents"])], top_n=1)
res=[]
for result in results:
document = result.document
res.append(str(document))
return res | [] |
2024-01-10 | Qinghao-Guan/TopMost-TopicModeling | topmost~evaluations~topic_coherence.py | from gensim.corpora import Dictionary
from gensim.models import CoherenceModel
import numpy as np
from tqdm import tqdm
from ..data import file_utils
def compute_TC(reference_texts, dictionary, topics, num_top_words=15, cv_type='c_v'):
cm = CoherenceModel(texts=reference_texts, dictionary=dictionary, topics=topics, topn=num_top_words, coherence=cv_type)
cv_per_topic = cm.get_coherence_per_topic()
score = np.mean(cv_per_topic)
return score
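# Usage sketch (illustrative names, not part of this module): with tokenized reference
# texts `docs` and a list of topic top-word lists `topics`, coherence can be scored as:
#   dictionary = Dictionary(docs)
#   score = compute_TC(docs, dictionary, topics, num_top_words=10)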
def compute_dynamic_TC(train_texts, train_times, vocab, top_words_list, num_top_words=15, cv_type='c_v'):
dictionary = Dictionary(file_utils.split_text_word(vocab))
split_train_texts = file_utils.split_text_word(train_texts)
cv_score_list = list()
for time in tqdm(range(len(top_words_list))):
# use the texts of the time slice as reference.
idx = np.where(train_times == time)[0]
reference_texts = [split_train_texts[i] for i in idx]
# use the the topics at the time slice
top_words = top_words_list[time]
split_top_words = file_utils.split_text_word(top_words)
cv_score = compute_TC(reference_texts, dictionary, split_top_words, num_top_words, cv_type)
cv_score_list.append(cv_score)
print("===>CV score list: ", cv_score_list)
return np.mean(cv_score_list)
| [] |
2024-01-10 | JacksonZ03/ContentGPT | Embedding%20Generator.py | ### This file is used to generate embeddings for a PDF document and store them in Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.document_loaders import UnstructuredPDFLoader, OnlinePDFLoader, PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.question_answering import load_qa_chain
from langchain.vectorstores import Pinecone
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
import pinecone
import openai
import os
import dotenv
##Get and initialise API Keys and environment variables from .env file or user input:
try:
dotenv.load_dotenv(dotenv.find_dotenv())
use_env = input("Do you want to use the .env file? (y/n)"+ "\n")
while use_env.lower() not in ["y", "yes", "n", "no"]:
print("Not a valid input")
use_env = input("Do you want to use the .env file? (y/n)\n")
if use_env.lower() in ["y", "yes"]:
print("Using .env file.")
# TODO: get api from .env file and check they are valid
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
print("Using OpenAI API key: " + OPENAI_API_KEY + "\n")
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
print("Using Pinecone API key: " + PINECONE_API_KEY + "\n")
PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENVIRONMENT")
print("Using Pinecone environment: " + PINECONE_ENVIRONMENT + "\n")
PINECONE_INDEX_NAME = os.getenv("PINECONE_INDEX_NAME")
print("Using Pinecone index name: " + PINECONE_INDEX_NAME + "\n")
else:
print("No .env file found.")
print("Please enter your API keys manually.")
OPENAI_API_KEY = input("Enter your OpenAI API key: ")
PINECONE_API_KEY = input("Enter your Pinecone API key: ")
PINECONE_ENVIRONMENT = input("Enter your Pinecone environment: ")
PINECONE_INDEX_NAME = input("Enter your Pinecone index name: ")
except:
print("No .env file found.")
# Ask user for OpenAI API key
while True:
try:
OPENAI_API_KEY = input("Enter your OpenAI API key: ")
openai.api_key = OPENAI_API_KEY
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
print("Using OpenAI API key: " + OPENAI_API_KEY + "\n")
break
except:
print("Invalid API key.")
# Ask user for Pinecone API key and environment
while True:
try:
PINECONE_API_KEY = input("Enter your Pinecone API key: ")
PINECONE_ENVIRONMENT = input("Enter your Pinecone environment: ")
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)
print("Using Pinecone API key: " + PINECONE_API_KEY)
print("Using Pinecone environment: " + PINECONE_ENVIRONMENT + "\n")
break
except:
print("Invalid API key or environment.")
# Ask user for Pinecone index name
PINECONE_INDEX_NAME = input("Enter your Pinecone index name: ")
print("Using Pinecone index: " + PINECONE_INDEX_NAME + "\n")
## Initialize OpenAI API key and environment
openai.api_key = OPENAI_API_KEY
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
## Initialize Pinecone using API key and environment
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)
index_name = PINECONE_INDEX_NAME
## Set File directory for PDF document
filePath = os.path.dirname(os.path.realpath(__file__)) + '/docs/' + input("Enter the name of the PDF file you want to create embeddings for: ")
## Load the PDF document
document_loader = UnstructuredPDFLoader(filePath)
data = document_loader.load()
print (f'You currently have {len(data)} document(s) in your data')
## Split the document into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0) ## 2000 characters per chunk - change this to your liking
texts = text_splitter.split_documents(data)
print(f'Finished Splitting - Now you have {len(texts)} documents')
## Vectorize the document chunks and turn them into embeddings
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY, model="text-embedding-ada-002")
while True:
try:
# TODO: this may not be the best way to check if the index exists as it will throw an error if there is a connection issue as well
docsearch = Pinecone.from_texts([t.page_content for t in texts], embeddings, index_name=index_name)
print("Success! Embeddings created and stored in Pinecone index: " + index_name)
break
except:
print("Index doesn't exist.")
# Ask user if they want to create a new index
create_new_index = input("Do you want to create a new index? (y/n)\n").lower()
        while create_new_index not in ["y", "yes", "n", "no"]:
            print("Not a valid input")
create_new_index = input("Do you want to create a new index? (y/n)\n").lower()
if create_new_index in ["y", "yes"]: # User selects "yes"
try:
pinecone.delete_index(index_name=index_name)
except:
pass
print("Creating new index...")
pinecone.create_index(name=index_name, metric="cosine", dimension=1536, pod_type= "p2")
docsearch = Pinecone.from_texts([t.page_content for t in texts], embeddings, index_name=index_name)
print("Success! Embeddings created and stored in Pinecone index: " + index_name)
else:
index_name = input("Enter a different Pinecone index name: ") # User selects "no"
print("Using Pinecone index: " + index_name + "\n") | [] |
2024-01-10 | enescanguven/chobo | backend~helpers~dalle_helper.py | import os
import hashlib
import random
import openai
import logging
import subprocess
COVERS_FOLDER = "covers"
class DallEHelper():
def __init__(self, api_key: str) -> None:
openai.api_key = api_key
def create_image(self, prompt) -> str:
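        """Generate a 512x512 image with DALL-E for the prompt, download it and return the local path."""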
response = openai.Image.create(
prompt=prompt,
n=1,
size="512x512"
)
url = response['data'][0]['url']
fname = hashlib.md5(url.encode()).hexdigest()+".png"
self.download_files(url, fname)
return os.path.join(COVERS_FOLDER, fname)
@staticmethod
def download_files(url, fname):
subprocess.call(["wget", url, "-O", os.path.join(COVERS_FOLDER, fname)])
| [] |
2024-01-10 | enescanguven/chobo | backend~controller~recipe_controller.py | import os
import json
import openai
from controller.object_detection_controller import object_detection_controller
from helpers.dalle_helper import DallEHelper
openai.api_key = os.getenv("OPENAI_API_KEY")
class RecipeController():
def __init__(self) -> None:
pass
def create_recipe_from_prompt_text(self, prompt_text):
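        # The prompt strings below are Turkish; roughly: "Suggest me a recipe based on the
        # ingredient list and details I give you", with the reply forced into the JSON format
        # shown at the end of the prompt.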
prompt_start = "Sana verdiğim malzeme listesi ve bilgiler dahilinde bana bir yemek tarifi öner.\n"
prompt = prompt_start + prompt_text
prompt += """\nTarifi bu formatta olustur. : {"name": "yemek ismi", "ingredients": ["1 bardak sut", "1 çorba kaşığı un"], "instructions" : " ornek ornek"} \n"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system",
"content":
"""Bir aşçı gibi davran. Sana verdiğim malzeme listesinden bana bir yemek tarifi oluştur. Verdiğim listeye sadık kalmaya çalış. Malzemelerin hepsini kullanmak zorunda değilsin. Her evde bulunabilecek malzemeleri de var kabul edebilirsin.Bir aşçı gibi davran. Sana verdiğim malzeme listesinden bana bir yemek tarifi oluştur. Verdiğim listeye sadık kalmaya çalış. Malzemelerin hepsini kullanmak zorunda değilsin. Her evde bulunabilecek malzemeleri de var kabul edebilirsin. Yemeğin adı, içeriği ve yapılışını aşşağıdaki JSON formatinda ver bunun dışında bir şey yazma"""},
{"role": "user", "content": prompt}, ])
print(response["choices"][0]["message"]["content"])
response_text = json.loads(response["choices"][0]["message"]["content"])
dh = DallEHelper(os.getenv("OPENAI_API_KEY"))
image_path = dh.create_image(response_text["name"])
print({"recipe": response_text, "image": image_path.split('/')[1]})
return {"recipe": response_text, "image": image_path.split('/')[1]}
def create_recipe_from_image(self, image_path, choices):
print(image_path, choices)
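        # choice_dict maps the UI's dietary flags to the Turkish labels used in the prompt.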
choice_dict = {
"isVeganSelected": "vegan",
"isVegetarianSelected": "vejetaryen",
"isGlutenFreeSelected": "glutensiz",
"isKetoSelected": "keto diyete uygun",
"isLowCarbSelected": "düşük karbonhidratlı",
"isLowFatSelected": "düşük yağlı",
}
ingredients = object_detection_controller.get_fridge_contents(image_path)
prompt = f"Sana verdiğim malzeme listesinden bana bir {choices['recipeType']} tarifi öner Bu tarif {', '.join([choice_dict[item] for item in choice_dict.keys() if choices[item]])} bir tarif olsun:\n"
print(prompt)
for ingredient in ingredients["ingredients"]:
prompt += f"- {ingredient}\n"
prompt += """\nTarifi bu formatta olustur. : {"name": "yemek ismi", "ingredients": ["1 bardak sut", "1 çorba kaşığı un"], "instructions" : " ornek ornek"} \n"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system",
"content":
"""Bir aşçı gibi davran. Sana verdiğim malzeme listesinden bana bir yemek tarifi oluştur. Verdiğim listeye sadık kalmaya çalış. Malzemelerin hepsini kullanmak zorunda değilsin. Her evde bulunabilecek malzemeleri de var kabul edebilirsin.Bir aşçı gibi davran. Sana verdiğim malzeme listesinden bana bir yemek tarifi oluştur. Verdiğim listeye sadık kalmaya çalış. Malzemelerin hepsini kullanmak zorunda değilsin. Her evde bulunabilecek malzemeleri de var kabul edebilirsin. Yemeğin adı, içeriği ve yapılışını aşşağıdaki JSON formatinda ver bunun dışında bir şey yazma"""},
{"role": "user", "content": prompt}, ])
print(response["choices"][0]["message"]["content"])
response_text = json.loads(response["choices"][0]["message"]["content"])
dh = DallEHelper(os.getenv("OPENAI_API_KEY"))
image_path = dh.create_image(response_text["name"])
print({"recipe": response_text, "image": image_path.split('/')[1], "detected_objects": ingredients["ingredients"]})
return {"recipe": response_text, "image": image_path.split('/')[1], "detected_objects": ingredients["ingredients"]}
recipe_controller = RecipeController()
| [
"- PLACEHOLDER\n",
"recipeType",
"PLACEHOLDERPLACEHOLDER",
"Bir aşçı gibi davran. Sana verdiğim malzeme listesinden bana bir yemek tarifi oluştur. Verdiğim listeye sadık kalmaya çalış. Malzemelerin hepsini kullanmak zorunda değilsin. Her evde bulunabilecek malzemeleri de var kabul edebilirsin.Bir aşçı gibi davran. Sana verdiğim malzeme listesinden bana bir yemek tarifi oluştur. Verdiğim listeye sadık kalmaya çalış. Malzemelerin hepsini kullanmak zorunda değilsin. Her evde bulunabilecek malzemeleri de var kabul edebilirsin. Yemeğin adı, içeriği ve yapılışını aşşağıdaki JSON formatinda ver bunun dışında bir şey yazma",
"Sana verdiğim malzeme listesi ve bilgiler dahilinde bana bir yemek tarifi öner.\n",
"\nTarifi bu formatta olustur. : {\"name\": \"yemek ismi\", \"ingredients\": [\"1 bardak sut\", \"1 çorba kaşığı un\"], \"instructions\" : \" ornek ornek\"} \n",
", "
] |
2024-01-10 | eddieoz/reels-clips-automator | reelsfy.py | import sys
import numpy as np
from pytube import YouTube
import cv2
import subprocess
import openai
import json
from datetime import datetime
import os
from os import path
import shutil
import argparse
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
# Download video
def download_video(url, filename):
yt = YouTube(url)
video = yt.streams.filter(file_extension='mp4').get_highest_resolution()
# Download the video
video.download(filename=filename, output_path='tmp/')
#Segment Video function
def generate_segments(response):
for i, segment in enumerate(response):
print(i, segment)
start_time = segment.get("start_time", 0).split('.')[0]
end_time = segment.get("end_time", 0).split('.')[0]
pt = datetime.strptime(start_time,'%H:%M:%S')
start_time = pt.second + pt.minute*60 + pt.hour*3600
pt = datetime.strptime(end_time,'%H:%M:%S')
end_time = pt.second + pt.minute*60 + pt.hour*3600
if end_time - start_time < 50:
end_time += (50 - (end_time - start_time))
output_file = f"output{str(i).zfill(3)}.mp4"
command = f"ffmpeg -y -hwaccel cuda -i tmp/input_video.mp4 -vf scale='1920:1080' -qscale:v '3' -b:v 6000k -ss {start_time} -to {end_time} tmp/{output_file}"
subprocess.call(command, shell=True)
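# Example of a command produced by the loop above (added for illustration; the
# timestamps and output file name are hypothetical):
#   ffmpeg -y -hwaccel cuda -i tmp/input_video.mp4 -vf scale='1920:1080' -qscale:v '3' -b:v 6000k -ss 75 -to 130 tmp/output000.mp4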
def generate_short(input_file, output_file):
try:
# Interval to switch faces (in frames) (ex. 150 frames = 5 seconds, on a 30fps video)
switch_interval = 150
# Frame counter
frame_count = 0
# Index of the currently displayed face
current_face_index = 0
# Constants for cropping
CROP_RATIO_BIG = 1 # Adjust the ratio to control how much of the image (around face) is visible in the cropped video
CROP_RATIO_SMALL = 0.5
VERTICAL_RATIO = 9 / 16 # Aspect ratio for the vertical video
# Load pre-trained face detector from OpenCV
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# Open video file
cap = cv2.VideoCapture(f"tmp/{input_file}")
# Get the frame dimensions
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(f"Image frame_height {frame_height}, frame_width {frame_width}")
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(f"tmp/{output_file}", fourcc, 30, (1080, 1920)) # Adjust resolution for 9:16 aspect ratio
# success = False
while(cap.isOpened()):
# Read frame from video
ret, frame = cap.read()
if ret == True:
# If we don't have any face positions, detect the faces
# Switch faces if it's time to do so
if frame_count % switch_interval == 0:
# Convert color style from BGR to RGB
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Perform face detection
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=7, minSize=(100, 100))
if len(faces) > 0:
# Initialize trackers and variable to hold face positions
trackers = cv2.legacy.MultiTracker_create()
face_positions = []
for (x, y, w, h) in faces:
face_positions.append((x, y, w, h))
tracker = cv2.legacy.TrackerKCF_create()
tracker.init(frame, (x, y, w, h))
trackers.add(tracker, frame, (x, y, w, h))
# Update trackers and get updated positions
success, boxes = trackers.update(frame)
# Switch faces if it's time to do so
current_face_index = (current_face_index + 1) % len(face_positions)
x, y, w, h = [int(v) for v in boxes[current_face_index]]
print (f"Current Face index {current_face_index} heigth {h} width {w} total faces {len(face_positions)}")
face_center = (x + w//2, y + h//2)
if w * 16 > h * 9:
w_916 = w
h_916 = int(w * 16 / 9)
else:
h_916 = h
w_916 = int(h * 9 / 16)
#Calculate the target width and height for cropping (vertical format)
if max(h, w) < 345:
target_height = int(frame_height * CROP_RATIO_SMALL)
target_width = int(target_height * VERTICAL_RATIO)
else:
target_height = int(frame_height * CROP_RATIO_BIG)
target_width = int(target_height * VERTICAL_RATIO)
# Calculate the top-left corner of the 9:16 rectangle
x_916 = (face_center[0] - w_916 // 2)
y_916 = (face_center[1] - h_916 // 2)
crop_x = max(0, x_916 + (w_916 - target_width) // 2) # Adjust the crop region to center the face
crop_y = max(0, y_916 + (h_916 - target_height) // 2)
crop_x2 = min(crop_x + target_width, frame_width)
crop_y2 = min(crop_y + target_height, frame_height)
# Crop the frame to the face region
crop_img = frame[crop_y:crop_y2, crop_x:crop_x2]
resized = cv2.resize(crop_img, (1080, 1920), interpolation = cv2.INTER_AREA)
out.write(resized)
frame_count += 1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
# Release everything if job is finished
cap.release()
out.release()
cv2.destroyAllWindows()
# Extract audio from original video
command = f"ffmpeg -y -hwaccel cuda -i tmp/{input_file} -vn -acodec copy tmp/output-audio.aac"
subprocess.call(command, shell=True)
# Merge audio and processed video
command = f"ffmpeg -y -hwaccel cuda -i tmp/{output_file} -i tmp/output-audio.aac -c copy tmp/final-{output_file}"
subprocess.call(command, shell=True)
except Exception as e:
print(f"Error during video cropping: {str(e)}")
def generate_viral(transcript): # Inspired by https://github.com/NisaarAgharia/AI-Shorts-Creator
json_template = '''
{ "segments" :
[
{
"start_time": 00.00,
"end_time": 00.00,
"description": "Description of the text",
"duration":00,
},
]
}
'''
prompt = f"Given the following video transcript, analyze each part for potential virality and identify 3 most viral segments from the transcript. Each segment should have nothing less than 50 seconds in duration. The provided transcript is as follows: {transcript}. Based on your analysis, return a JSON document containing the timestamps (start and end), the description of the viral part, and its duration. The JSON document should follow this format: {json_template}. Please replace the placeholder values with the actual results from your analysis."
system = f"You are a Viral Segment Identifier, an AI system that analyzes a video's transcript and predict which segments might go viral on social media platforms. You use factors such as emotional impact, humor, unexpected content, and relevance to current trends to make your predictions. You return a structured JSON document detailing the start and end times, the description, and the duration of the potential viral segments."
messages = [
{"role": "system", "content" : system},
{"role": "user", "content": prompt}
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=messages,
max_tokens=512,
n=1,
stop=None
)
return response.choices[0]['message']
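# Editor's note (illustrative, not part of the original script): the call above returns
# the raw assistant message, so the JSON produced by the model is still a string inside
# its "content" field, e.g. roughly:
#   {"role": "assistant", "content": "{\"segments\": [{\"start_time\": \"00:01:10\", ...}]}"}
# which is why the caller later parses it with json.loads(viral_segments["content"]).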
def generate_subtitle(input_file, output_folder, results_folder):
command = f"auto_subtitle tmp/{input_file} -o {results_folder}/{output_folder} --model medium"
print (command)
subprocess.call(command, shell=True)
def generate_transcript(input_file):
command = f"auto_subtitle tmp/{input_file} --srt_only True --output_srt True -o tmp/ --model medium"
subprocess.call(command, shell=True)
# Read the contents of the input file
try:
with open(f"tmp/{os.path.basename(input_file).split('.')[0]}.srt", 'r', encoding='utf-8') as file:
transcript = file.read()
except IOError:
print("Error: Failed to read the input file.")
sys.exit(1)
return transcript
def __main__():
# Check command line argument
parser = argparse.ArgumentParser(description='Create 3 reels or tiktoks from Youtube video')
parser.add_argument('-v', '--video_id', required=False, help='Youtube video id. Ex: Cuptv7-A4p0 in https://www.youtube.com/watch?v=Cuptv7-A4p0')
parser.add_argument('-f', '--file', required=False, help='Video file to be used')
args = parser.parse_args()
if not args.video_id and not args.file:
print('Needed at least one argument. <command> --help for help')
sys.exit(1)
if args.video_id and args.file:
print('use --video_id or --file')
sys.exit(1)
# Create temp folder
try:
if os.path.exists("tmp"):
shutil.rmtree("tmp")
os.mkdir('tmp')
except OSError as error:
print(error)
filename = 'input_video.mp4'
if args.video_id:
video_id=args.video_id
url = 'https://www.youtube.com/watch?v='+video_id # Replace with your video's URL
# Download video
download_video(url,filename)
if args.file:
video_id = os.path.basename(args.file).split('.')[0]
print(video_id)
if (path.exists(args.file) == True):
command = f"cp {args.file} tmp/input_video.mp4"
subprocess.call(command, shell=True)
else:
print(f"File {args.file} does not exist")
sys.exit(1)
output_folder = 'results'
# Create outputs folder
try:
os.mkdir(f"{output_folder}")
except OSError as error:
print(error)
try:
os.mkdir(f"{output_folder}/{video_id}")
except OSError as error:
print(error)
    # Check whether output_file exists; if it does, reuse its contents instead of calling the OpenAI APIs
output_file = f"{output_folder}/{video_id}/content.txt"
if (path.exists(output_file) == False):
# generate transcriptions
transcript = generate_transcript(filename)
print (transcript)
viral_segments = generate_viral(transcript)
content = viral_segments["content"]
try:
with open(output_file, 'w', encoding='utf-8') as file:
file.write(content)
except IOError:
print("Error: Failed to write the output file.")
sys.exit(1)
print("Full transcription written to ", output_file)
else:
# Read the contents of the input file
try:
with open(output_file, 'r', encoding='utf-8') as file:
content = file.read()
except IOError:
print("Error: Failed to read the input file.")
sys.exit(1)
parsed_content = json.loads(content)
generate_segments(parsed_content['segments'])
# Loop through each segment
for i, segment in enumerate(parsed_content['segments']): # Replace xx with the actual number of segments
input_file = f'output{str(i).zfill(3)}.mp4'
output_file = f'output_cropped{str(i).zfill(3)}.mp4'
generate_short(input_file, output_file)
generate_subtitle(f"final-{output_file}", video_id, output_folder)
__main__()
| [
"\n { \"segments\" :\n [\n {\n \"start_time\": 00.00, \n \"end_time\": 00.00,\n \"description\": \"Description of the text\",\n \"duration\":00,\n }, \n ]\n }\n ",
"Given the following video transcript, analyze each part for potential virality and identify 3 most viral segments from the transcript. Each segment should have nothing less than 50 seconds in duration. The provided transcript is as follows: PLACEHOLDER. Based on your analysis, return a JSON document containing the timestamps (start and end), the description of the viral part, and its duration. The JSON document should follow this format: PLACEHOLDER. Please replace the placeholder values with the actual results from your analysis."
] |
2024-01-10 | dynamic-superb/espnet-whisper | espnet2~train~preprocessor.py | import json
import logging
import random
import re
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Collection, Dict, Iterable, List, Optional, Tuple, Union
import os
import numpy as np
import scipy.signal
import soundfile
from typeguard import check_argument_types, check_return_type
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.cleaner import TextCleaner
from espnet2.text.hugging_face_token_id_converter import HuggingFaceTokenIDConverter
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.text.whisper_token_id_converter import OpenAIWhisperTokenIDConverter
class AbsPreprocessor(ABC):
def __init__(self, train: bool):
self.train = train
@abstractmethod
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
raise NotImplementedError
def framing(
x,
frame_length: int = 512,
frame_shift: int = 256,
centered: bool = True,
padded: bool = True,
):
if x.size == 0:
raise ValueError("Input array size is zero")
if frame_length < 1:
raise ValueError("frame_length must be a positive integer")
if frame_length > x.shape[-1]:
raise ValueError("frame_length is greater than input length")
if 0 >= frame_shift:
raise ValueError("frame_shift must be greater than 0")
if centered:
pad_shape = [(0, 0) for _ in range(x.ndim - 1)] + [
(frame_length // 2, frame_length // 2)
]
x = np.pad(x, pad_shape, mode="constant", constant_values=0)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = frame_length + (nseg-1)*nstep,
# with integer nseg
nadd = (-(x.shape[-1] - frame_length) % frame_shift) % frame_length
pad_shape = [(0, 0) for _ in range(x.ndim - 1)] + [(0, nadd)]
x = np.pad(x, pad_shape, mode="constant", constant_values=0)
# Created strided array of data segments
if frame_length == 1 and frame_length == frame_shift:
result = x[..., None]
else:
shape = x.shape[:-1] + (
(x.shape[-1] - frame_length) // frame_shift + 1,
frame_length,
)
strides = x.strides[:-1] + (frame_shift * x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
return result
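# Illustrative sketch added for clarity (not part of the original ESPnet code): a
# hypothetical helper showing how `framing` slices a 1-D signal into overlapping frames
# when centering and padding are disabled.
def _framing_example():
    x = np.arange(10.0)
    frames = framing(x, frame_length=4, frame_shift=2, centered=False, padded=False)
    # frames.shape == (4, 4); row i is x[2 * i : 2 * i + 4], i.e.
    # [[0. 1. 2. 3.], [2. 3. 4. 5.], [4. 5. 6. 7.], [6. 7. 8. 9.]]
    return frames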
def detect_non_silence(
x: np.ndarray,
threshold: float = 0.01,
frame_length: int = 1024,
frame_shift: int = 512,
window: str = "boxcar",
) -> np.ndarray:
"""Power based voice activity detection.
Args:
x: (Channel, Time)
>>> x = np.random.randn(1000)
>>> detect = detect_non_silence(x)
>>> assert x.shape == detect.shape
    >>> assert detect.dtype == bool
"""
if x.shape[-1] < frame_length:
        return np.full(x.shape, fill_value=True, dtype=bool)
if x.dtype.kind == "i":
x = x.astype(np.float64)
# framed_w: (C, T, F)
framed_w = framing(
x,
frame_length=frame_length,
frame_shift=frame_shift,
centered=False,
padded=True,
)
framed_w *= scipy.signal.get_window(window, frame_length).astype(framed_w.dtype)
# power: (C, T)
power = (framed_w**2).mean(axis=-1)
# mean_power: (C, 1)
mean_power = np.mean(power, axis=-1, keepdims=True)
if np.all(mean_power == 0):
        return np.full(x.shape, fill_value=True, dtype=bool)
# detect_frames: (C, T)
detect_frames = power / mean_power > threshold
# detects: (C, T, F)
detects = np.broadcast_to(
detect_frames[..., None], detect_frames.shape + (frame_shift,)
)
# detects: (C, TF)
detects = detects.reshape(*detect_frames.shape[:-1], -1)
# detects: (C, TF)
return np.pad(
detects,
[(0, 0)] * (x.ndim - 1) + [(0, x.shape[-1] - detects.shape[-1])],
mode="edge",
)
class CommonPreprocessor(AbsPreprocessor):
def __init__(
self,
train: bool,
use_lang_prompt: bool = False,
use_nlp_prompt: bool = False,
use_label: bool = False,
use_prefix: bool = False,
token_type: str = None,
token_list: Union[Path, str, Iterable[str]] = None,
bpemodel: Union[Path, str, Iterable[str]] = None,
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
aux_task_names: Collection[str] = None,
speech_volume_normalize: float = None,
speech_name: str = "speech",
text_name: str = "text",
fs: int = 0,
nonsplit_symbol: Iterable[str] = None,
):
super().__init__(train)
self.train = train
self.speech_name = speech_name
self.text_name = text_name
self.speech_volume_normalize = speech_volume_normalize
self.rir_apply_prob = rir_apply_prob
self.noise_apply_prob = noise_apply_prob
self.short_noise_thres = short_noise_thres
self.aux_task_names = aux_task_names
self.use_lang_prompt = use_lang_prompt
self.use_nlp_prompt = use_nlp_prompt
self.use_label = use_label
self.use_prefix = use_prefix
if token_type is not None:
if token_list is None:
raise ValueError("token_list is required if token_type is not None")
self.text_cleaner = TextCleaner(text_cleaner)
self.tokenizer = build_tokenizer(
token_type=token_type,
bpemodel=bpemodel,
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
g2p_type=g2p_type,
nonsplit_symbol=nonsplit_symbol,
)
if token_type == "hugging_face":
self.token_id_converter = HuggingFaceTokenIDConverter(
model_name_or_path=bpemodel
)
elif bpemodel not in ["whisper_en", "whisper_multilingual"]:
self.token_id_converter = TokenIDConverter(
token_list=token_list,
unk_symbol=unk_symbol,
)
else:
self.token_id_converter = OpenAIWhisperTokenIDConverter(
model_type=bpemodel,
added_tokens_txt=non_linguistic_symbols,
)
else:
self.text_cleaner = None
self.tokenizer = None
self.token_id_converter = None
if train and rir_scp is not None:
self.rirs = []
with open(rir_scp, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
if len(sps) == 1:
self.rirs.append(sps[0])
else:
self.rirs.append(sps[1])
else:
self.rirs = None
if train and noise_scp is not None:
self.noises = []
with open(noise_scp, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
if len(sps) == 1:
self.noises.append(sps[0])
else:
self.noises.append(sps[1])
sps = noise_db_range.split("_")
if len(sps) == 1:
self.noise_db_low = self.noise_db_high = float(sps[0])
elif len(sps) == 2:
self.noise_db_low, self.noise_db_high = float(sps[0]), float(sps[1])
else:
raise ValueError(
"Format error: '{noise_db_range}' e.g. -3_4 -> [-3db,4db]"
)
else:
self.noises = None
if self.use_label and os.path.exists('/home/stan/espnet/egs2/stop/big_superb/dump/raw/test/label.json'):
self.label_table = json.load(open('/home/stan/espnet/egs2/stop/big_superb/dump/raw/test/label.json', 'r'))
def _convolve_rir(self, speech, power, rirs):
rir_path = np.random.choice(rirs)
rir = None
if rir_path is not None:
rir, _ = soundfile.read(rir_path, dtype=np.float64, always_2d=True)
# rir: (Nmic, Time)
rir = rir.T
# speech: (Nmic, Time)
# Note that this operation doesn't change the signal length
speech = scipy.signal.convolve(speech, rir, mode="full")[
:, : speech.shape[1]
]
# Reverse mean power to the original power
power2 = (speech[detect_non_silence(speech)] ** 2).mean()
speech = np.sqrt(power / max(power2, 1e-10)) * speech
return speech, rir
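    # Note added for clarity (editor's interpretation, not original ESPnet text):
    # scipy.signal.convolve with mode="full" lengthens the signal, so only the first
    # speech.shape[1] samples are kept, and the result is rescaled so that its power
    # over non-silent regions matches the pre-convolution `power`.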
def _add_noise(self, speech, power, noises, noise_db_low, noise_db_high):
nsamples = speech.shape[1]
noise_path = np.random.choice(noises)
noise = None
if noise_path is not None:
noise_db = np.random.uniform(noise_db_low, noise_db_high)
with soundfile.SoundFile(noise_path) as f:
if f.frames == nsamples:
noise = f.read(dtype=np.float64, always_2d=True)
elif f.frames < nsamples:
if f.frames / nsamples < self.short_noise_thres:
logging.warning(
f"Noise ({f.frames}) is much shorter than "
f"speech ({nsamples}) in dynamic mixing"
)
offset = np.random.randint(0, nsamples - f.frames)
# noise: (Time, Nmic)
noise = f.read(dtype=np.float64, always_2d=True)
# Repeat noise
noise = np.pad(
noise,
[(offset, nsamples - f.frames - offset), (0, 0)],
mode="wrap",
)
else:
offset = np.random.randint(0, f.frames - nsamples)
f.seek(offset)
# noise: (Time, Nmic)
noise = f.read(nsamples, dtype=np.float64, always_2d=True)
if len(noise) != nsamples:
raise RuntimeError(f"Something wrong: {noise_path}")
# noise: (Nmic, Time)
noise = noise.T
noise_power = (noise**2).mean()
scale = (
10 ** (-noise_db / 20)
* np.sqrt(power)
/ np.sqrt(max(noise_power, 1e-10))
)
speech = speech + scale * noise
return speech, noise
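    # Note added for clarity (editor's interpretation, not original ESPnet text): the
    # scaling above realises a target SNR of `noise_db` dB. With power = E[speech**2]
    # and noise_power = E[noise**2],
    #     scale = 10 ** (-noise_db / 20) * sqrt(power / noise_power)
    # so that 10 * log10(power / (scale ** 2 * noise_power)) == noise_db; e.g.
    # power = 1.0, noise_power = 0.25, noise_db = 10 gives scale ≈ 0.632.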
def _speech_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, Union[str, np.ndarray]]:
assert check_argument_types()
if self.speech_name in data:
if self.train and (self.rirs is not None or self.noises is not None):
speech = data[self.speech_name]
# speech: (Nmic, Time)
if speech.ndim == 1:
speech = speech[None, :]
else:
speech = speech.T
# Calc power on non silence region
power = (speech[detect_non_silence(speech)] ** 2).mean()
# 1. Convolve RIR
if self.rirs is not None and self.rir_apply_prob >= np.random.random():
speech, _ = self._convolve_rir(speech, power, self.rirs)
# 2. Add Noise
if (
self.noises is not None
and self.noise_apply_prob >= np.random.random()
):
speech, _ = self._add_noise(
speech,
power,
self.noises,
self.noise_db_low,
self.noise_db_high,
)
speech = speech.T
ma = np.max(np.abs(speech))
if ma > 1.0:
speech /= ma
data[self.speech_name] = speech
if self.speech_volume_normalize is not None:
speech = data[self.speech_name]
ma = np.max(np.abs(speech))
data[self.speech_name] = speech * self.speech_volume_normalize / ma
assert check_return_type(data)
return data
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
def pad(l, n):
return l[:n] + [50257]*(n-len(l))
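        # Editor's note (assumption, not original ESPnet text): 50257 appears to act as
        # a padding id here (it matches <|endoftext|> in the multilingual Whisper
        # vocabulary), so label candidates of different lengths can be stacked into a
        # single array further below.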
# def parse_label(label_str):
# label_str = label_str.split('The answer could be ')[-1]
# [label1, label2] = label_str.replace('.', '').split('or ')
# if ',' in label1:
# label1 = label1.split(',')
# else:
# label1 = [label1]
# return [l.strip() for l in label1 if l != ' '] + [label2.strip()]
if self.text_name in data and self.tokenizer is not None:
text = data[self.text_name]
if isinstance(text, np.ndarray):
return data
text = self.text_cleaner(text)
label_tokens = None
# For inference to add prompt
if text == "''":
text = ""
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
if len(text_ints) > 500:
logging.warning(
"The length of the text output exceeds 500, "
"which may cause OOM on the GPU."
"Please ensure that the data processing is correct and verify it."
)
actual_token=self.token_id_converter.tokenizer.tokenizer.convert_ids_to_tokens(text_ints)
if self.use_label:
label = data["label"].split(",")
label_tokens = [self.tokenizer.text2tokens(l) for l in label]
label_ints = [self.token_id_converter.tokenizer.tokenizer.convert_tokens_to_ids(l) for l in label_tokens]
max_length = max([len(l) for l in label_ints])
label_ints = [pad(l, max_length) for l in label_ints]
data['label'] = np.array(label_ints, dtype=np.int64)
if self.use_lang_prompt:
if data["prompt"]=="<|nospeech|>":
actual_token=[data["prompt"]]
else:
actual_token=data["prompt"].split()+actual_token[2:]
elif self.use_nlp_prompt :
assert 'prompt' in data, "prompt not in data"
prompt_tokens = self.tokenizer.text2tokens(data["prompt"])
# actual_token=prompt_tokens + [actual_token[0]] + actual_token[2:]
prompt_ints=self.token_id_converter.tokenizer.tokenizer.convert_tokens_to_ids(prompt_tokens)
data["prompt"] = np.array(prompt_ints, dtype=np.int64)
elif "prompt" in data:
if len(data["prompt"].split())>1:
actual_token=[actual_token[0]]+data["prompt"].split()+actual_token[2:]
else:
actual_token[1]=data["prompt"]
if self.use_prefix:
assert 'prefix' in data, "prefix not in data"
prefix_tokens = self.tokenizer.text2tokens(data["prefix"])
# actual_token=prompt_tokens + [actual_token[0]] + actual_token[2:]
prefix_ints=self.token_id_converter.tokenizer.tokenizer.convert_tokens_to_ids(prefix_tokens)
data["prefix"] = np.array(prefix_ints, dtype=np.int64)
text_ints=self.token_id_converter.tokenizer.tokenizer.convert_tokens_to_ids(actual_token)
data[self.text_name] = np.array(text_ints, dtype=np.int64)
# if "prompt" in data:
# if len(data["prompt"].split())>1:
# data["prompt"] = np.array(self.token_id_converter.tokenizer.tokenizer.convert_tokens_to_ids(data["prompt"].split()), dtype=np.int64)
# else:
# data["prompt"] = np.array([self.token_id_converter.tokenizer.tokenizer.convert_tokens_to_ids(data["prompt"])], dtype=np.int64)
if self.aux_task_names is not None and self.tokenizer is not None:
for name in self.aux_task_names:
if name in data:
text = data[name]
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[name] = np.array(text_ints, dtype=np.int64)
assert check_return_type(data)
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
assert check_argument_types()
data = self._speech_process(data)
data = self._text_process(data)
return data
class SLUPreprocessor(CommonPreprocessor):
def __init__(
self,
train: bool,
token_type: str = None,
token_list: Union[Path, str, Iterable[str]] = None,
transcript_token_list: Union[Path, str, Iterable[str]] = None,
bpemodel: Union[Path, str, Iterable[str]] = None,
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
speech_volume_normalize: float = None,
speech_name: str = "speech",
text_name: str = "text",
):
super().__init__(
train=train,
token_type=token_type,
token_list=token_list,
bpemodel=bpemodel,
text_cleaner=text_cleaner,
g2p_type=g2p_type,
unk_symbol=unk_symbol,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
delimiter=delimiter,
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
speech_volume_normalize=speech_volume_normalize,
speech_name=speech_name,
text_name=text_name,
)
if transcript_token_list is not None:
print("using transcript")
self.transcript_tokenizer = build_tokenizer(
token_type="word",
bpemodel=bpemodel,
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
g2p_type=g2p_type,
)
self.transcript_token_id_converter = TokenIDConverter(
token_list=transcript_token_list,
unk_symbol=unk_symbol,
)
else:
self.transcript_tokenizer = None
self.transcript_token_id_converter = None
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
if self.text_name in data and self.tokenizer is not None:
text = data[self.text_name]
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[self.text_name] = np.array(text_ints, dtype=np.int64)
if "transcript" in data and self.tokenizer is not None:
text = data["transcript"]
text = self.text_cleaner(text)
tokens = self.transcript_tokenizer.text2tokens(text)
text_ints = self.transcript_token_id_converter.tokens2ids(tokens)
data["transcript"] = np.array(text_ints, dtype=np.int64)
assert check_return_type(data)
return data
class CommonPreprocessor_multi(CommonPreprocessor):
def __init__(
self,
train: bool,
token_type: str = None,
token_list: Union[Path, str, Iterable[str]] = None,
bpemodel: Union[Path, str, Iterable[str]] = None,
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
aux_task_names: Collection[str] = None,
speech_volume_normalize: float = None,
speech_name: str = "speech",
text_name: List[str] = ["text"],
fs: int = 0,
speaker_change_symbol: Iterable[str] = None,
):
super().__init__(
train=train,
token_type=token_type,
token_list=token_list,
bpemodel=bpemodel,
text_cleaner=text_cleaner,
g2p_type=g2p_type,
unk_symbol=unk_symbol,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
delimiter=delimiter,
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
aux_task_names=aux_task_names,
speech_volume_normalize=speech_volume_normalize,
speech_name=speech_name,
fs=fs,
nonsplit_symbol=speaker_change_symbol,
)
if isinstance(text_name, str):
self.text_name = [text_name]
else:
self.text_name = text_name
self.speaker_change_symbol = speaker_change_symbol
if speaker_change_symbol is not None:
assert (
len(self.text_name) == 1
), "SOT model with speaker_change_symbol only support single text input."
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
for text_n in self.text_name:
if text_n in data and self.tokenizer is not None:
text = data[text_n]
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[text_n] = np.array(text_ints, dtype=np.int64)
if self.aux_task_names is not None and self.tokenizer is not None:
for name in self.aux_task_names:
if name in data:
text = data[name]
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[name] = np.array(text_ints, dtype=np.int64)
assert check_return_type(data)
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
assert check_argument_types()
data = self._speech_process(data)
data = self._text_process(data)
return data
class MutliTokenizerCommonPreprocessor(CommonPreprocessor):
def __init__(
self,
train: bool,
token_type: List[str] = [None],
token_list: List[Union[Path, str, Iterable[str]]] = [None],
bpemodel: List[Union[Path, str, Iterable[str]]] = [None],
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
speech_volume_normalize: float = None,
speech_name: str = "speech",
text_name: List[str] = ["text"],
tokenizer_encode_conf: List[Dict] = [dict(), dict()],
):
# TODO(jiatong): sync with Kamo and Jing on interface for preprocessor
super().__init__(
train=train,
token_type=token_type[0],
token_list=token_list[0],
bpemodel=bpemodel[0],
text_cleaner=text_cleaner,
g2p_type=g2p_type,
unk_symbol=unk_symbol,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
delimiter=delimiter,
speech_name=speech_name,
text_name=text_name[0],
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
speech_volume_normalize=speech_volume_normalize,
)
assert (
len(token_type) == len(token_list) == len(bpemodel) == len(text_name)
), "token_type, token_list, bpemodel, or processing text_name mismatched"
self.num_tokenizer = len(token_type)
self.tokenizer = []
self.token_id_converter = []
for i in range(self.num_tokenizer):
if token_type[i] is not None:
if token_list[i] is None:
raise ValueError("token_list is required if token_type is not None")
self.tokenizer.append(
build_tokenizer(
token_type=token_type[i],
bpemodel=bpemodel[i],
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
g2p_type=g2p_type,
encode_kwargs=tokenizer_encode_conf[i]
if i < len(tokenizer_encode_conf)
else None,
)
)
self.token_id_converter.append(
TokenIDConverter(
token_list=token_list[i],
unk_symbol=unk_symbol,
)
)
else:
self.tokenizer.append(None)
self.token_id_converter.append(None)
self.text_cleaner = TextCleaner(text_cleaner)
self.text_name = text_name # override the text_name from CommonPreprocessor
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
for i in range(self.num_tokenizer):
text_name = self.text_name[i]
if text_name in data and self.tokenizer[i] is not None:
text = data[text_name]
text = self.text_cleaner(text)
tokens = self.tokenizer[i].text2tokens(text)
text_ints = self.token_id_converter[i].tokens2ids(tokens)
data[text_name] = np.array(text_ints, dtype=np.int64)
assert check_return_type(data)
return data
class DynamicMixingPreprocessor(AbsPreprocessor):
def __init__(
self,
train: bool,
source_scp: str = None,
ref_num: int = 2,
dynamic_mixing_gain_db: float = 0.0,
speech_name: str = "speech_mix",
speech_ref_name_prefix: str = "speech_ref",
mixture_source_name: str = None,
utt2spk: str = None,
categories: Optional[List] = None,
):
super().__init__(train)
self.source_scp = source_scp
self.ref_num = ref_num
self.dynamic_mixing_gain_db = dynamic_mixing_gain_db
self.speech_name = speech_name
self.speech_ref_name_prefix = speech_ref_name_prefix
# mixture_source_name: the key to select source utterances from dataloader
if mixture_source_name is None:
self.mixture_source_name = f"{speech_ref_name_prefix}1"
else:
self.mixture_source_name = mixture_source_name
self.sources = {}
assert (
source_scp is not None
), f"Please pass `source_scp` to {type(self).__name__}"
with open(source_scp, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
assert len(sps) == 2
self.sources[sps[0]] = sps[1]
self.utt2spk = {}
if utt2spk is None:
# if utt2spk is not provided, create a dummy utt2spk with uid.
for key in self.sources.keys():
self.utt2spk[key] = key
else:
with open(utt2spk, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
assert len(sps) == 2
self.utt2spk[sps[0]] = sps[1]
for key in self.sources.keys():
assert key in self.utt2spk
self.source_keys = list(self.sources.keys())
# Map each category into a unique integer
self.categories = {}
if categories:
count = 0
for c in categories:
if c not in self.categories:
self.categories[c] = count
count += 1
def _pick_source_utterances_(self, uid):
# return (ref_num - 1) uid of reference sources.
source_keys = [uid]
spk_ids = [self.utt2spk[uid]]
retry_cnt = 0
while len(source_keys) < self.ref_num:
picked = random.choice(self.source_keys)
spk_id = self.utt2spk[picked]
            # ensure each utterance and each speaker appears at most once in a mixture.
if (picked not in source_keys) and (spk_id not in spk_ids):
source_keys.append(picked)
else:
retry_cnt += 1
if retry_cnt > 10:
source_keys.append(picked)
logging.warning(
"Can not find speech source from different speaker "
f"for {retry_cnt} times."
"There may be problems with training data. "
"Please check the utt2spk file."
)
return source_keys[1:]
def _read_source_(self, key, speech_length):
source, _ = soundfile.read(
self.sources[key],
dtype=np.float32,
always_2d=False,
)
if speech_length > source.shape[0]:
pad = speech_length - source.shape[0]
source = np.pad(source, (0, pad), "reflect")
else:
source = source[0:speech_length]
assert speech_length == source.shape[0]
return source
def _mix_speech_(self, uid, data):
# pick sources
source_keys = self._pick_source_utterances_(uid)
# load audios
speech_length = data[self.mixture_source_name].shape[0]
ref_audios = [self._read_source_(key, speech_length) for key in source_keys]
ref_audios = [data[self.mixture_source_name]] + ref_audios
# apply random gain to speech sources
gain_in_db = [
random.uniform(-self.dynamic_mixing_gain_db, self.dynamic_mixing_gain_db)
for i in range(len(ref_audios))
]
gain = [10 ** (g_db / 20.0) for g_db in gain_in_db]
ref_audios = [ref * g for ref, g in zip(ref_audios, gain)]
speech_mix = np.sum(np.array(ref_audios), axis=0)
for i, ref in enumerate(ref_audios):
data[f"{self.speech_ref_name_prefix}{i+1}"] = ref
data[self.speech_name] = speech_mix
return data
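    # Illustrative note (added, not original ESPnet text): gains are drawn uniformly in
    # dB and converted to linear amplitude with 10 ** (g_db / 20), e.g. +6 dB ≈ 1.995x
    # and -6 dB ≈ 0.501x, before the scaled sources are summed into `speech_mix`.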
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
# TODO(Chenda): need to test for multi-channel data.
assert (
len(data[self.mixture_source_name].shape) == 1
), "Multi-channel input has not been tested"
# Add the category information (an integer) to `data`
if not self.categories and "category" in data:
raise ValueError(
"categories must be set in the config file when utt2category files "
"exist in the data directory (e.g., dump/raw/*/utt2category)"
)
if self.categories and "category" in data:
category = data.pop("category")
assert category in self.categories, category
data["utt2category"] = np.array([self.categories[category]])
if self.train:
data = self._mix_speech_(uid, data)
assert check_return_type(data)
return data
class EnhPreprocessor(CommonPreprocessor):
"""Preprocessor for Speech Enhancement (Enh) task."""
def __init__(
self,
train: bool,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
speech_volume_normalize: float = None,
speech_name: str = "speech_mix",
speech_ref_name_prefix: str = "speech_ref",
noise_ref_name_prefix: str = "noise_ref",
dereverb_ref_name_prefix: str = "dereverb_ref",
use_reverberant_ref: bool = False,
num_spk: int = 1,
num_noise_type: int = 1,
sample_rate: int = 8000,
force_single_channel: bool = False,
channel_reordering: bool = False,
categories: Optional[List] = None,
):
super().__init__(
train=train,
token_type=None,
token_list=None,
bpemodel=None,
text_cleaner=None,
g2p_type=None,
unk_symbol="<unk>",
space_symbol="<space>",
non_linguistic_symbols=None,
delimiter=None,
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
speech_volume_normalize=speech_volume_normalize,
speech_name=speech_name,
)
self.speech_ref_name_prefix = speech_ref_name_prefix
self.noise_ref_name_prefix = noise_ref_name_prefix
self.dereverb_ref_name_prefix = dereverb_ref_name_prefix
self.use_reverberant_ref = use_reverberant_ref
self.num_spk = num_spk
self.num_noise_type = num_noise_type
self.sample_rate = sample_rate
self.rir_scp = rir_scp
self.noise_scp = noise_scp
self.noise_db_range = noise_db_range
# Whether to always convert the signals to single-channel
self.force_single_channel = force_single_channel
# If True, randomly reorder the channels of the multi-channel signals
self.channel_reordering = channel_reordering
# Map each category into a unique integer
self.categories = {}
if categories:
count = 0
for c in categories:
if c not in self.categories:
self.categories[c] = count
count += 1
if self.speech_volume_normalize is not None:
sps = speech_volume_normalize.split("_")
if len(sps) == 1:
                self.volume_low = self.volume_high = float(sps[0])
elif len(sps) == 2:
self.volume_low, self.volume_high = float(sps[0]), float(sps[1])
else:
raise ValueError(
"Format error for --speech_volume_normalize: "
f"'{speech_volume_normalize}'"
)
def __basic_str__(self):
msg = f", num_spk={self.num_spk}"
for key in (
"force_single_channel",
"channel_reordering",
"speech_volume_normalize",
):
if getattr(self, key):
msg += f", {key}={getattr(self, key)}"
if self.rirs is not None and self.rir_apply_prob > 0:
msg += f", sample_rate={self.sample_rate}"
msg += f", rir_scp={self.rir_scp}, rir_apply_prob={self.rir_apply_prob}"
if self.use_reverberant_ref:
msg += f", use_reverberant_ref={self.use_reverberant_ref}"
if self.noises is not None and self.noise_apply_prob > 0:
msg += f", noise_scp={self.noise_scp}"
msg += f", noise_apply_prob={self.noise_apply_prob}"
msg += f", noise_db_range={self.noise_db_range}"
if self.categories:
if len(self.categories) <= 10:
msg += f", categories={self.categories}"
else:
msg += f", num_category={len(self.categories)}"
return msg
def __repr__(self):
name = self.__class__.__module__ + "." + self.__class__.__name__
msg = f"{name}(train={self.train}"
msg += self.__basic_str__()
return msg + ")"
def _ensure_2d(self, signal):
if isinstance(signal, tuple):
return tuple(self._ensure_2d(sig) for sig in signal)
elif isinstance(signal, list):
return [self._ensure_2d(sig) for sig in signal]
else:
# (Nmic, Time)
return signal[None, :] if signal.ndim == 1 else signal.T
def _get_early_signal(self, speech, rir, power):
predelay = 50 # milliseconds
dt = np.argmax(rir, axis=1).min()
et = dt + (predelay * self.sample_rate) // 1000
rir_early = rir[:, :et]
speech2 = scipy.signal.convolve(speech, rir_early, mode="full")[
:, : speech.shape[1]
]
# Reverse mean power to the original power
power2 = (speech2[detect_non_silence(speech2)] ** 2).mean()
speech2 = np.sqrt(power / max(power2, 1e-10)) * speech2
return speech2
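    # Note added for clarity (editor's interpretation, not original ESPnet text): only
    # the direct path plus roughly the first `predelay` (50 ms) of reflections after the
    # earliest per-channel peak of the RIR is kept, which is used as the clean or
    # dereverberated reference signal in `_speech_process`.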
def _apply_to_all_signals(self, data_dict, func):
data_dict[self.speech_name] = func(data_dict[self.speech_name])
for n in range(self.num_noise_type):
noise_name = self.noise_ref_name_prefix + str(n + 1)
if noise_name in data_dict:
data_dict[noise_name] = func(data_dict[noise_name])
for spk in range(self.num_spk):
speech_ref_name = self.speech_ref_name_prefix + str(spk + 1)
if self.train or speech_ref_name in data_dict:
data_dict[speech_ref_name] = func(data_dict[speech_ref_name])
dereverb_ref_name = self.dereverb_ref_name_prefix + str(spk + 1)
if dereverb_ref_name in data_dict:
data_dict[dereverb_ref_name] = func(data_dict[dereverb_ref_name])
def _speech_process(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, Union[str, np.ndarray]]:
assert check_argument_types()
if self.speech_name not in data:
assert check_return_type(data)
return data
speech_mix = data[self.speech_name]
# Reorder channels of the multi-channel signals
if speech_mix.ndim > 1 and self.channel_reordering and self.train:
num_ch = speech_mix.shape[-1]
# chs = np.random.choice(range(num_ch), size=num_ch, replace=False).tolist()
chs = np.random.permutation(num_ch).tolist()
data[self.speech_name] = speech_mix[..., chs]
for i in range(self.num_spk):
k = self.speech_ref_name_prefix + str(i + 1)
if data[k].ndim > 1:
assert data[k].shape == speech_mix.shape
data[k] = data[k][..., chs]
# Add the category information (an integer) to `data`
if not self.categories and "category" in data:
raise ValueError(
"categories must be set in the config file when utt2category files "
"exist in the data directory (e.g., dump/raw/*/utt2category)"
)
if self.categories and "category" in data:
category = data.pop("category")
assert category in self.categories, category
data["utt2category"] = np.array([self.categories[category]])
if self.train:
# clean speech signal (Nmic, Time)
speech_ref = [
self._ensure_2d(data[self.speech_ref_name_prefix + str(i + 1)])
for i in range(self.num_spk)
]
# dereverberated (noisy) signal (Nmic, Time)
if "dereverb_ref1" in data:
dereverb_speech_ref = [
self._ensure_2d(data[self.dereverb_ref_name_prefix + str(i + 1)])
for i in range(self.num_spk)
if self.dereverb_ref_name_prefix + str(i + 1) in data
]
assert len(dereverb_speech_ref) in (1, self.num_spk), len(
dereverb_speech_ref
)
else:
dereverb_speech_ref = None
# Calc power on non silence region
power_ref = [
(sref[detect_non_silence(sref)] ** 2).mean() for sref in speech_ref
]
speech_mix = self._ensure_2d(data[self.speech_name])
# 1. Convolve RIR
if self.rirs is not None and self.rir_apply_prob >= np.random.random():
if self.noise_ref_name_prefix + "1" in data:
noise = data[self.noise_ref_name_prefix + "1"]
np.testing.assert_allclose(
np.squeeze(sum(speech_ref) + noise), np.squeeze(speech_mix)
)
else:
np.testing.assert_allclose(
np.squeeze(sum(speech_ref)), np.squeeze(speech_mix)
)
speech_ref, rir_ref = zip(
*[
self._convolve_rir(sp, power, self.rirs)
for sp, power in zip(speech_ref, power_ref)
]
)
if self.force_single_channel:
speech_ref = list(
map(lambda x: x if x.shape[0] == 1 else x[:1], speech_ref)
)
rir_ref = list(
map(lambda x: x if x.shape[0] == 1 else x[:1], rir_ref)
)
if self.use_reverberant_ref:
for spk in range(self.num_spk):
suffix = str(spk + 1)
speech_ref_name = self.speech_ref_name_prefix + suffix
# (Time, Nmic)
data[speech_ref_name] = speech_ref[spk].T
if dereverb_speech_ref is not None:
if spk == 0 or len(dereverb_speech_ref) > 1:
dereverb_name = self.dereverb_ref_name_prefix + suffix
data[dereverb_name] = self._get_early_signal(
speech_ref[spk], rir_ref[spk], power_ref[spk]
).T
else:
for spk in range(self.num_spk):
suffix = str(spk + 1)
speech_ref_name = self.speech_ref_name_prefix + suffix
# clean speech with early reflections (Time, Nmic)
data[speech_ref_name] = self._get_early_signal(
speech_ref[spk], rir_ref[spk], power_ref[spk]
).T
if dereverb_speech_ref is not None:
if spk == 0 or len(dereverb_speech_ref) > 1:
dereverb_name = self.dereverb_ref_name_prefix + suffix
data[dereverb_name] = data[speech_ref_name]
if self.noise_ref_name_prefix + "1" in data:
speech_mix = sum(speech_ref) + noise
else:
speech_mix = sum(speech_ref)
# 2. Add Noise
if self.noises is not None and self.noise_apply_prob >= np.random.random():
if self.noise_ref_name_prefix + "1" in data:
speech_mix -= data[self.noise_ref_name_prefix + "1"]
power_mix = (speech_mix[detect_non_silence(speech_mix)] ** 2).mean()
speech_mix, noise = self._add_noise(
speech_mix,
power_mix,
self.noises,
self.noise_db_low,
self.noise_db_high,
)
if self.force_single_channel:
if speech_mix.shape[0] > 1:
speech_mix = speech_mix[:1]
if noise.shape[0] > 1:
noise = noise[:1]
for n in range(1, self.num_noise_type):
name = self.noise_ref_name_prefix + str(n + 1)
data.pop(name, None)
data[self.noise_ref_name_prefix + "1"] = noise.T
speech_mix = speech_mix.T
data[self.speech_name] = speech_mix
ma = np.max(np.abs(speech_mix))
if ma > 1.0:
self._apply_to_all_signals(data, lambda x: x / ma)
self._apply_to_all_signals(data, lambda x: x.squeeze())
if self.force_single_channel:
self._apply_to_all_signals(data, lambda x: x if x.ndim == 1 else x[:, 0])
if self.speech_volume_normalize is not None:
if self.train:
volume_scale = np.random.uniform(self.volume_low, self.volume_high)
else:
# use a fixed scale to make it deterministic
volume_scale = self.volume_low
speech_mix = data[self.speech_name]
ma = np.max(np.abs(speech_mix))
self._apply_to_all_signals(data, lambda x: x * volume_scale / ma)
assert check_return_type(data)
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
assert check_argument_types()
data = self._speech_process(uid, data)
data = self._text_process(data)
return data
class SVSPreprocessor(AbsPreprocessor):
"""Preprocessor for Sing Voice Sythesis (SVS) task."""
def __init__(
self,
train: bool,
token_type: str = None,
token_list: Union[Path, str, Iterable[str]] = None,
bpemodel: Union[Path, str, Iterable[str]] = None,
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
singing_volume_normalize: float = None,
singing_name: str = "singing",
text_name: str = "text",
label_name: str = "label",
midi_name: str = "score",
fs: np.int32 = 0,
hop_length: np.int32 = 256,
phn_seg: dict = {
1: [1],
2: [0.25, 1],
3: [0.1, 0.5, 1],
4: [0.05, 0.1, 0.5, 1],
},
):
super().__init__(train)
self.train = train
self.singing_name = singing_name
self.text_name = text_name
self.label_name = label_name
self.midi_name = midi_name
self.fs = fs
self.hop_length = hop_length
self.singing_volume_normalize = singing_volume_normalize
self.phn_seg = phn_seg
self.time_shift = hop_length / fs
if token_type is not None:
if token_list is None:
raise ValueError("token_list is required if token_type is not None")
self.text_cleaner = TextCleaner(text_cleaner)
self.tokenizer = build_tokenizer(
token_type=token_type,
bpemodel=bpemodel,
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
g2p_type=g2p_type,
)
self.token_id_converter = TokenIDConverter(
token_list=token_list,
unk_symbol=unk_symbol,
)
else:
self.text_cleaner = None
self.tokenizer = None
self.token_id_converter = None
def __call__(
self,
uid: str,
data: Dict[str, Union[str, np.ndarray, tuple]],
) -> Dict[str, np.ndarray]:
assert check_argument_types()
if self.singing_name in data:
if self.singing_volume_normalize is not None:
singing = data[self.singing_name]
ma = np.max(np.abs(singing))
data[self.singing_name] = singing * self.singing_volume_normalize / ma
if self.midi_name in data and self.label_name in data:
# Load label info
lab_timeseq, text = data[self.label_name]
lab_len = len(text)
text = " ".join(text)
text = self.text_cleaner(text)
text = text.split(" ")
text_ints = self.token_id_converter.tokens2ids(text)
data.pop(self.label_name)
label = np.zeros((lab_len))
midi = np.zeros((lab_len))
duration_phn = np.zeros((lab_len))
duration_ruled_phn = np.zeros((lab_len))
duration_syb = np.zeros((lab_len))
slur = np.zeros((lab_len))
# Load score info
tempo, syb_info = data[self.midi_name]
phn_cnt = []
# Calculate features
index_lab = 0
for st, et, syb, note, phns in syb_info:
dur = et - st
_duration_syb = int(dur / self.time_shift + 0.5)
phone = phns.split("_")
phn_num = len(phone)
phn_cnt.append(phn_num)
pre_seg = 0
for k in range(phn_num):
_duration_ruled_phn = int(
(self.phn_seg[phn_num][k] - pre_seg) * dur / self.time_shift
+ 0.5
)
pre_seg = self.phn_seg[phn_num][k]
# timeseq from lab
assert text[index_lab] == phone[k]
_duration_phn = int(
(lab_timeseq[index_lab][1] - lab_timeseq[index_lab][0])
/ self.time_shift
+ 0.5
)
# phone level feature
label[index_lab] = text_ints[index_lab]
midi[index_lab] = note
duration_phn[index_lab] = _duration_phn
duration_ruled_phn[index_lab] = _duration_ruled_phn
duration_syb[index_lab] = _duration_syb
if syb == "—":
slur[index_lab] = 1
else:
slur[index_lab] = 0
index_lab += 1
assert index_lab == lab_len
data.pop(self.midi_name)
phn_cnt = np.array(phn_cnt)
label = label.astype(np.int64)
midi = midi.astype(np.int64)
duration_phn = duration_phn.astype(np.int64)
duration_syb = duration_syb.astype(np.int64)
duration_ruled_phn = duration_ruled_phn.astype(np.int64)
phn_cnt = phn_cnt.astype(np.int64)
slur = slur.astype(np.int64)
data["label"] = label
data["midi"] = midi
data["duration_phn"] = duration_phn
data["duration_ruled_phn"] = duration_ruled_phn
data["duration_syb"] = duration_syb
data["phn_cnt"] = phn_cnt
data["slur"] = slur
# TODO(Yuning): Add score from midi
if self.text_name in data and self.tokenizer is not None:
# FIX ME (Yuning): wrong transfer happen in pyopenjtalk
text = data[self.text_name]
if not isinstance(text, np.ndarray):
if not isinstance(text, str):
text = " ".join(text)
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
_text_ints = self.token_id_converter.tokens2ids(tokens)
data[self.text_name] = np.array(_text_ints, dtype=np.int64)
return data
class TSEPreprocessor(EnhPreprocessor):
"""Preprocessor for Target Speaker Extraction."""
def __init__(
self,
train: bool,
train_spk2enroll: str = None,
enroll_segment: int = None,
load_spk_embedding: bool = False,
load_all_speakers: bool = False,
# inherited from EnhPreprocessor
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
speech_volume_normalize: float = None,
speech_name: str = "speech_mix",
speech_ref_name_prefix: str = "speech_ref",
noise_ref_name_prefix: str = "noise_ref",
dereverb_ref_name_prefix: str = "dereverb_ref",
use_reverberant_ref: bool = False,
num_spk: int = 1,
num_noise_type: int = 1,
sample_rate: int = 8000,
force_single_channel: bool = False,
channel_reordering: bool = False,
categories: Optional[List] = None,
):
super().__init__(
train,
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
speech_volume_normalize=speech_volume_normalize,
speech_name=speech_name,
speech_ref_name_prefix=speech_ref_name_prefix,
noise_ref_name_prefix=noise_ref_name_prefix,
dereverb_ref_name_prefix=dereverb_ref_name_prefix,
use_reverberant_ref=use_reverberant_ref,
num_spk=num_spk,
num_noise_type=num_noise_type,
sample_rate=sample_rate,
force_single_channel=force_single_channel,
channel_reordering=channel_reordering,
categories=categories,
)
# If specified, the enrollment will be chomped to the specified length
self.enroll_segment = enroll_segment
# If True, the speaker embedding will be loaded instead of enrollment audios
self.load_spk_embedding = load_spk_embedding
# If False, only one of the speakers in each mixture sample will be loaded
self.load_all_speakers = load_all_speakers
if train and rir_scp is not None and rir_apply_prob > 0:
logging.warning(
"Be cautious when applying RIRs on the fly in the TSE task! "
"Please ensure `speech_ref` sums up to `speech_mix` for each sample."
)
if train:
if train_spk2enroll is None:
logging.info("Using fixed enrollment for each sample")
self.train_spk2enroll = None
else:
logging.info("Using dynamically sampled enrollment for each sample")
with open(train_spk2enroll, "r", encoding="utf-8") as f:
# {spkID: [(uid1, path1), (uid2, path2), ...]}
self.train_spk2enroll = json.load(f)
else:
self.train_spk2enroll = None
def __repr__(self):
name = self.__class__.__module__ + "." + self.__class__.__name__
msg = f"{name}(train={self.train}"
if self.train_spk2enroll:
msg += f", len(train_spk2enroll)={len(self.train_spk2enroll)}"
for key in ("enroll_segment", "load_spk_embedding", "load_all_speakers"):
if getattr(self, key):
msg += f", {key}={getattr(self, key)}"
msg += self.__basic_str__()
return msg + ")"
def _read_audio_segment(self, path, seg_len=None):
with soundfile.SoundFile(path) as f:
if seg_len is None or f.frames == seg_len:
audio = f.read(dtype=np.float32, always_2d=True)
elif f.frames < seg_len:
offset = np.random.randint(0, seg_len - f.frames)
# audio: (Time, Nmic)
audio = f.read(dtype=np.float32, always_2d=True)
# Repeat audio
audio = np.pad(
audio,
[(offset, seg_len - f.frames - offset), (0, 0)],
mode="wrap",
)
else:
offset = np.random.randint(0, f.frames - seg_len)
f.seek(offset)
# audio: (Time, Nmic)
audio = f.read(seg_len, dtype=np.float32, always_2d=True)
if len(audio) != seg_len:
raise RuntimeError(f"Something wrong: {path}")
return audio[:, 0]
def _speech_process(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, Union[str, np.ndarray]]:
assert check_argument_types()
ref_names = [k for k in data.keys() if re.match(r"speech_ref\d+", k)]
num_spk = len(ref_names)
aux_names = [k for k in data.keys() if re.match(r"enroll_ref\d+", k)]
if self.train:
assert len(ref_names) == len(aux_names), (len(ref_names), len(aux_names))
if not self.load_all_speakers:
# only load one target-speaker data
spk = np.random.randint(0, num_spk)
for i, name in enumerate(ref_names):
if i == 0:
data[name] = data[ref_names[spk]]
else:
data.pop(name)
continue
for i, name in enumerate(aux_names):
if not self.load_all_speakers:
if i == 0:
data[name] = data[aux_names[spk]]
else:
data.pop(name)
continue
if self.train_spk2enroll is None:
# normal format in `enroll_spk?.scp`:
# MIXTURE_UID /path/to/enrollment_or_embedding
aux_audio = data[name]
else:
# a special format in `enroll_spk?.scp`:
# MIXTURE_UID *UID SPEAKER_ID
assert data[name].startswith("*"), data[name]
cur_uid, spkid = data[name][1:].strip().split(maxsplit=1)
aux_uid, aux_audio = random.choice(self.train_spk2enroll[spkid])
while aux_uid == cur_uid:
aux_uid, aux_audio = random.choice(self.train_spk2enroll[spkid])
if getattr(self, "load_spk_embedding", False):
data[name] = np.load(aux_audio)[None, :] # force 2D
elif self.enroll_segment:
data[name] = self._read_audio_segment(
aux_audio, self.enroll_segment
)
else:
data[name] = soundfile.read(aux_audio)[0]
else:
for name in aux_names:
if data[name].startswith("*"):
# in case of collecting stats for training data
data[name] = np.zeros(1, dtype=data["speech_mix"].dtype)
else:
if getattr(self, "load_spk_embedding", False):
data[name] = np.load(data[name])[None, :] # force 2D
elif self.enroll_segment:
data[name] = self._read_audio_segment(
data[name], self.enroll_segment
)
else:
data[name] = soundfile.read(data[name])[0]
assert check_return_type(data)
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
assert check_argument_types()
data = super()._speech_process(uid, data)
data = self._speech_process(uid, data)
return data
class SpkPreprocessor(CommonPreprocessor):
"""Preprocessor for Speaker tasks.
Args:
train (bool): Whether to use in training mode.
spk2utt (str): Path to the `spk2utt` file.
target_duration (float): Target duration in seconds.
sample_rate (int): Sampling rate.
num_eval (int): Number of utterances to be used for evaluation.
rir_scp (str): Path to the RIR scp file.
rir_apply_prob (float): Probability of applying RIR.
noise_info (List[Tuple[float, str, Tuple[int, int], Tuple[float, float]]]):
List of tuples of noise information. Each tuple represents a noise type.
Each tuple consists of `(prob, noise_scp, num_to_mix, db_range)`.
- `prob` (float) is the probability of applying the noise type.
- `noise_scp` (str) is the path to the noise scp file.
- `num_to_mix` (Tuple[int, int]) is the range of the number of noises
to be mixed.
- `db_range` (Tuple[float, float]) is the range of noise levels in dB.
noise_apply_prob (float): Probability of applying noise.
short_noise_thres (float): Threshold of short noise.
"""
def __init__(
self,
train: bool,
spk2utt: str,
target_duration: float, # in seconds
sample_rate: int = 16000,
num_eval: int = 10,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_info: List[
Tuple[float, str, Tuple[int, int], Tuple[float, float]]
] = None,
noise_apply_prob: float = 1.0,
short_noise_thres: float = 0.5,
):
super().__init__(train, rir_scp=rir_scp, rir_apply_prob=rir_apply_prob)
with open(spk2utt, "r") as f_s2u:
self.spk2utt = f_s2u.readlines()
self.nspk = len(self.spk2utt)
self.spk2label = None # a dictionary that maps string speaker label to int
self.sample_rate = sample_rate
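# convert the target duration from seconds to a length in samples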
self.target_duration = int(target_duration * sample_rate)
self.num_eval = num_eval
self._make_label_mapping()
self.rir_scp = rir_scp
self.noise_apply_prob = noise_apply_prob
self.short_noise_thres = short_noise_thres
self.noises = []
self.noise_probs = []
self.noise_db_ranges = []
self.noise_num_to_mix = []
if noise_apply_prob > 0:
for prob, noise_scp, num_to_mix, db_range in noise_info:
if prob > 0:
assert len(db_range) == 2, db_range
assert db_range[0] <= db_range[1], db_range
assert len(num_to_mix) == 2, num_to_mix
assert num_to_mix[0] <= num_to_mix[1], num_to_mix
self.noise_probs.append(prob)
self.noise_db_ranges.append(tuple(db_range))
self.noise_num_to_mix.append(num_to_mix)
noises = []
with open(noise_scp, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
if len(sps) == 1:
noises.append(sps[0])
else:
noises.append(sps[1])
self.noises.append(noises)
def __repr__(self):
name = self.__class__.__module__ + "." + self.__class__.__name__
msg = f"{name}(train={self.train}"
if self.spk2label:
msg += f", len(spk2label)={len(self.spk2label)}"
for key in ("target_duration", "sample_rate", "num_eval"):
if getattr(self, key):
msg += f", {key}={getattr(self, key)}"
if self.rirs is not None and self.rir_apply_prob > 0:
msg += f", rir_scp={self.rir_scp}, rir_apply_prob={self.rir_apply_prob}"
if self.noise_apply_prob > 0 and self.noises:
msg += f", noise_apply_prob={self.noise_apply_prob}"
msg += f", noises.shapes={[len(n) for n in self.noises]}"
msg += f", noise_probs={self.noise_probs}"
msg += f", noise_db_ranges={self.noise_db_ranges}"
msg += f", noise_num_to_mix={self.noise_num_to_mix}"
return msg + ")"
def _make_label_mapping(self):
label_idx = 0
self.spk2label = {}
for spk in self.spk2utt:
spk = spk.strip().split(" ")[0]
self.spk2label[spk] = label_idx
label_idx += 1
    def _speech_process(self, data: Dict[str, np.ndarray]):
if self.train:
audio = data["speech"]
# duplicate if utt is shorter than minimum required duration
if len(audio) < self.target_duration:
shortage = self.target_duration - len(audio) + 1
audio = np.pad(audio, (0, shortage), "wrap")
startframe = np.array(
[np.int64(random.random() * (len(audio) - self.target_duration))]
)
data["speech"] = audio[
int(startframe) : int(startframe) + self.target_duration
]
if self.noise_apply_prob > 0 or self.rir_apply_prob > 0:
data["speech"] = self._apply_data_augmentation(data["speech"])
else:
audio = data["speech"]
audio2 = data["speech2"]
# duplicate if utt is shorter than minimum required duration
if len(audio) < self.target_duration:
shortage = self.target_duration - len(audio) + 1
audio = np.pad(audio, (0, shortage), "wrap")
if len(audio2) < self.target_duration:
shortage = self.target_duration - len(audio2) + 1
audio2 = np.pad(audio2, (0, shortage), "wrap")
startframe = np.linspace(
0, len(audio) - self.target_duration, num=self.num_eval
)
audios = []
for frame in startframe:
audios.append(audio[int(frame) : int(frame) + self.target_duration])
audios = np.stack(audios, axis=0)
startframe2 = np.linspace(
0, len(audio2) - self.target_duration, num=self.num_eval
)
audios2 = []
for frame in startframe2:
audios2.append(audio2[int(frame) : int(frame) + self.target_duration])
audios2 = np.stack(audios2, axis=0)
data["speech"] = audios
data["speech2"] = audios2
return data
def _convolve_rir(self, speech, rirs):
rir_path = np.random.choice(rirs)
rir = None
if rir_path is not None:
rir, _ = soundfile.read(rir_path, dtype=np.float64, always_2d=True)
# rir: (Nmic, Time)
rir = rir.T
# normalize rir
rir = rir / np.sqrt(np.sum(rir**2))
# speech: (Nmic, Time)
# Note that this operation doesn't change the signal length
speech = scipy.signal.convolve(speech, rir, mode="full")[
:, : speech.shape[1]
]
return speech, rir
def _load_noise(self, speech, speech_db, noises, noise_db_low, noise_db_high):
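# Pick a random noise file, tile or crop it to the speech length, and scale it so
# that the resulting speech-to-noise ratio is drawn uniformly from
# [noise_db_low, noise_db_high] dB.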
nsamples = speech.shape[1]
noise_path = np.random.choice(noises)
noise = None
if noise_path is not None:
noise_snr = np.random.uniform(noise_db_low, noise_db_high)
with soundfile.SoundFile(noise_path) as f:
if f.frames == nsamples:
noise = f.read(dtype=np.float64)
elif f.frames < nsamples:
# noise: (Time,)
noise = f.read(dtype=np.float64)
# Repeat noise
noise = np.pad(
noise,
(0, nsamples - f.frames),
mode="wrap",
)
else:
offset = np.random.randint(0, f.frames - nsamples)
f.seek(offset)
# noise: (Time,)
noise = f.read(nsamples, dtype=np.float64)
if len(noise) != nsamples:
raise RuntimeError(f"Something wrong: {noise_path}")
# noise: (Nmic, Time)
noise = noise[None, :]
noise_power = np.mean(noise**2)
noise_db = 10 * np.log10(noise_power + 1e-4)
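# choose the gain so that speech_db - (noise_db + 20 * log10(scale)) == noise_snr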
scale = np.sqrt(10 ** ((speech_db - noise_db - noise_snr) / 10))
noise = noise * scale
return noise
def _apply_data_augmentation(self, speech):
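# Optionally convolve the utterance with a randomly chosen RIR, then mix in a
# randomly chosen number of noises from one randomly selected noise type at random
# SNRs; each step is applied with its configured probability.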
# speech: (Nmic, Time)
if speech.ndim == 1:
speech = speech[None, :]
else:
speech = speech.T
if self.rirs is not None and self.rir_apply_prob >= np.random.random():
speech, _ = self._convolve_rir(speech, self.rirs)
if self.noises and self.noise_apply_prob >= np.random.random():
idx = random.choices(
range(len(self.noises)), weights=self.noise_probs, k=1
)[0]
low, high = self.noise_num_to_mix[idx]
if low == high:
num_to_mix = low
else:
num_to_mix = np.random.randint(low, high + 1)
# add eps of 1e-4 to avoid negative value before log
speech_db = 10 * np.log10(np.mean(speech**2) + 1e-4)
noiselist = []
for _ in range(num_to_mix):
noise = self._load_noise(
speech, # original speech
speech_db, # db of speech
self.noises[idx], # a list of a type of noise
self.noise_db_ranges[idx][0], # min db
self.noise_db_ranges[idx][1], # max db
)
noiselist.append(noise)
noise = np.sum(np.concatenate(noiselist, axis=0), axis=0, keepdims=True)
speech = speech + noise
speech = np.squeeze(speech, axis=0)
return speech
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
"""
Make speaker labels into integers
"""
if self.train:
int_label = self.spk2label[data["spk_labels"]]
data["spk_labels"] = np.asarray([int_label], dtype=np.int64)
else:
data["spk_labels"] = np.asarray([int(data["spk_labels"])])
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
assert check_argument_types()
data = self._text_process(data)
data = self._speech_process(data)
return data
| [] |
2024-01-10 | dynamic-superb/espnet-whisper | espnet2~bin~asr_inference.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from distutils.version import LooseVersion
from itertools import groupby
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.quantization
from typeguard import check_argument_types, check_return_type
from espnet2.asr.decoder.hugging_face_transformers_decoder import (
get_hugging_face_model_lm_head,
get_hugging_face_model_network,
)
from espnet2.asr.decoder.s4_decoder import S4Decoder
from espnet2.asr.transducer.beam_search_transducer import BeamSearchTransducer
from espnet2.asr.transducer.beam_search_transducer import (
ExtendedHypothesis as ExtTransHypothesis,
)
from espnet2.asr.transducer.beam_search_transducer import Hypothesis as TransHypothesis
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.asr import ASRTask
from espnet2.tasks.enh_s2t import EnhS2TTask
from espnet2.tasks.lm import LMTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.hugging_face_token_id_converter import HuggingFaceTokenIDConverter
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.text.whisper_token_id_converter import OpenAIWhisperTokenIDConverter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.batch_beam_search_online_sim import BatchBeamSearchOnlineSim
from espnet.nets.beam_search import BeamSearch, Hypothesis
from espnet.nets.beam_search_timesync import BeamSearchTimeSync
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.nets.scorers.ctc import CTCPrefixScorer
from espnet.nets.scorers.length_bonus import LengthBonus
from espnet.utils.cli_utils import get_commandline_args
try:
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM
from transformers.file_utils import ModelOutput
is_transformers_available = True
except ImportError:
is_transformers_available = False
# Alias for typing
ListOfHypothesis = List[
Tuple[
Optional[str],
List[str],
List[int],
Union[Hypothesis, ExtTransHypothesis, TransHypothesis],
]
]
class Speech2Text:
"""Speech2Text class
Examples:
>>> import soundfile
>>> speech2text = Speech2Text("asr_config.yml", "asr.pth")
>>> audio, rate = soundfile.read("speech.wav")
>>> speech2text(audio)
[(text, token, token_int, hypothesis object), ...]
"""
def __init__(
self,
asr_train_config: Union[Path, str] = None,
asr_model_file: Union[Path, str] = None,
transducer_conf: dict = None,
lm_train_config: Union[Path, str] = None,
lm_file: Union[Path, str] = None,
ngram_scorer: str = "full",
ngram_file: Union[Path, str] = None,
token_type: str = None,
bpemodel: str = None,
device: str = "cpu",
maxlenratio: float = 0.0,
minlenratio: float = 0.0,
batch_size: int = 1,
dtype: str = "float32",
beam_size: int = 20,
ctc_weight: float = 0.5,
lm_weight: float = 1.0,
ngram_weight: float = 0.9,
penalty: float = 0.0,
nbest: int = 1,
streaming: bool = False,
enh_s2t_task: bool = False,
quantize_asr_model: bool = False,
quantize_lm: bool = False,
quantize_modules: List[str] = ["Linear"],
quantize_dtype: str = "qint8",
hugging_face_decoder: bool = False,
hugging_face_decoder_conf: Dict[str, Any] = {},
time_sync: bool = False,
multi_asr: bool = False,
lang_prompt_token: Optional[str] = None,
nlp_prompt_token: Optional[str] = None,
prompt_token_file: str = "/projects/bbjs/arora1/new_download/espnet/egs2/stop/asr2_combined/add_tokens-Copy1.txt",
):
assert check_argument_types()
task = ASRTask if not enh_s2t_task else EnhS2TTask
if quantize_asr_model or quantize_lm:
if quantize_dtype == "float16" and torch.__version__ < LooseVersion(
"1.5.0"
):
raise ValueError(
"float16 dtype for dynamic quantization is not supported with "
"torch version < 1.5.0. Switch to qint8 dtype instead."
)
quantize_modules = set([getattr(torch.nn, q) for q in quantize_modules])
quantize_dtype = getattr(torch, quantize_dtype)
# 1. Build ASR model
scorers = {}
asr_model, asr_train_args = task.build_model_from_file(
asr_train_config, asr_model_file, device
)
if enh_s2t_task:
asr_model.inherite_attributes(
inherite_s2t_attrs=[
"ctc",
"decoder",
"eos",
"joint_network",
"sos",
"token_list",
"use_transducer_decoder",
]
)
asr_model.to(dtype=getattr(torch, dtype)).eval()
if quantize_asr_model:
logging.info("Use quantized asr model for decoding.")
asr_model = torch.quantization.quantize_dynamic(
asr_model, qconfig_spec=quantize_modules, dtype=quantize_dtype
)
decoder = asr_model.decoder
ctc = CTCPrefixScorer(ctc=asr_model.ctc, eos=asr_model.eos)
token_list = asr_model.token_list
scorers.update(
decoder=decoder,
ctc=ctc,
length_bonus=LengthBonus(len(token_list)),
)
# 2. Build Language model
if lm_train_config is not None:
lm, lm_train_args = LMTask.build_model_from_file(
lm_train_config, lm_file, device
)
if quantize_lm:
logging.info("Use quantized lm for decoding.")
lm = torch.quantization.quantize_dynamic(
lm, qconfig_spec=quantize_modules, dtype=quantize_dtype
)
scorers["lm"] = lm.lm
# 3. Build ngram model
if ngram_file is not None:
if ngram_scorer == "full":
from espnet.nets.scorers.ngram import NgramFullScorer
ngram = NgramFullScorer(ngram_file, token_list)
else:
from espnet.nets.scorers.ngram import NgramPartScorer
ngram = NgramPartScorer(ngram_file, token_list)
else:
ngram = None
scorers["ngram"] = ngram
# 4. Build BeamSearch object
if asr_model.use_transducer_decoder:
# In multi-blank RNNT, we assume all big blanks are
# just before the standard blank in token_list
multi_blank_durations = getattr(
asr_model, "transducer_multi_blank_durations", []
)[::-1] + [1]
multi_blank_indices = [
asr_model.blank_id - i + 1
for i in range(len(multi_blank_durations), 0, -1)
]
if transducer_conf is None:
transducer_conf = {}
beam_search_transducer = BeamSearchTransducer(
decoder=asr_model.decoder,
joint_network=asr_model.joint_network,
beam_size=beam_size,
lm=scorers["lm"] if "lm" in scorers else None,
lm_weight=lm_weight,
multi_blank_durations=multi_blank_durations,
multi_blank_indices=multi_blank_indices,
token_list=token_list,
**transducer_conf,
)
beam_search = None
hugging_face_model = None
hugging_face_linear_in = None
elif (
decoder.__class__.__name__ == "HuggingFaceTransformersDecoder"
and hugging_face_decoder
):
if not is_transformers_available:
raise ImportError(
"`transformers` is not available."
" Please install it via `pip install transformers`"
" or `cd /path/to/espnet/tools && . ./activate_python.sh"
" && ./installers/install_transformers.sh`."
)
if decoder.causal_lm:
hugging_face_model = AutoModelForCausalLM.from_pretrained(
decoder.model_name_or_path
)
hugging_face_model.resize_token_embeddings(decoder.lm_head.out_features)
transformer = get_hugging_face_model_network(hugging_face_model)
transformer.load_state_dict(decoder.decoder.state_dict())
lm_head = get_hugging_face_model_lm_head(hugging_face_model)
lm_head.load_state_dict(decoder.lm_head.state_dict())
else:
hugging_face_model = AutoModelForSeq2SeqLM.from_pretrained(
decoder.model_name_or_path
)
hugging_face_model.lm_head.load_state_dict(decoder.lm_head.state_dict())
if hasattr(hugging_face_model, "model"):
hugging_face_model.model.decoder.load_state_dict(
decoder.decoder.state_dict()
)
del hugging_face_model.model.encoder
else:
hugging_face_model.decoder.load_state_dict(
decoder.decoder.state_dict()
)
del hugging_face_model.encoder
del asr_model.decoder.lm_head
del asr_model.decoder.decoder
hugging_face_linear_in = decoder.linear_in
hugging_face_model.to(device=device).eval()
if "num_beams" not in hugging_face_decoder_conf:
hugging_face_decoder_conf[
"num_beams"
] = hugging_face_model.config.num_beams
if (
hugging_face_model.config.pad_token_id is None
and "pad_token_id" not in hugging_face_decoder_conf
):
hugging_face_decoder_conf[
"pad_token_id"
] = hugging_face_model.config.eos_token_id
beam_search = None
beam_search_transducer = None
else:
beam_search_transducer = None
hugging_face_model = None
hugging_face_linear_in = None
weights = dict(
decoder=1.0 - ctc_weight,
ctc=ctc_weight,
lm=lm_weight,
ngram=ngram_weight,
length_bonus=penalty,
)
if time_sync:
if not hasattr(asr_model, "ctc"):
raise NotImplementedError(
"BeamSearchTimeSync without CTC is not supported."
)
if batch_size != 1:
raise NotImplementedError(
"BeamSearchTimeSync with batching is not yet supported."
)
logging.info("BeamSearchTimeSync implementation is selected.")
scorers["ctc"] = asr_model.ctc
beam_search = BeamSearchTimeSync(
beam_size=beam_size,
weights=weights,
scorers=scorers,
sos=asr_model.sos,
token_list=token_list,
)
else:
beam_search = BeamSearch(
beam_size=beam_size,
weights=weights,
scorers=scorers,
sos=asr_model.sos,
eos=asr_model.eos,
vocab_size=len(token_list),
token_list=token_list,
pre_beam_score_key=None if ctc_weight == 1.0 else "full",
)
# TODO(karita): make all scorers batchified
if batch_size == 1:
non_batch = [
k
for k, v in beam_search.full_scorers.items()
if not isinstance(v, BatchScorerInterface)
]
if len(non_batch) == 0:
if streaming:
beam_search.__class__ = BatchBeamSearchOnlineSim
beam_search.set_streaming_config(asr_train_config)
logging.info(
"BatchBeamSearchOnlineSim implementation is selected."
)
else:
beam_search.__class__ = BatchBeamSearch
logging.info("BatchBeamSearch implementation is selected.")
else:
logging.warning(
f"As non-batch scorers {non_batch} are found, "
f"fall back to non-batch implementation."
)
beam_search.to(device=device, dtype=getattr(torch, dtype)).eval()
for scorer in scorers.values():
if isinstance(scorer, torch.nn.Module):
scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
logging.info(f"Beam_search: {beam_search}")
logging.info(f"Decoding device={device}, dtype={dtype}")
# 5. [Optional] Build Text converter: e.g. bpe-sym -> Text
if token_type is None:
token_type = asr_train_args.token_type
if bpemodel is None:
bpemodel = asr_train_args.bpemodel
if token_type is None:
tokenizer = None
elif (
token_type == "bpe"
or token_type == "hugging_face"
or "whisper" in token_type
):
if bpemodel is not None:
if "whisper" in token_type:
tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel,non_linguistic_symbols=prompt_token_file)
else:
tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel)
else:
tokenizer = None
else:
tokenizer = build_tokenizer(token_type=token_type)
if token_type == "hugging_face":
converter = HuggingFaceTokenIDConverter(model_name_or_path=bpemodel)
elif bpemodel not in ["whisper_en", "whisper_multilingual"]:
converter = TokenIDConverter(token_list=token_list)
else:
converter = OpenAIWhisperTokenIDConverter(model_type=bpemodel,added_tokens_txt=prompt_token_file)
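# Whisper decoding is primed with the start-of-transcript (SOT) sequence
# (<|startoftranscript|>, <|language|>, <|task|>, <|notimestamps|>); the branches
# below optionally replace parts of it with language/task prompt tokens or with a
# tokenized natural-language prompt before setting it as the beam-search primer.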
if lang_prompt_token is not None:
a1=converter.tokenizer.tokenizer.convert_ids_to_tokens(converter.tokenizer.sot_sequence_including_notimestamps)
# a1[2]="<|ic|>"
a1=a1[:1]+lang_prompt_token.split()+a1[3:]
beam_search.set_hyp_primer(
list(converter.tokenizer.tokenizer.convert_tokens_to_ids(a1))
)
elif nlp_prompt_token is not None:
a1=converter.tokenizer.tokenizer.convert_ids_to_tokens(converter.tokenizer.sot_sequence_including_notimestamps)
# a1[2]="<|ic|>"
# import pdb;pdb.set_trace()
prompt_tokens = tokenizer.text2tokens(nlp_prompt_token)
# print(prompt_tokens)
# actual_token=[actual_token[0]]+prompt_tokens+actual_token[2:]
a1=a1[:2]+prompt_tokens+a1[3:]
beam_search.set_hyp_primer(
list(converter.tokenizer.tokenizer.convert_tokens_to_ids(a1))
)
else:
beam_search.set_hyp_primer(
list(converter.tokenizer.sot_sequence_including_notimestamps)
)
logging.info(f"Text tokenizer: {tokenizer}")
self.asr_model = asr_model
self.asr_train_args = asr_train_args
self.converter = converter
self.tokenizer = tokenizer
self.beam_search = beam_search
self.beam_search_transducer = beam_search_transducer
self.hugging_face_model = hugging_face_model
self.hugging_face_linear_in = hugging_face_linear_in
self.hugging_face_decoder_conf = hugging_face_decoder_conf
self.maxlenratio = maxlenratio
self.minlenratio = minlenratio
self.device = device
self.dtype = dtype
self.nbest = nbest
self.enh_s2t_task = enh_s2t_task
self.multi_asr = multi_asr
@torch.no_grad()
def __call__(
self,
speech: Union[torch.Tensor, np.ndarray],
prompt: Union[torch.Tensor, np.ndarray] = None,
prefix: Union[torch.Tensor, np.ndarray] = None,
label: Union[torch.Tensor, np.ndarray] = None,
) -> Union[
ListOfHypothesis,
Tuple[
ListOfHypothesis,
Optional[Dict[int, List[str]]],
],
]:
"""Inference
Args:
speech: Input speech data
Returns:
text, token, token_int, hyp
"""
assert check_argument_types()
# Input as audio signal
if isinstance(speech, np.ndarray):
speech = torch.tensor(speech)
# data: (Nsamples,) -> (1, Nsamples)
speech = speech.unsqueeze(0).to(getattr(torch, self.dtype))
# lengths: (1,)
lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
batch = {"speech": speech, "speech_lengths": lengths}
logging.info("speech length: " + str(speech.size(1)))
# a. To device
batch = to_device(batch, device=self.device)
# b. Forward Encoder
enc, enc_olens = self.asr_model.encode(**batch)
if self.multi_asr:
enc = enc.unbind(dim=1) # (batch, num_inf, ...) -> num_inf x [batch, ...]
if self.enh_s2t_task or self.multi_asr:
# Enh+ASR joint task or Multispkr ASR task
# NOTE (Wangyou): the return type in this case is List[default_return_type]
if self.multi_asr:
num_spk = getattr(self.asr_model, "num_inf", 1)
else:
num_spk = getattr(self.asr_model.enh_model, "num_spk", 1)
assert len(enc) == num_spk, (len(enc), num_spk)
results = []
for spk, enc_spk in enumerate(enc, 1):
logging.info(f"=== [{str(self.asr_model.__class__)}] Speaker {spk} ===")
if isinstance(enc_spk, tuple):
enc_spk = enc_spk[0]
assert len(enc_spk) == 1, len(enc_spk)
# c. Pass the encoder result to the beam search
ret = self._decode_single_sample(enc_spk[0])
assert check_return_type(ret)
results.append(ret)
else:
# Normal ASR
intermediate_outs = None
if isinstance(enc, tuple):
intermediate_outs = enc[1]
enc = enc[0]
assert len(enc) == 1, len(enc)
# c. Pass the encoder result to the beam search
results = self._decode_single_sample(enc[0], prompt=prompt, label=label, prefix=prefix)
# Encoder intermediate CTC predictions
if intermediate_outs is not None:
encoder_interctc_res = self._decode_interctc(intermediate_outs)
results = (results, encoder_interctc_res)
assert check_return_type(results)
return results
def _decode_interctc(
self, intermediate_outs: List[Tuple[int, torch.Tensor]]
) -> Dict[int, List[str]]:
assert check_argument_types()
exclude_ids = [self.asr_model.blank_id, self.asr_model.sos, self.asr_model.eos]
res = {}
token_list = self.beam_search.token_list
for layer_idx, encoder_out in intermediate_outs:
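# greedy CTC decoding for the intermediate layer: frame-wise argmax, collapse
# repeats, then drop blank/sos/eos ids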
y = self.asr_model.ctc.argmax(encoder_out)[0] # batch_size = 1
y = [x[0] for x in groupby(y) if x[0] not in exclude_ids]
y = [token_list[x] for x in y]
res[layer_idx] = y
return res
def _decode_single_sample(self, enc: torch.Tensor, prompt: torch.Tensor=None, prefix: torch.Tensor=None, label=None):
if self.beam_search_transducer:
logging.info("encoder output length: " + str(enc.shape[0]))
nbest_hyps = self.beam_search_transducer(enc)
best = nbest_hyps[0]
logging.info(f"total log probability: {best.score:.2f}")
logging.info(
f"normalized log probability: {best.score / len(best.yseq):.2f}"
)
logging.info(
"best hypo: " + "".join(self.converter.ids2tokens(best.yseq[1:])) + "\n"
)
elif self.hugging_face_model:
num_beams = self.hugging_face_decoder_conf["num_beams"]
enc = self.hugging_face_linear_in(enc).unsqueeze(0)
if self.asr_model.decoder.causal_lm:
forward_args, _ = self.asr_model.decoder.add_prefix_postfix(
enc,
torch.tensor([enc.shape[1]]).to(enc.device),
torch.ones([1, 1], dtype=int, device=enc.device),
torch.ones([1], dtype=int, device=enc.device),
)
# input_ids are ignored if we provide inputs_embeds,
# but input_ids are still required, so we make fake ones
input_ids = torch.ones(
[1, forward_args["inputs_embeds"].shape[1]],
dtype=int,
device=enc.device,
)
yseq = self.hugging_face_model.generate(
input_ids.repeat(num_beams, 1),
inputs_embeds=forward_args["inputs_embeds"].repeat(num_beams, 1, 1),
attention_mask=input_ids.repeat(num_beams, 1),
**self.hugging_face_decoder_conf,
)
yseq = yseq[:, input_ids.shape[1] - 1 :]
else:
decoder_start_token_id = (
self.hugging_face_model.config.decoder_start_token_id
)
yseq = self.hugging_face_model.generate(
encoder_outputs=ModelOutput(last_hidden_state=enc),
decoder_start_token_id=decoder_start_token_id,
**self.hugging_face_decoder_conf,
)
nbest_hyps = [Hypothesis(yseq=yseq[0])]
logging.info(
"best hypo: "
+ self.tokenizer.tokens2text(
self.converter.ids2tokens(nbest_hyps[0].yseq[1:])
)
+ "\n"
)
else:
if hasattr(self.beam_search.nn_dict, "decoder"):
if isinstance(self.beam_search.nn_dict.decoder, S4Decoder):
# Setup: required for S4 autoregressive generation
for module in self.beam_search.nn_dict.decoder.modules():
if hasattr(module, "setup_step"):
module.setup_step()
if prompt is not None and 50359 in self.beam_search.hyp_primer:
# Remove <|transcribe|>
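# (Whisper multilingual ids: 50258=<|startoftranscript|>, 50259=<|en|>, 50363=<|notimestamps|>)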
self.beam_search.set_hyp_primer([50258, 50259, 50363])
nbest_hyps = self.beam_search(
x=enc, prompt=prompt, prefix=prefix, maxlenratio=self.maxlenratio, minlenratio=self.minlenratio, label=label
)
nbest_hyps = nbest_hyps[: self.nbest]
results = []
for hyp in nbest_hyps:
assert isinstance(hyp, (Hypothesis, TransHypothesis)), type(hyp)
# remove sos/eos and get results
last_pos = None if self.asr_model.use_transducer_decoder else -1
if isinstance(hyp.yseq, list):
token_int = hyp.yseq[1:last_pos]
else:
token_int = hyp.yseq[1:last_pos].tolist()
# remove blank symbol id, which is assumed to be 0
token_int = list(filter(lambda x: x != 0, token_int))
if prompt is not None:
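# the hypothesis begins with the prompt tokens; verify them and strip them so
# that only the newly generated tokens are returned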
assert token_int[:len(prompt)-1] == prompt[1:].tolist()
token_int = token_int[len(prompt[1:]):]
# Change integer-ids to tokens
token = self.converter.ids2tokens(token_int)
if self.tokenizer is not None:
text = self.tokenizer.tokens2text(token)
else:
text = None
results.append((text, token, token_int, hyp))
return results
@staticmethod
def from_pretrained(
model_tag: Optional[str] = None,
**kwargs: Optional[Any],
):
"""Build Speech2Text instance from the pretrained model.
Args:
model_tag (Optional[str]): Model tag of the pretrained models.
Currently, the tags of espnet_model_zoo are supported.
Returns:
Speech2Text: Speech2Text instance.
"""
if model_tag is not None:
try:
from espnet_model_zoo.downloader import ModelDownloader
except ImportError:
logging.error(
"`espnet_model_zoo` is not installed. "
"Please install via `pip install -U espnet_model_zoo`."
)
raise
d = ModelDownloader()
kwargs.update(**d.download_and_unpack(model_tag))
return Speech2Text(**kwargs)
def inference(
output_dir: str,
maxlenratio: float,
minlenratio: float,
batch_size: int,
dtype: str,
beam_size: int,
ngpu: int,
seed: int,
ctc_weight: float,
lm_weight: float,
ngram_weight: float,
penalty: float,
nbest: int,
num_workers: int,
log_level: Union[int, str],
data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
key_file: Optional[str],
asr_train_config: Optional[str],
asr_model_file: Optional[str],
lm_train_config: Optional[str],
lm_file: Optional[str],
word_lm_train_config: Optional[str],
word_lm_file: Optional[str],
ngram_file: Optional[str],
model_tag: Optional[str],
token_type: Optional[str],
bpemodel: Optional[str],
allow_variable_data_keys: bool,
transducer_conf: Optional[dict],
streaming: bool,
enh_s2t_task: bool,
quantize_asr_model: bool,
quantize_lm: bool,
quantize_modules: List[str],
quantize_dtype: str,
hugging_face_decoder: bool,
hugging_face_decoder_conf: Dict[str, Any],
time_sync: bool,
multi_asr: bool,
lang_prompt_token: Optional[str],
nlp_prompt_token: Optional[str],
prompt_token_file:str,
use_nlp_prompt: bool,
use_label: bool,
use_prefix: bool,
):
assert check_argument_types()
if batch_size > 1:
raise NotImplementedError("batch decoding is not implemented")
if word_lm_train_config is not None:
raise NotImplementedError("Word LM is not implemented")
if ngpu > 1:
raise NotImplementedError("only single GPU decoding is supported")
logging.basicConfig(
level=log_level,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
if ngpu >= 1:
device = "cuda"
else:
device = "cpu"
# 1. Set random-seed
set_all_random_seed(seed)
# 2. Build speech2text
speech2text_kwargs = dict(
asr_train_config=asr_train_config,
asr_model_file=asr_model_file,
transducer_conf=transducer_conf,
lm_train_config=lm_train_config,
lm_file=lm_file,
ngram_file=ngram_file,
token_type=token_type,
bpemodel=bpemodel,
device=device,
maxlenratio=maxlenratio,
minlenratio=minlenratio,
dtype=dtype,
beam_size=beam_size,
ctc_weight=ctc_weight,
lm_weight=lm_weight,
ngram_weight=ngram_weight,
penalty=penalty,
nbest=nbest,
streaming=streaming,
enh_s2t_task=enh_s2t_task,
multi_asr=multi_asr,
quantize_asr_model=quantize_asr_model,
quantize_lm=quantize_lm,
quantize_modules=quantize_modules,
quantize_dtype=quantize_dtype,
hugging_face_decoder=hugging_face_decoder,
hugging_face_decoder_conf=hugging_face_decoder_conf,
time_sync=time_sync,
prompt_token_file=prompt_token_file,
lang_prompt_token=lang_prompt_token,
nlp_prompt_token=nlp_prompt_token,
)
speech2text = Speech2Text.from_pretrained(
model_tag=model_tag,
**speech2text_kwargs,
)
# Use prompt during inference
speech2text.asr_train_args.preprocessor_conf['text_name'] = 'text'
speech2text.asr_train_args.use_label = use_label
speech2text.asr_train_args.use_prefix = use_prefix
# 3. Build data-iterator
loader = ASRTask.build_streaming_iterator(
data_path_and_name_and_type,
dtype=dtype,
batch_size=batch_size,
key_file=key_file,
num_workers=num_workers,
preprocess_fn=ASRTask.build_preprocess_fn(speech2text.asr_train_args, False, ),
collate_fn=ASRTask.build_collate_fn(speech2text.asr_train_args, False),
allow_variable_data_keys=allow_variable_data_keys,
inference=True,
)
# 7. Start for-loop
# FIXME(kamo): The output format should be discussed
with DatadirWriter(output_dir) as writer:
for keys, batch in loader:
assert isinstance(batch, dict), type(batch)
assert all(isinstance(s, str) for s in keys), keys
_bs = len(next(iter(batch.values())))
assert len(keys) == _bs, f"{len(keys)} != {_bs}"
batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
# batch['prompt'] = batch['text']
if 'prompt' in batch:
batch.pop('text')
# N-best list of (text, token, token_int, hyp_object)
try:
results = speech2text(**batch)
except TooShortUttError as e:
logging.warning(f"Utterance {keys} {e}")
hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
results = [[" ", ["<space>"], [2], hyp]] * nbest
if enh_s2t_task:
num_spk = getattr(speech2text.asr_model.enh_model, "num_spk", 1)
results = [results for _ in range(num_spk)]
# Only supporting batch_size==1
key = keys[0]
if enh_s2t_task or multi_asr:
# Enh+ASR joint task
for spk, ret in enumerate(results, 1):
for n, (text, token, token_int, hyp) in zip(
range(1, nbest + 1), ret
):
# Create a directory: outdir/{n}best_recog_spk?
ibest_writer = writer[f"{n}best_recog"]
# Write the result to each file
ibest_writer[f"token_spk{spk}"][key] = " ".join(token)
ibest_writer[f"token_int_spk{spk}"][key] = " ".join(
map(str, token_int)
)
ibest_writer[f"score_spk{spk}"][key] = str(hyp.score)
if text is not None:
ibest_writer[f"text_spk{spk}"][key] = text
else:
# Normal ASR
encoder_interctc_res = None
if isinstance(results, tuple):
results, encoder_interctc_res = results
for n, (text, token, token_int, hyp) in zip(
range(1, nbest + 1), results
):
# Create a directory: outdir/{n}best_recog
ibest_writer = writer[f"{n}best_recog"]
# Write the result to each file
ibest_writer["token"][key] = " ".join(token)
ibest_writer["token_int"][key] = " ".join(map(str, token_int))
ibest_writer["score"][key] = str(hyp.score)
if text is not None:
ibest_writer["text"][key] = text
# Write intermediate predictions to
# encoder_interctc_layer<layer_idx>.txt
ibest_writer = writer[f"1best_recog"]
if encoder_interctc_res is not None:
for idx, text in encoder_interctc_res.items():
ibest_writer[f"encoder_interctc_layer{idx}.txt"][
key
] = " ".join(text)
def get_parser():
parser = config_argparse.ArgumentParser(
description="ASR Decoding",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Note(kamo): Use '_' instead of '-' as separator.
# '-' is confusing if written in yaml.
parser.add_argument(
"--log_level",
type=lambda x: x.upper(),
default="INFO",
choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
help="The verbose level of logging",
)
parser.add_argument("--output_dir", type=str, required=True)
parser.add_argument(
"--ngpu",
type=int,
default=0,
help="The number of gpus. 0 indicates CPU mode",
)
parser.add_argument("--seed", type=int, default=0, help="Random seed")
parser.add_argument(
"--dtype",
default="float32",
choices=["float16", "float32", "float64"],
help="Data type",
)
parser.add_argument(
"--num_workers",
type=int,
default=24,
help="The number of workers used for DataLoader",
)
group = parser.add_argument_group("Input data related")
group.add_argument(
"--data_path_and_name_and_type",
type=str2triple_str,
required=True,
action="append",
)
group.add_argument("--key_file", type=str_or_none)
group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
group = parser.add_argument_group("The model configuration related")
group.add_argument(
"--asr_train_config",
type=str,
help="ASR training configuration",
)
group.add_argument(
"--asr_model_file",
type=str,
help="ASR model parameter file",
)
group.add_argument(
"--lm_train_config",
type=str,
help="LM training configuration",
)
group.add_argument(
"--lm_file",
type=str,
help="LM parameter file",
)
group.add_argument(
"--word_lm_train_config",
type=str,
help="Word LM training configuration",
)
group.add_argument(
"--word_lm_file",
type=str,
help="Word LM parameter file",
)
group.add_argument(
"--ngram_file",
type=str,
help="N-gram parameter file",
)
group.add_argument(
"--model_tag",
type=str,
help="Pretrained model tag. If specify this option, *_train_config and "
"*_file will be overwritten",
)
group.add_argument(
"--enh_s2t_task",
type=str2bool,
default=False,
help="Whether we are using an enhancement and ASR joint model",
)
group.add_argument(
"--multi_asr",
type=str2bool,
default=False,
help="Whether we are using a monolithic multi-speaker ASR model "
"(This flag should be False if a speech separation model is used before ASR)",
)
group = parser.add_argument_group("Quantization related")
group.add_argument(
"--quantize_asr_model",
type=str2bool,
default=False,
help="Apply dynamic quantization to ASR model.",
)
group.add_argument(
"--quantize_lm",
type=str2bool,
default=False,
help="Apply dynamic quantization to LM.",
)
group.add_argument(
"--quantize_modules",
type=str,
nargs="*",
default=["Linear"],
help="""List of modules to be dynamically quantized.
E.g.: --quantize_modules=[Linear,LSTM,GRU].
Each specified module should be an attribute of 'torch.nn', e.g.:
torch.nn.Linear, torch.nn.LSTM, torch.nn.GRU, ...""",
)
group.add_argument(
"--quantize_dtype",
type=str,
default="qint8",
choices=["float16", "qint8"],
help="Dtype for dynamic quantization.",
)
group = parser.add_argument_group("Beam-search related")
group.add_argument(
"--batch_size",
type=int,
default=1,
help="The batch size for inference",
)
group.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
group.add_argument("--beam_size", type=int, default=20, help="Beam size")
group.add_argument("--penalty", type=float, default=0.0, help="Insertion penalty")
group.add_argument(
"--maxlenratio",
type=float,
default=0.0,
help="Input length ratio to obtain max output length. "
"If maxlenratio=0.0 (default), it uses a end-detect "
"function "
"to automatically find maximum hypothesis lengths."
"If maxlenratio<0.0, its absolute value is interpreted"
"as a constant max output length",
)
group.add_argument(
"--minlenratio",
type=float,
default=0.0,
help="Input length ratio to obtain min output length",
)
group.add_argument(
"--ctc_weight",
type=float,
default=0.5,
help="CTC weight in joint decoding",
)
group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
group.add_argument("--ngram_weight", type=float, default=0.9, help="ngram weight")
group.add_argument("--streaming", type=str2bool, default=False)
group.add_argument("--hugging_face_decoder", type=str2bool, default=False)
group.add_argument(
"--hugging_face_decoder_conf",
type=NestedDictAction,
default=dict(),
help="Custom kwargs for the HF .generate()",
)
group.add_argument(
"--transducer_conf",
default=None,
help="The keyword arguments for transducer beam search.",
)
group = parser.add_argument_group("Text converter related")
group.add_argument(
"--token_type",
type=str_or_none,
default=None,
choices=["char", "bpe", None],
help="The token type for ASR model. "
"If not given, refers from the training args",
)
group.add_argument(
"--bpemodel",
type=str_or_none,
default=None,
help="The model path of sentencepiece. "
"If not given, refers from the training args",
)
group.add_argument(
"--time_sync",
type=str2bool,
default=False,
help="Time synchronous beam search.",
)
group.add_argument(
"--lang_prompt_token",
type=str,
default=None,
help="Prompt token for mulitlingual prompting",
)
group.add_argument(
"--nlp_prompt_token",
type=str,
default=None,
help="Prompt token for natural language phrases as prompting",
)
group.add_argument(
"--prompt_token_file",
type=str,
default="/home/stan/espnet/egs2/stop/big_superb/add_tokens-Copy1.txt",
help="Prompt token file",
)
group.add_argument(
"--use_nlp_prompt",
type=str2bool,
default=True,
help="Use a prompt in natural language form",
)
group.add_argument(
"--use_label",
type=str2bool,
default=True,
help="Use teacher forcing label",
)
group.add_argument(
"--use_prefix",
type=str2bool,
default=False,
help="Add prefix during decoding",
)
return parser
def main(cmd=None):
print(get_commandline_args(), file=sys.stderr)
parser = get_parser()
args = parser.parse_args(cmd)
kwargs = vars(args)
kwargs.pop("config", None)
inference(**kwargs)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | dynamic-superb/espnet-whisper | espnet2~tasks~asr.py | import argparse
import logging
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.decoder.hugging_face_transformers_decoder import ( # noqa: H301
HuggingFaceTransformersDecoder,
)
from espnet2.asr.decoder.mlm_decoder import MLMDecoder
from espnet2.asr.decoder.rnn_decoder import RNNDecoder
from espnet2.asr.decoder.s4_decoder import S4Decoder
from espnet2.asr.decoder.transducer_decoder import TransducerDecoder
from espnet2.asr.decoder.transformer_decoder import (
DynamicConvolution2DTransformerDecoder,
DynamicConvolutionTransformerDecoder,
LightweightConvolution2DTransformerDecoder,
LightweightConvolutionTransformerDecoder,
TransformerDecoder,
)
from espnet2.asr.decoder.whisper_decoder import OpenAIWhisperDecoder
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.encoder.branchformer_encoder import BranchformerEncoder
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
from espnet2.asr.encoder.contextual_block_conformer_encoder import (
ContextualBlockConformerEncoder,
)
from espnet2.asr.encoder.contextual_block_transformer_encoder import (
ContextualBlockTransformerEncoder,
)
from espnet2.asr.encoder.e_branchformer_encoder import EBranchformerEncoder
from espnet2.asr.encoder.hubert_encoder import (
FairseqHubertEncoder,
FairseqHubertPretrainEncoder,
TorchAudioHuBERTPretrainEncoder,
)
from espnet2.asr.encoder.longformer_encoder import LongformerEncoder
from espnet2.asr.encoder.rnn_encoder import RNNEncoder
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
from espnet2.asr.encoder.transformer_encoder_multispkr import (
TransformerEncoder as TransformerEncoderMultiSpkr,
)
from espnet2.asr.encoder.vgg_rnn_encoder import VGGRNNEncoder
from espnet2.asr.encoder.wav2vec2_encoder import FairSeqWav2Vec2Encoder
from espnet2.asr.encoder.whisper_encoder import OpenAIWhisperEncoder
from espnet2.asr.espnet_model import ESPnetASRModel
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.asr.frontend.fused import FusedFrontends
from espnet2.asr.frontend.s3prl import S3prlFrontend
from espnet2.asr.frontend.whisper import WhisperFrontend
from espnet2.asr.frontend.windowing import SlidingWindow
from espnet2.asr.maskctc_model import MaskCTCModel
from espnet2.asr.pit_espnet_model import ESPnetASRModel as PITESPnetModel
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet2.asr.postencoder.hugging_face_transformers_postencoder import (
HuggingFaceTransformersPostEncoder,
)
from espnet2.asr.postencoder.length_adaptor_postencoder import LengthAdaptorPostEncoder
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.preencoder.linear import LinearProjection
from espnet2.asr.preencoder.sinc import LightweightSincConvs
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr.specaug.specaug import SpecAug
from espnet2.asr_transducer.joint_network import JointNetwork
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.layers.utterance_mvn import UtteranceMVN
from espnet2.tasks.abs_task import AbsTask
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.torch_utils.initialize import initialize
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import (
AbsPreprocessor,
CommonPreprocessor,
CommonPreprocessor_multi,
)
from espnet2.train.trainer import Trainer
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import float_or_none, int_or_none, str2bool, str_or_none
frontend_choices = ClassChoices(
name="frontend",
classes=dict(
default=DefaultFrontend,
sliding_window=SlidingWindow,
s3prl=S3prlFrontend,
fused=FusedFrontends,
whisper=WhisperFrontend,
),
type_check=AbsFrontend,
default="default",
)
specaug_choices = ClassChoices(
name="specaug",
classes=dict(
specaug=SpecAug,
),
type_check=AbsSpecAug,
default=None,
optional=True,
)
normalize_choices = ClassChoices(
"normalize",
classes=dict(
global_mvn=GlobalMVN,
utterance_mvn=UtteranceMVN,
),
type_check=AbsNormalize,
default="utterance_mvn",
optional=True,
)
model_choices = ClassChoices(
"model",
classes=dict(
espnet=ESPnetASRModel,
maskctc=MaskCTCModel,
pit_espnet=PITESPnetModel,
),
type_check=AbsESPnetModel,
default="espnet",
)
preencoder_choices = ClassChoices(
name="preencoder",
classes=dict(
sinc=LightweightSincConvs,
linear=LinearProjection,
),
type_check=AbsPreEncoder,
default=None,
optional=True,
)
encoder_choices = ClassChoices(
"encoder",
classes=dict(
conformer=ConformerEncoder,
transformer=TransformerEncoder,
transformer_multispkr=TransformerEncoderMultiSpkr,
contextual_block_transformer=ContextualBlockTransformerEncoder,
contextual_block_conformer=ContextualBlockConformerEncoder,
vgg_rnn=VGGRNNEncoder,
rnn=RNNEncoder,
wav2vec2=FairSeqWav2Vec2Encoder,
hubert=FairseqHubertEncoder,
hubert_pretrain=FairseqHubertPretrainEncoder,
torchaudiohubert=TorchAudioHuBERTPretrainEncoder,
longformer=LongformerEncoder,
branchformer=BranchformerEncoder,
whisper=OpenAIWhisperEncoder,
e_branchformer=EBranchformerEncoder,
),
type_check=AbsEncoder,
default="rnn",
)
postencoder_choices = ClassChoices(
name="postencoder",
classes=dict(
hugging_face_transformers=HuggingFaceTransformersPostEncoder,
length_adaptor=LengthAdaptorPostEncoder,
),
type_check=AbsPostEncoder,
default=None,
optional=True,
)
decoder_choices = ClassChoices(
"decoder",
classes=dict(
transformer=TransformerDecoder,
lightweight_conv=LightweightConvolutionTransformerDecoder,
lightweight_conv2d=LightweightConvolution2DTransformerDecoder,
dynamic_conv=DynamicConvolutionTransformerDecoder,
dynamic_conv2d=DynamicConvolution2DTransformerDecoder,
rnn=RNNDecoder,
transducer=TransducerDecoder,
mlm=MLMDecoder,
whisper=OpenAIWhisperDecoder,
hugging_face_transformers=HuggingFaceTransformersDecoder,
s4=S4Decoder,
),
type_check=AbsDecoder,
default=None,
optional=True,
)
preprocessor_choices = ClassChoices(
"preprocessor",
classes=dict(
default=CommonPreprocessor,
multi=CommonPreprocessor_multi,
),
type_check=AbsPreprocessor,
default="default",
)
class ASRTask(AbsTask):
# If you need more than one optimizers, change this value
num_optimizers: int = 1
# Add variable objects configurations
class_choices_list = [
# --frontend and --frontend_conf
frontend_choices,
# --specaug and --specaug_conf
specaug_choices,
# --normalize and --normalize_conf
normalize_choices,
# --model and --model_conf
model_choices,
# --preencoder and --preencoder_conf
preencoder_choices,
# --encoder and --encoder_conf
encoder_choices,
# --postencoder and --postencoder_conf
postencoder_choices,
# --decoder and --decoder_conf
decoder_choices,
# --preprocessor and --preprocessor_conf
preprocessor_choices,
]
# If you need to modify train() or eval() procedures, change Trainer class here
trainer = Trainer
@classmethod
def add_task_arguments(cls, parser: argparse.ArgumentParser):
group = parser.add_argument_group(description="Task related")
# NOTE(kamo): add_arguments(..., required=True) can't be used
# to provide --print_config mode. Instead, it is handled as follows.
required = parser.get_default("required")
required += ["token_list"]
group.add_argument(
"--token_list",
type=str_or_none,
default=None,
help="A text mapping int-id to token",
)
group.add_argument(
"--init",
type=lambda x: str_or_none(x.lower()),
default=None,
help="The initialization method",
choices=[
"chainer",
"xavier_uniform",
"xavier_normal",
"kaiming_uniform",
"kaiming_normal",
None,
],
)
group.add_argument(
"--input_size",
type=int_or_none,
default=None,
help="The number of input dimension of the feature",
)
group.add_argument(
"--ctc_conf",
action=NestedDictAction,
default=get_default_kwargs(CTC),
help="The keyword arguments for CTC class.",
)
group.add_argument(
"--joint_net_conf",
action=NestedDictAction,
default=None,
help="The keyword arguments for joint network class.",
)
group = parser.add_argument_group(description="Preprocess related")
group.add_argument(
"--use_preprocessor",
type=str2bool,
default=True,
help="Apply preprocessing to data or not",
)
group.add_argument(
"--use_lang_prompt",
type=str2bool,
default=False,
help="Use language id as prompt",
)
group.add_argument(
"--use_nlp_prompt",
type=str2bool,
default=False,
help="Use natural language phrases as prompt",
)
group.add_argument(
"--token_type",
type=str,
default="bpe",
choices=[
"bpe",
"char",
"word",
"phn",
"hugging_face",
"whisper_en",
"whisper_multilingual",
],
help="The text will be tokenized " "in the specified level token",
)
group.add_argument(
"--bpemodel",
type=str_or_none,
default=None,
help="The model file of sentencepiece",
)
parser.add_argument(
"--non_linguistic_symbols",
type=str_or_none,
help="non_linguistic_symbols file path",
)
group.add_argument(
"--cleaner",
type=str_or_none,
choices=[
None,
"tacotron",
"jaconv",
"vietnamese",
"whisper_en",
"whisper_basic",
],
default=None,
help="Apply text cleaning",
)
group.add_argument(
"--g2p",
type=str_or_none,
choices=g2p_choices,
default=None,
help="Specify g2p method if --token_type=phn",
)
group.add_argument(
"--speech_volume_normalize",
type=float_or_none,
default=None,
help="Scale the maximum amplitude to the given value.",
)
group.add_argument(
"--rir_scp",
type=str_or_none,
default=None,
help="The file path of rir scp file.",
)
group.add_argument(
"--rir_apply_prob",
type=float,
default=1.0,
help="THe probability for applying RIR convolution.",
)
group.add_argument(
"--noise_scp",
type=str_or_none,
default=None,
help="The file path of noise scp file.",
)
group.add_argument(
"--noise_apply_prob",
type=float,
default=1.0,
help="The probability applying Noise adding.",
)
group.add_argument(
"--noise_db_range",
type=str,
default="13_15",
help="The range of noise decibel level.",
)
group.add_argument(
"--short_noise_thres",
type=float,
default=0.5,
help="If len(noise) / len(speech) is smaller than this threshold during "
"dynamic mixing, a warning will be displayed.",
)
group.add_argument(
"--aux_ctc_tasks",
type=str,
nargs="+",
default=[],
help="Auxillary tasks to train on using CTC loss. ",
)
for class_choices in cls.class_choices_list:
# Append --<name> and --<name>_conf.
# e.g. --encoder and --encoder_conf
class_choices.add_arguments(group)
@classmethod
def build_collate_fn(
cls, args: argparse.Namespace, train: bool
) -> Callable[
[Collection[Tuple[str, Dict[str, np.ndarray]]]],
Tuple[List[str], Dict[str, torch.Tensor]],
]:
assert check_argument_types()
# NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)
@classmethod
def build_preprocess_fn(
cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
assert check_argument_types()
if args.use_preprocessor:
try:
_ = getattr(args, "preprocessor")
except AttributeError:
setattr(args, "preprocessor", "default")
setattr(args, "preprocessor_conf", dict())
except Exception as e:
raise e
preprocessor_class = preprocessor_choices.get_class(args.preprocessor)
retval = preprocessor_class(
train=train,
token_type=args.token_type,
token_list=args.token_list,
bpemodel=args.bpemodel,
non_linguistic_symbols=args.non_linguistic_symbols,
text_cleaner=args.cleaner,
g2p_type=args.g2p,
# NOTE(kamo): Check attribute existence for backward compatibility
rir_scp=args.rir_scp if hasattr(args, "rir_scp") else None,
rir_apply_prob=args.rir_apply_prob
if hasattr(args, "rir_apply_prob")
else 1.0,
noise_scp=args.noise_scp if hasattr(args, "noise_scp") else None,
noise_apply_prob=args.noise_apply_prob
if hasattr(args, "noise_apply_prob")
else 1.0,
noise_db_range=args.noise_db_range
if hasattr(args, "noise_db_range")
else "13_15",
short_noise_thres=args.short_noise_thres
if hasattr(args, "short_noise_thres")
else 0.5,
speech_volume_normalize=args.speech_volume_normalize
if hasattr(args, "rir_scp")
else None,
aux_task_names=args.aux_ctc_tasks
if hasattr(args, "aux_ctc_tasks")
else None,
use_lang_prompt=args.use_lang_prompt
if hasattr(args, "use_lang_prompt")
else None,
**args.preprocessor_conf,
use_nlp_prompt=args.use_nlp_prompt
if hasattr(args, "use_nlp_prompt")
else None,
use_label=args.use_label
if hasattr(args, "use_label")
else None,
use_prefix=args.use_prefix
if hasattr(args, "use_prefix")
else None,
)
else:
retval = None
assert check_return_type(retval)
return retval
@classmethod
def required_data_names(
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
if not inference:
retval = ("speech", "text")
else:
# Recognition mode
retval = ("speech",)
return retval
@classmethod
def optional_data_names(
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
MAX_REFERENCE_NUM = 4
retval = ["text_spk{}".format(n) for n in range(2, MAX_REFERENCE_NUM + 1)]
retval=retval+["prompt"]+["text"]+["label"]+["prefix"]
retval = tuple(retval)
logging.info(f"Optional Data Names: {retval }")
assert check_return_type(retval)
return retval
@classmethod
def build_model(cls, args: argparse.Namespace) -> ESPnetASRModel:
assert check_argument_types()
if isinstance(args.token_list, str):
with open(args.token_list, encoding="utf-8") as f:
token_list = [line.rstrip() for line in f]
# Overwriting token_list to keep it as "portable".
args.token_list = list(token_list)
elif isinstance(args.token_list, (tuple, list)):
token_list = list(args.token_list)
else:
raise RuntimeError("token_list must be str or list")
# If use multi-blank transducer criterion,
# big blank symbols are added just before the standard blank
if args.model_conf.get("transducer_multi_blank_durations", None) is not None:
sym_blank = args.model_conf.get("sym_blank", "<blank>")
blank_idx = token_list.index(sym_blank)
for dur in args.model_conf.get("transducer_multi_blank_durations"):
if f"<blank{dur}>" not in token_list: # avoid this during inference
token_list.insert(blank_idx, f"<blank{dur}>")
args.token_list = token_list
vocab_size = len(token_list)
logging.info(f"Vocabulary size: {vocab_size }")
# 1. frontend
if args.input_size is None:
# Extract features in the model
frontend_class = frontend_choices.get_class(args.frontend)
frontend = frontend_class(**args.frontend_conf)
input_size = frontend.output_size()
else:
# Give features from data-loader
args.frontend = None
args.frontend_conf = {}
frontend = None
input_size = args.input_size
# 2. Data augmentation for spectrogram
if args.specaug is not None:
specaug_class = specaug_choices.get_class(args.specaug)
specaug = specaug_class(**args.specaug_conf)
else:
specaug = None
# 3. Normalization layer
if args.normalize is not None:
normalize_class = normalize_choices.get_class(args.normalize)
normalize = normalize_class(**args.normalize_conf)
else:
normalize = None
# 4. Pre-encoder input block
# NOTE(kan-bayashi): Use getattr to keep the compatibility
if getattr(args, "preencoder", None) is not None:
preencoder_class = preencoder_choices.get_class(args.preencoder)
preencoder = preencoder_class(**args.preencoder_conf)
input_size = preencoder.output_size()
else:
preencoder = None
# 4. Encoder
encoder_class = encoder_choices.get_class(args.encoder)
encoder = encoder_class(input_size=input_size, **args.encoder_conf)
# 5. Post-encoder block
# NOTE(kan-bayashi): Use getattr to keep the compatibility
encoder_output_size = encoder.output_size()
if getattr(args, "postencoder", None) is not None:
postencoder_class = postencoder_choices.get_class(args.postencoder)
postencoder = postencoder_class(
input_size=encoder_output_size, **args.postencoder_conf
)
encoder_output_size = postencoder.output_size()
else:
postencoder = None
# 5. Decoder
if getattr(args, "decoder", None) is not None:
decoder_class = decoder_choices.get_class(args.decoder)
if args.decoder == "transducer":
decoder = decoder_class(
vocab_size,
embed_pad=0,
**args.decoder_conf,
)
joint_network = JointNetwork(
vocab_size,
encoder.output_size(),
decoder.dunits,
**args.joint_net_conf,
)
else:
decoder = decoder_class(
vocab_size=vocab_size,
encoder_output_size=encoder_output_size,
**args.decoder_conf,
)
joint_network = None
else:
decoder = None
joint_network = None
# 6. CTC
ctc = CTC(
odim=vocab_size, encoder_output_size=encoder_output_size, **args.ctc_conf
)
# 7. Build model
try:
model_class = model_choices.get_class(args.model)
except AttributeError:
model_class = model_choices.get_class("espnet")
model = model_class(
vocab_size=vocab_size,
frontend=frontend,
specaug=specaug,
normalize=normalize,
preencoder=preencoder,
encoder=encoder,
postencoder=postencoder,
decoder=decoder,
ctc=ctc,
joint_network=joint_network,
token_list=token_list,
**args.model_conf,
)
# FIXME(kamo): Should be done in model?
# 8. Initialize
if args.init is not None:
initialize(model, args.init)
assert check_return_type(model)
return model
| [] |
2024-01-10 | dynamic-superb/espnet-whisper | espnet2~text~build_tokenizer.py | from pathlib import Path
from typing import Dict, Iterable, Union
from typeguard import check_argument_types
from espnet2.text.abs_tokenizer import AbsTokenizer
from espnet2.text.char_tokenizer import CharTokenizer
from espnet2.text.hugging_face_tokenizer import HuggingFaceTokenizer
from espnet2.text.phoneme_tokenizer import PhonemeTokenizer
from espnet2.text.sentencepiece_tokenizer import SentencepiecesTokenizer
from espnet2.text.whisper_tokenizer import OpenAIWhisperTokenizer
from espnet2.text.word_tokenizer import WordTokenizer
def build_tokenizer(
token_type: str,
bpemodel: Union[Path, str, Iterable[str]] = None,
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
remove_non_linguistic_symbols: bool = False,
space_symbol: str = "<space>",
delimiter: str = None,
g2p_type: str = None,
nonsplit_symbol: Iterable[str] = None,
# tokenization encode (text2token) args, e.g. BPE dropout, only applied in training
encode_kwargs: Dict = None,
) -> AbsTokenizer:
"""A helper function to instantiate Tokenizer"""
assert check_argument_types()
if token_type == "bpe":
if bpemodel is None:
raise ValueError('bpemodel is required if token_type = "bpe"')
if remove_non_linguistic_symbols:
raise RuntimeError(
"remove_non_linguistic_symbols is not implemented for token_type=bpe"
)
if encode_kwargs is None:
encode_kwargs = dict()
return SentencepiecesTokenizer(bpemodel, encode_kwargs)
if token_type == "hugging_face":
if bpemodel is None:
raise ValueError('bpemodel is required if token_type = "hugging_face"')
if remove_non_linguistic_symbols:
raise RuntimeError(
"remove_non_linguistic_symbols is not "
+ "implemented for token_type=hugging_face"
)
return HuggingFaceTokenizer(bpemodel)
elif token_type == "word":
if remove_non_linguistic_symbols and non_linguistic_symbols is not None:
return WordTokenizer(
delimiter=delimiter,
non_linguistic_symbols=non_linguistic_symbols,
remove_non_linguistic_symbols=True,
)
else:
return WordTokenizer(delimiter=delimiter)
elif token_type == "char":
return CharTokenizer(
non_linguistic_symbols=non_linguistic_symbols,
space_symbol=space_symbol,
remove_non_linguistic_symbols=remove_non_linguistic_symbols,
nonsplit_symbols=nonsplit_symbol,
)
elif token_type == "phn":
return PhonemeTokenizer(
g2p_type=g2p_type,
non_linguistic_symbols=non_linguistic_symbols,
space_symbol=space_symbol,
remove_non_linguistic_symbols=remove_non_linguistic_symbols,
)
elif "whisper" in token_type:
        return OpenAIWhisperTokenizer(bpemodel, added_tokens_txt=non_linguistic_symbols)
else:
raise ValueError(
f"token_mode must be one of bpe, word, char or phn: " f"{token_type}"
)
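# Example usage (illustrative; the model path and encode kwargs are placeholders):
#
#     tokenizer = build_tokenizer(
#         token_type="bpe",
#         bpemodel="data/token_list/bpe_unigram5000/bpe.model",
#         encode_kwargs={"enable_sampling": True, "alpha": 0.1},
#     )
#     tokens = tokenizer.text2tokens("hello world")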
| [] |
2024-01-10 | philipp-digianvittorio/SafeWayHome | model_training~scripts~GPT3TrainingDataset.py |
import pandas as pd
import openai
from scripts.SQLAlchemyDB import db_select
articles = db_select("Articles")
api_key = "#"
def GPT_Completion(api_key, prompt, max_tokens=256):
openai.api_key = api_key
response = openai.Completion.create(engine="text-davinci-003",
prompt=prompt,
temperature=0.6,
top_p=1,
max_tokens=max_tokens,
frequency_penalty=0,
presence_penalty=0)
print(response.choices)
return response.choices[0].text
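# Example call (illustrative; requires a valid OpenAI API key in `api_key`):
#
#     completion = GPT_Completion(api_key, "Sag Hallo auf Deutsch.", max_tokens=16)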
task = "Ordne für jeden Vorfall die Straftat einer der folgenden Klassen zu - Betrug, Diebstahl, Hausfriedensbruch, Einbruch, Raub, schwerer Raub, Erpressung, Verkehrsunfall, Verkehrsstraftat, Drogenhandel, Drogenbesitz, Waffenbesitz, Sachbeschädigung, Brandstiftung, fahrlässige Körperverletzung, Körperverletzung, gefährliche Körperverletzung, schwere Körperverletzung, Bedrohung, Widerstand, Exhibitionismus, sexuelle Belästigung, sexueller Übergriff, Vergewaltigung, Beleidigung, Tötungsdelikt, Sonstiges. Benenne den Haupttatort (Straße) und den Beginn der Tatzeit (Uhrzeit) und gib an, ob die Straftat in einem Gebäude begangen wurde: "
ex1 = '''Text: (th) Am Dienstag (22.11.2022) wurden zwei Crackdealer nach beobachteten Verkäufen festgenommen.
Vormittags folgten zivile Polizeibeamte zwei Drogenkonsumenten vom Bahnhofsgebiet zum Schweizer Platz, wo sie auf ihren Dealer trafen und ca. 0,25 Gramm Crack in Empfang nahmen. Die beiden Käufer wurden vor Ort kontrolliert, der 42-jährige wohnsitzlose Dealer im Bereich Eschenheimer Tor festgenommen. Er führte weitere ca. 0,5 Gramm Crack bei sich, welche er versuchte zu schlucken. Es folgte die Einlieferung in das Zentrale Polizeigewahrsam zwecks Prüfung einer richterlichen Vorführung.
Gegen 18:00 Uhr wurden Polizeibeamte auf einen weiteren Dealer im Bereich Am Hauptbahnhof aufmerksam. Sie identifizierten ihn als Verkäufer aus einem wenige Tage zuvor beobachteten Drogenhandel. Gegen den 42-Jährigen bestand zudem ein offener Haftbefehl.'''
result1 = '''[{'crime': ['Drogenhandel',], 'location': 'Schweizer Platz', 'time': 'Vormittag', 'indoors': False}, {'crime': ['Drogenhandel',], 'location': 'Am Hauptbahnhof', 'time': '18:00 Uhr', 'indoors': False}]'''
ex2 = '''Text: (dr) Eine Polizeistreife des 4. Reviers nahm am gestrigen Sonntag, den 20. November 2022, einen 19-Jährigen im Gutleutviertel fest, der sich bei einer Personenkontrolle besonders aggressiv zeigte. Bei ihm stellten sie auch Rauschgift sicher.
Eine Ruhestörung in der Gutleutstraße führte gegen 22:10 Uhr zu einer Personenkontrolle eines 19-Jährigen. Der junge Mann war offensichtlich nicht mit der polizeilichen Maßnahme einverstanden und machte dies deutlich, indem er Tritte und Schläge gegen die ihn kontrollierenden Beamten austeilte. Währenddessen versuchte er auch immer wieder ein Einhandmesser aus seiner Jackentasche zu ziehen, was jedoch unterbunden werden konnte. Den Beamten gelang es, den 19-Jährigen unter Widerstand festzunehmen. Als sie ihn durchsuchten, stießen sie auf Betäubungsmittel, darunter rund 90 Gramm Amphetamin und über 90 Ecstasy-Tabletten. Bei einer anschließenden Durchsuchung an der Anschrift seiner Eltern fanden die Beamten in seinem "Kinderzimmer" weitere Substanzen zur Herstellung von Drogen auf sowie verbotene Gegenstände. Sie stellten alle Beweismittel sicher.
Für den 19-Jährigen, welcher über keinen festen Wohnsitz verfügt, ging es in der Folge in die Haftzellen. Ihn erwartet nun ein Strafverfahren wegen des Verdachts des illegalen Drogenhandels und des Widerstands gegen Vollstreckungsbeamte. Er soll heute dem Haftrichter vorgeführt werden.'''
result2 = '''[{'crime': ['Sonstiges', 'Drogenhandel', 'Widerstand',], 'location': 'Gutleutstraße', 'time': '22:10 Uhr', 'indoors': False}]'''
ex3 = '''Text: (wie) Ein berauschter Autofahrer ohne Führerschein ist in der Nacht von Freitag auf Samstag bei Hattersheim vor der Polizei geflohen, konnte aber festgenommen werden.
Eine Streife der Autobahnpolizei wollte gegen 01:20 Uhr einen blauen Audi kontrollieren, da er mit eingeschalteter Nebelschlussleuchte auf der A 66 unterwegs war. Der Fahrer missachtete allerdings die Anhaltezeichen und wendete sein Fahrzeug, nachdem die Fahrzeuge bei Zeilsheim von der Autobahn abgefahren waren. Der Audi floh durch Zeilsheim und Sindlingen, überholte einen Linienbus mit hoher Geschwindigkeit und gefährdete in der Sindlinger Bahnstraße einen Fußgänger, der gerade einen Zebrastreifen nutzen wollte, aber rechtzeitig auf den Bürgersteig zurücktrat. Die Fahrt ging weiter bis nach Hattersheim, wo auch ein Fußgänger an einem Zebrastreifen gefährdet wurde. Der 18-Jährige aus Straßburg stand offensichtlich unter dem Einfluss von Betäubungsmitteln und war nicht im Besitz einer Fahrerlaubnis.
'''
result3 = '''[{'crime': ['Verkehrsstraftat',], 'location': 'Sindlinger Bahnstraße', 'time': '01:20 Uhr', 'indoors': False}]'''
ex4 = '''Text: (lo) In der heutigen Nacht wurde ein 59-jähriger Mann in der Altstadt von einem bislang unbekannten Täter angegriffen und lebensgefährlich verletzt. Die Polizei hat die Ermittlungen wegen eines versuchten Tötungsdeliktes aufgenommen.
Gegen 00:50 Uhr fanden Passanten den 59-Jährigen stark blutend im Bereich der Neuen Kräme. Der daraufhin alarmierte Rettungswagen verbrachte den Geschädigten in ein umliegendes Krankenhaus. Hier konnten mehrere Einstichstellen im Oberkörper des Geschädigten festgestellt werden. Nach Angaben des Geschädigten befand er sich bis ca. 00.00 Uhr in einer Lokalität am Römerberg. Von hier aus sei er in Richtung Neue Kräme fußläufig unterwegs gewesen.
Die Frankfurter Mordkommission ermittelt nun wegen eines versuchten Tötungsdelikts und sucht weitere Zeugen.'''
result4 = '''[{'crime': ['Tötungsdelikt',], 'location': 'Neue Kräme', 'time': '00:50 Uhr', 'indoors': False}]'''
ex5 = '''Text: POL-F: 221118 - 1336 Frankfurt-Schwanheim: Passanten halten Räuber fest Frankfurt (ots) (dr) In der Nacht von Mittwoch auf Donnerstag kam es in Schwanheim zu einem Straßenraub, bei dem ein 47-jähriger Mann einer 18-Jährigen gewaltsam das Mobiltelefon entwendete. Die 18-jährige Geschädigte und der 47-jährige Beschuldigte befanden sich zunächst in einem Bus der Linie 51 in Richtung Schwanheim. Als der Bus gegen 0:45 Uhr in der Geisenheimer Straße an der Haltestelle Kelsterbach Weg anhielt und die Geschädigte ausstieg, folgte ihr der Beschuldigte. Plötzlich schlug ihr der Mann mit der Faust ins Gesicht, sodass die Geschädigte zu Boden fiel und sich leicht verletzte. Nach dem Sturz entriss ihr der 47-Jährige ihr Mobiltelefon und flüchtete mit diesem in westliche Richtung. Gegen den 47-Jährigen wurde aufgrund des Straßenraubes ein Strafverfahren eingeleitet '''
result5 = '''[{'crime': ['Raub',], 'location': 'Geisenheimer Straße', 'time': '00:45 Uhr', 'indoors': False}]'''
prompt = task + "\n\n" + ex1 + "\n" + result1 + "\n" + "###" + "\n" + ex2 + "\n" + result2 + "\n" + "###" + "\n" + ex3 + "\n" + result3 + "\n" + "###" + "\n" + ex4 + "\n" + result4 + "\n" + "###" + "\n" + ex5 + "\n" + result5 + "\n" + "###" + "\n"
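# The assembled few-shot prompt is: the task instruction, followed by five
# "Text: <Artikel>" / result pairs separated by "###"; a new article appended
# after the final "###" lets the model continue with the matching list of
# crime dicts, which extract_crime_data() below parses via eval().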
len(prompt.split(" "))  # rough word count of the few-shot prompt
def extract_crime_data(articles):
crime_list = list()
for idx in range(len(articles)):
text = articles[idx]["headline"] + "\n" + articles[idx]["article"]
try:
y = eval(GPT_Completion(api_key, prompt + text))
        except Exception:
            # fall back to an empty record if the completion is not a valid Python literal
            y = [{'crime': [], 'location': None, 'time': None, 'indoors': False}]
for d in y:
cl = {"hq_id": articles[idx]["hq_id"],
"article_id": articles[idx]["id"],
"date": articles[idx]["date"],
"crime": d["crime"],
"location": d["location"],
"time": d["time"],
"indoors": d["indoors"]}
crime_list.append(cl)
return crime_list
crime_list = extract_crime_data(articles)
hq_id = [x["hq_id"] for x in crime_list]
id_ = [x["article_id"] for x in crime_list]
date = [x["date"] for x in crime_list]
crime = [x["crime"] for x in crime_list]
location = [x["location"] for x in crime_list]
time = [x["time"] for x in crime_list]
indoors = [x["indoors"] for x in crime_list]
df = pd.DataFrame({"hq_id": hq_id,
"article_id": id_,
"date": date,
"crime": crime,
"location": location,
"time": time,
"indoors": indoors})
hq_id = [a["hq_id"] for a in articles]
id_ = [a["id"] for a in articles]
art = [a["headline"] + "\n" + a["article"] for a in articles]
article_df = pd.DataFrame({"hq_id": hq_id,
                           "article_id": id_,
                           "article": art})
dup_crime_ids = df[df["article_id"].duplicated()].index
dup_article_ids = article_df[article_df["article_id"].duplicated()].index
for i in range(len(dup_crime_ids)):
if (len(dup_article_ids) == 0) or (dup_crime_ids[0] != dup_article_ids[0]):
article_df = article_df.append(article_df.loc[dup_crime_ids[0]-1]).sort_index().reset_index(drop=True)
dup_crime_ids = dup_crime_ids[1:]
dup_article_ids = dup_article_ids + 1
else:
dup_crime_ids = dup_crime_ids[1:]
dup_article_ids = dup_article_ids[1:]
df["article"] = article_df["article"]
df = df[(df["crime"].astype(str) != "[]") & (df["location"].str.len() > 3)]
df["location"] = df["location"].apply(lambda x: x.split("/")[0].split(",")[0])
df.to_excel("gpt_output_all.xlsx", index=False)
| [
"taskba5197e0-90e1-40d7-b40b-8f4b50777528\n\nex1ba5197e0-90e1-40d7-b40b-8f4b50777528\nresult1ba5197e0-90e1-40d7-b40b-8f4b50777528\n###\nex2ba5197e0-90e1-40d7-b40b-8f4b50777528\nresult2ba5197e0-90e1-40d7-b40b-8f4b50777528\n###\nex3ba5197e0-90e1-40d7-b40b-8f4b50777528\nresult3ba5197e0-90e1-40d7-b40b-8f4b50777528\n###\nex4ba5197e0-90e1-40d7-b40b-8f4b50777528\nresult4ba5197e0-90e1-40d7-b40b-8f4b50777528\n###\nex5ba5197e0-90e1-40d7-b40b-8f4b50777528\n[{'crime': ['Raub',], 'location': 'Geisenheimer Straße', 'time': '00:45 Uhr', 'indoors': False}]\n###\n",
"Ordne für jeden Vorfall die Straftat einer der folgenden Klassen zu - Betrug, Diebstahl, Hausfriedensbruch, Einbruch, Raub, schwerer Raub, Erpressung, Verkehrsunfall, Verkehrsstraftat, Drogenhandel, Drogenbesitz, Waffenbesitz, Sachbeschädigung, Brandstiftung, fahrlässige Körperverletzung, Körperverletzung, gefährliche Körperverletzung, schwere Körperverletzung, Bedrohung, Widerstand, Exhibitionismus, sexuelle Belästigung, sexueller Übergriff, Vergewaltigung, Beleidigung, Tötungsdelikt, Sonstiges. Benenne den Haupttatort (Straße) und den Beginn der Tatzeit (Uhrzeit) und gib an, ob die Straftat in einem Gebäude begangen wurde: \n\nText: (th) Am Dienstag (22.11.2022) wurden zwei Crackdealer nach beobachteten Verkäufen festgenommen.\nVormittags folgten zivile Polizeibeamte zwei Drogenkonsumenten vom Bahnhofsgebiet zum Schweizer Platz, wo sie auf ihren Dealer trafen und ca. 0,25 Gramm Crack in Empfang nahmen. Die beiden Käufer wurden vor Ort kontrolliert, der 42-jährige wohnsitzlose Dealer im Bereich Eschenheimer Tor festgenommen. Er führte weitere ca. 0,5 Gramm Crack bei sich, welche er versuchte zu schlucken. Es folgte die Einlieferung in das Zentrale Polizeigewahrsam zwecks Prüfung einer richterlichen Vorführung.\nGegen 18:00 Uhr wurden Polizeibeamte auf einen weiteren Dealer im Bereich Am Hauptbahnhof aufmerksam. Sie identifizierten ihn als Verkäufer aus einem wenige Tage zuvor beobachteten Drogenhandel. Gegen den 42-Jährigen bestand zudem ein offener Haftbefehl.\n[{'crime': ['Drogenhandel',], 'location': 'Schweizer Platz', 'time': 'Vormittag', 'indoors': False}, {'crime': ['Drogenhandel',], 'location': 'Am Hauptbahnhof', 'time': '18:00 Uhr', 'indoors': False}]\n###\nText: (dr) Eine Polizeistreife des 4. Reviers nahm am gestrigen Sonntag, den 20. November 2022, einen 19-Jährigen im Gutleutviertel fest, der sich bei einer Personenkontrolle besonders aggressiv zeigte. Bei ihm stellten sie auch Rauschgift sicher.\nEine Ruhestörung in der Gutleutstraße führte gegen 22:10 Uhr zu einer Personenkontrolle eines 19-Jährigen. Der junge Mann war offensichtlich nicht mit der polizeilichen Maßnahme einverstanden und machte dies deutlich, indem er Tritte und Schläge gegen die ihn kontrollierenden Beamten austeilte. Währenddessen versuchte er auch immer wieder ein Einhandmesser aus seiner Jackentasche zu ziehen, was jedoch unterbunden werden konnte. Den Beamten gelang es, den 19-Jährigen unter Widerstand festzunehmen. Als sie ihn durchsuchten, stießen sie auf Betäubungsmittel, darunter rund 90 Gramm Amphetamin und über 90 Ecstasy-Tabletten. Bei einer anschließenden Durchsuchung an der Anschrift seiner Eltern fanden die Beamten in seinem \"Kinderzimmer\" weitere Substanzen zur Herstellung von Drogen auf sowie verbotene Gegenstände. Sie stellten alle Beweismittel sicher.\nFür den 19-Jährigen, welcher über keinen festen Wohnsitz verfügt, ging es in der Folge in die Haftzellen. Ihn erwartet nun ein Strafverfahren wegen des Verdachts des illegalen Drogenhandels und des Widerstands gegen Vollstreckungsbeamte. 
Er soll heute dem Haftrichter vorgeführt werden.\n[{'crime': ['Sonstiges', 'Drogenhandel', 'Widerstand',], 'location': 'Gutleutstraße', 'time': '22:10 Uhr', 'indoors': False}]\n###\nText: (wie) Ein berauschter Autofahrer ohne Führerschein ist in der Nacht von Freitag auf Samstag bei Hattersheim vor der Polizei geflohen, konnte aber festgenommen werden.\nEine Streife der Autobahnpolizei wollte gegen 01:20 Uhr einen blauen Audi kontrollieren, da er mit eingeschalteter Nebelschlussleuchte auf der A 66 unterwegs war. Der Fahrer missachtete allerdings die Anhaltezeichen und wendete sein Fahrzeug, nachdem die Fahrzeuge bei Zeilsheim von der Autobahn abgefahren waren. Der Audi floh durch Zeilsheim und Sindlingen, überholte einen Linienbus mit hoher Geschwindigkeit und gefährdete in der Sindlinger Bahnstraße einen Fußgänger, der gerade einen Zebrastreifen nutzen wollte, aber rechtzeitig auf den Bürgersteig zurücktrat. Die Fahrt ging weiter bis nach Hattersheim, wo auch ein Fußgänger an einem Zebrastreifen gefährdet wurde. Der 18-Jährige aus Straßburg stand offensichtlich unter dem Einfluss von Betäubungsmitteln und war nicht im Besitz einer Fahrerlaubnis.\n\n[{'crime': ['Verkehrsstraftat',], 'location': 'Sindlinger Bahnstraße', 'time': '01:20 Uhr', 'indoors': False}]\n###\nText: (lo) In der heutigen Nacht wurde ein 59-jähriger Mann in der Altstadt von einem bislang unbekannten Täter angegriffen und lebensgefährlich verletzt. Die Polizei hat die Ermittlungen wegen eines versuchten Tötungsdeliktes aufgenommen.\nGegen 00:50 Uhr fanden Passanten den 59-Jährigen stark blutend im Bereich der Neuen Kräme. Der daraufhin alarmierte Rettungswagen verbrachte den Geschädigten in ein umliegendes Krankenhaus. Hier konnten mehrere Einstichstellen im Oberkörper des Geschädigten festgestellt werden. Nach Angaben des Geschädigten befand er sich bis ca. 00.00 Uhr in einer Lokalität am Römerberg. Von hier aus sei er in Richtung Neue Kräme fußläufig unterwegs gewesen.\nDie Frankfurter Mordkommission ermittelt nun wegen eines versuchten Tötungsdelikts und sucht weitere Zeugen.\n[{'crime': ['Tötungsdelikt',], 'location': 'Neue Kräme', 'time': '00:50 Uhr', 'indoors': False}]\n###\nText: POL-F: 221118 - 1336 Frankfurt-Schwanheim: Passanten halten Räuber fest Frankfurt (ots) (dr) In der Nacht von Mittwoch auf Donnerstag kam es in Schwanheim zu einem Straßenraub, bei dem ein 47-jähriger Mann einer 18-Jährigen gewaltsam das Mobiltelefon entwendete. Die 18-jährige Geschädigte und der 47-jährige Beschuldigte befanden sich zunächst in einem Bus der Linie 51 in Richtung Schwanheim. Als der Bus gegen 0:45 Uhr in der Geisenheimer Straße an der Haltestelle Kelsterbach Weg anhielt und die Geschädigte ausstieg, folgte ihr der Beschuldigte. Plötzlich schlug ihr der Mann mit der Faust ins Gesicht, sodass die Geschädigte zu Boden fiel und sich leicht verletzte. Nach dem Sturz entriss ihr der 47-Jährige ihr Mobiltelefon und flüchtete mit diesem in westliche Richtung. Gegen den 47-Jährigen wurde aufgrund des Straßenraubes ein Strafverfahren eingeleitet \n[{'crime': ['Raub',], 'location': 'Geisenheimer Straße', 'time': '00:45 Uhr', 'indoors': False}]\n###\n"
] |
2024-01-10 | adwaitmandge/healing-horizon | api~embeddings~service_context.py | from langchain.embeddings import HuggingFaceHubEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from llama_index import LangchainEmbedding, ServiceContext, LLMPredictor
from langchain import OpenAI, HuggingFaceHub
from langchain.chat_models import ChatOpenAI
import embeddings.prompt_helper as prompt_helper
def get_service_context(embed="HF", llm="OpenAI"):
print(embed)
print(llm)
embed_model = (
LangchainEmbedding(HuggingFaceHubEmbeddings())
if embed == "HF"
else LangchainEmbedding(OpenAIEmbeddings())
)
llm_predictor = (
LLMPredictor(
llm=OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=256)
)
if llm == "OpenAI"
else LLMPredictor(
llm=HuggingFaceHub(
repo_id="google/flan-t5-xxl",
model_kwargs={"temperature": 0.4, "max_length": 400},
)
)
)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, embed_model=embed_model, chunk_size_limit=512
)
return service_context
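# Example (illustrative; the HuggingFace paths need a HUGGINGFACEHUB_API_TOKEN,
# the OpenAI paths an OPENAI_API_KEY):
#
#     service_context = get_service_context(embed="HF", llm="OpenAI")
#     # e.g. GPTVectorStoreIndex.from_documents(docs, service_context=service_context)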
| [] |
2024-01-10 | adwaitmandge/healing-horizon | api~Lib~site-packages~langchainplus_sdk~run_trees.py | """Schemas for the langchainplus API."""
from __future__ import annotations
import logging
import os
from concurrent.futures import Future, ThreadPoolExecutor
from datetime import datetime
from typing import Dict, List, Optional, Union
from uuid import UUID, uuid4
from pydantic import Field, root_validator, validator
from langchainplus_sdk.client import LangChainPlusClient
from langchainplus_sdk.schemas import RunBase, RunTypeEnum, infer_default_run_values
logger = logging.getLogger(__name__)
def _make_thread_pool() -> ThreadPoolExecutor:
"""Ensure a thread pool exists in the current context."""
return ThreadPoolExecutor(max_workers=1)
class RunTree(RunBase):
"""Run Schema with back-references for posting runs."""
name: str
id: UUID = Field(default_factory=uuid4)
parent_run: Optional[RunTree] = Field(default=None, exclude=True)
child_runs: List[RunTree] = Field(
default_factory=list,
exclude={"__all__": {"parent_run_id"}},
)
session_name: str = Field(
default_factory=lambda: os.environ.get("LANGCHAIN_SESSION", "default")
)
session_id: Optional[UUID] = Field(default=None)
execution_order: int = 1
child_execution_order: int = Field(default=1, exclude=True)
client: LangChainPlusClient = Field(
default_factory=LangChainPlusClient, exclude=True
)
executor: ThreadPoolExecutor = Field(
default_factory=_make_thread_pool, exclude=True
)
class Config:
arbitrary_types_allowed = True
@validator("executor", pre=True)
def validate_executor(cls, v: ThreadPoolExecutor) -> ThreadPoolExecutor:
"""Ensure the executor is running."""
if v._shutdown:
raise ValueError("Executor has been shutdown.")
return v
@root_validator(pre=True)
def infer_defaults(cls, values: dict) -> dict:
"""Assign name to the run."""
values = infer_default_run_values(values)
if values.get("child_runs") is None:
values["child_runs"] = []
return values
def end(
self,
*,
outputs: Optional[Dict] = None,
error: Optional[str] = None,
end_time: Optional[datetime] = None,
) -> None:
"""Set the end time of the run and all child runs."""
self.end_time = end_time or datetime.utcnow()
if outputs is not None:
self.outputs = outputs
if error is not None:
self.error = error
if self.parent_run:
self.parent_run.child_execution_order = max(
self.parent_run.child_execution_order,
self.child_execution_order,
)
def create_child(
self,
name: str,
run_type: Union[str, RunTypeEnum],
*,
run_id: Optional[UUID] = None,
serialized: Optional[Dict] = None,
inputs: Optional[Dict] = None,
outputs: Optional[Dict] = None,
error: Optional[str] = None,
reference_example_id: Optional[UUID] = None,
start_time: Optional[datetime] = None,
end_time: Optional[datetime] = None,
extra: Optional[Dict] = None,
) -> RunTree:
"""Add a child run to the run tree."""
execution_order = self.child_execution_order + 1
serialized_ = serialized or {"name": name}
run = RunTree(
name=name,
id=run_id or uuid4(),
serialized=serialized_,
inputs=inputs or {},
outputs=outputs or {},
error=error,
run_type=run_type,
reference_example_id=reference_example_id,
start_time=start_time or datetime.utcnow(),
end_time=end_time or datetime.utcnow(),
execution_order=execution_order,
child_execution_order=execution_order,
extra=extra or {},
parent_run=self,
session_name=self.session_name,
client=self.client,
executor=self.executor,
)
self.child_runs.append(run)
return run
def post(self, exclude_child_runs: bool = True) -> Future:
"""Post the run tree to the API asynchronously."""
exclude = {"child_runs"} if exclude_child_runs else None
kwargs = self.dict(exclude=exclude, exclude_none=True)
return self.executor.submit(
self.client.create_run,
**kwargs,
)
def patch(self) -> Future:
"""Patch the run tree to the API in a background thread."""
return self.executor.submit(
self.client.update_run,
run_id=self.id,
outputs=self.outputs.copy() if self.outputs else None,
error=self.error,
parent_run_id=self.parent_run_id,
reference_example_id=self.reference_example_id,
)
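# Example trace (illustrative; uses the default client, so LANGCHAIN_ENDPOINT /
# LANGCHAIN_API_KEY must point at a reachable LangChain+ instance):
#
#     parent = RunTree(name="my_chain", run_type="chain", inputs={"text": "hi"})
#     child = parent.create_child("my_llm", "llm", inputs={"prompt": "hi"})
#     child.end(outputs={"generation": "hello"})
#     parent.end(outputs={"output": "hello"})
#     parent.post(exclude_child_runs=False)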
| [] |
2024-01-10 | adwaitmandge/healing-horizon | api~Lib~site-packages~langchainplus_sdk~client.py | from __future__ import annotations
import logging
import socket
from datetime import datetime
from io import BytesIO
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterator,
Mapping,
Optional,
Sequence,
Tuple,
Union,
)
from urllib.parse import urlsplit
from uuid import UUID
import requests
from pydantic import BaseSettings, Field, root_validator
from requests import Response
from tenacity import (
before_sleep_log,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchainplus_sdk.evaluation.evaluator import RunEvaluator
from langchainplus_sdk.schemas import (
APIFeedbackSource,
Dataset,
DatasetCreate,
Example,
ExampleCreate,
ExampleUpdate,
Feedback,
FeedbackCreate,
FeedbackSourceBase,
FeedbackSourceType,
ListFeedbackQueryParams,
ListRunsQueryParams,
ModelFeedbackSource,
Run,
RunCreate,
RunTypeEnum,
RunUpdate,
TracerSession,
)
from langchainplus_sdk.utils import (
LangChainPlusAPIError,
LangChainPlusError,
LangChainPlusUserError,
raise_for_status_with_text,
request_with_retries,
xor_args,
)
if TYPE_CHECKING:
import pandas as pd
logger = logging.getLogger(__name__)
def _is_localhost(url: str) -> bool:
"""Check if the URL is localhost."""
try:
netloc = urlsplit(url).netloc.split(":")[0]
ip = socket.gethostbyname(netloc)
return ip == "127.0.0.1" or ip.startswith("0.0.0.0") or ip.startswith("::")
except socket.gaierror:
return False
ID_TYPE = Union[UUID, str]
def _default_retry_config() -> Dict[str, Any]:
return dict(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10),
retry=retry_if_exception_type(LangChainPlusAPIError),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
class LangChainPlusClient(BaseSettings):
"""Client for interacting with the LangChain+ API."""
api_key: Optional[str] = Field(default=None, env="LANGCHAIN_API_KEY")
api_url: str = Field(default="http://localhost:1984", env="LANGCHAIN_ENDPOINT")
retry_config: Mapping[str, Any] = Field(
default_factory=_default_retry_config, exclude=True
)
@root_validator(pre=True)
def validate_api_key_if_hosted(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Verify API key is provided if url not localhost."""
api_url: str = values.get("api_url", "http://localhost:1984")
api_key: Optional[str] = values.get("api_key")
if not _is_localhost(api_url):
if not api_key:
raise LangChainPlusUserError(
"API key must be provided when using hosted LangChain+ API"
)
return values
def _repr_html_(self) -> str:
"""Return an HTML representation of the instance with a link to the URL."""
if _is_localhost(self.api_url):
link = "http://localhost"
elif "dev" in self.api_url.split(".", maxsplit=1)[0]:
link = "https://dev.langchain.plus"
else:
link = "https://www.langchain.plus"
return f'<a href="{link}", target="_blank" rel="noopener">LangChain+ Client</a>'
def __repr__(self) -> str:
"""Return a string representation of the instance with a link to the URL."""
return f"LangChainPlusClient (API URL: {self.api_url})"
@property
def _headers(self) -> Dict[str, str]:
"""Get the headers for the API request."""
headers = {}
if self.api_key:
headers["x-api-key"] = self.api_key
return headers
def _get_with_retries(
self, path: str, params: Optional[Dict[str, Any]] = None
) -> Response:
return request_with_retries(
"get",
f"{self.api_url}{path}",
request_kwargs={"params": params, "headers": self._headers},
retry_config=self.retry_config,
)
def upload_dataframe(
self,
df: pd.DataFrame,
name: str,
description: str,
input_keys: Sequence[str],
output_keys: Sequence[str],
) -> Dataset:
"""Upload a dataframe as individual examples to the LangChain+ API."""
dataset = self.create_dataset(dataset_name=name, description=description)
for row in df.itertuples():
inputs = {key: getattr(row, key) for key in input_keys}
outputs = {key: getattr(row, key) for key in output_keys}
self.create_example(inputs, outputs=outputs, dataset_id=dataset.id)
return dataset
def upload_csv(
self,
csv_file: Union[str, Tuple[str, BytesIO]],
description: str,
input_keys: Sequence[str],
output_keys: Sequence[str],
) -> Dataset:
"""Upload a CSV file to the LangChain+ API."""
files = {"file": csv_file}
data = {
"input_keys": ",".join(input_keys),
"output_keys": ",".join(output_keys),
"description": description,
}
response = requests.post(
self.api_url + "/datasets/upload",
headers=self._headers,
data=data,
files=files,
)
raise_for_status_with_text(response)
result = response.json()
# TODO: Make this more robust server-side
if "detail" in result and "already exists" in result["detail"]:
file_name = csv_file if isinstance(csv_file, str) else csv_file[0]
file_name = file_name.split("/")[-1]
raise ValueError(f"Dataset {file_name} already exists")
return Dataset(**result)
def create_run(
self,
name: str,
inputs: Dict[str, Any],
run_type: Union[str, RunTypeEnum],
**kwargs: Any,
) -> Run:
"""Persist a run to the LangChain+ API."""
run_create = RunCreate(
**kwargs,
name=name,
inputs=inputs,
run_type=run_type,
)
headers = {**self._headers, "Accept": "application/json"}
request_with_retries(
"post",
f"{self.api_url}/runs",
request_kwargs={
"data": run_create.json(exclude_none=True),
"headers": headers,
},
retry_config=self.retry_config,
)
return Run(**run_create.dict(exclude_none=True))
def update_run(
self,
run_id: ID_TYPE,
**kwargs: Any,
) -> None:
"""Update a run to the LangChain+ API."""
run_update = RunUpdate(
**kwargs,
)
headers = {**self._headers, "Accept": "application/json"}
request_with_retries(
"patch",
f"{self.api_url}/runs/{run_id}",
request_kwargs={"data": run_update.json(), "headers": headers},
retry_config=self.retry_config,
)
def read_run(self, run_id: ID_TYPE) -> Run:
"""Read a run from the LangChain+ API."""
response = self._get_with_retries(f"/runs/{run_id}")
return Run(**response.json())
def list_runs(
self,
*,
session_id: Optional[ID_TYPE] = None,
session_name: Optional[str] = None,
run_type: Optional[str] = None,
**kwargs: Any,
) -> Iterator[Run]:
"""List runs from the LangChain+ API."""
if session_name is not None:
if session_id is not None:
raise ValueError("Only one of session_id or session_name may be given")
session_id = self.read_session(session_name=session_name).id
query_params = ListRunsQueryParams(
session_id=session_id, run_type=run_type, **kwargs
)
response = self._get_with_retries(
"/runs", params=query_params.dict(exclude_none=True)
)
yield from [Run(**run) for run in response.json()]
def delete_run(self, run_id: ID_TYPE) -> None:
"""Delete a run from the LangChain+ API."""
response = requests.delete(
f"{self.api_url}/runs/{run_id}",
headers=self._headers,
)
raise_for_status_with_text(response)
return
def create_session(
self, session_name: str, session_extra: Optional[dict] = None
) -> TracerSession:
"""Create a session on the LangChain+ API."""
endpoint = f"{self.api_url}/sessions?upsert=true"
body = {
"name": session_name,
"extra": session_extra,
}
response = requests.post(
endpoint,
headers=self._headers,
json=body,
)
raise_for_status_with_text(response)
return TracerSession(**response.json())
@xor_args(("session_id", "session_name"))
def read_session(
self, *, session_id: Optional[str] = None, session_name: Optional[str] = None
) -> TracerSession:
"""Read a session from the LangChain+ API."""
path = "/sessions"
params: Dict[str, Any] = {"limit": 1}
if session_id is not None:
path += f"/{session_id}"
elif session_name is not None:
params["name"] = session_name
else:
raise ValueError("Must provide session_name or session_id")
response = self._get_with_retries(path, params=params)
result = response.json()
if isinstance(result, list):
if len(result) == 0:
raise LangChainPlusError(f"Session {session_name} not found")
return TracerSession(**result[0])
return TracerSession(**response.json())
def list_sessions(self) -> Iterator[TracerSession]:
"""List sessions from the LangChain+ API."""
response = self._get_with_retries("/sessions")
yield from [TracerSession(**session) for session in response.json()]
@xor_args(("session_name", "session_id"))
def delete_session(
self, *, session_name: Optional[str] = None, session_id: Optional[str] = None
) -> None:
"""Delete a session from the LangChain+ API."""
if session_name is not None:
session_id = self.read_session(session_name=session_name).id
elif session_id is None:
raise ValueError("Must provide session_name or session_id")
response = requests.delete(
self.api_url + f"/sessions/{session_id}",
headers=self._headers,
)
raise_for_status_with_text(response)
return None
def create_dataset(
self, dataset_name: str, *, description: Optional[str] = None
) -> Dataset:
"""Create a dataset in the LangChain+ API."""
dataset = DatasetCreate(
name=dataset_name,
description=description,
)
response = requests.post(
self.api_url + "/datasets",
headers=self._headers,
data=dataset.json(),
)
raise_for_status_with_text(response)
return Dataset(**response.json())
@xor_args(("dataset_name", "dataset_id"))
def read_dataset(
self,
*,
dataset_name: Optional[str] = None,
dataset_id: Optional[ID_TYPE] = None,
) -> Dataset:
path = "/datasets"
params: Dict[str, Any] = {"limit": 1}
if dataset_id is not None:
path += f"/{dataset_id}"
elif dataset_name is not None:
params["name"] = dataset_name
else:
raise ValueError("Must provide dataset_name or dataset_id")
response = self._get_with_retries(
path,
params=params,
)
result = response.json()
if isinstance(result, list):
if len(result) == 0:
raise LangChainPlusError(f"Dataset {dataset_name} not found")
return Dataset(**result[0])
return Dataset(**result)
def list_datasets(self, limit: int = 100) -> Iterator[Dataset]:
"""List the datasets on the LangChain+ API."""
response = self._get_with_retries("/datasets", params={"limit": limit})
yield from [Dataset(**dataset) for dataset in response.json()]
@xor_args(("dataset_id", "dataset_name"))
def delete_dataset(
self,
*,
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
) -> Dataset:
"""Delete a dataset by ID or name."""
if dataset_name is not None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
if dataset_id is None:
raise ValueError("Must provide either dataset name or ID")
response = requests.delete(
f"{self.api_url}/datasets/{dataset_id}",
headers=self._headers,
)
raise_for_status_with_text(response)
return Dataset(**response.json())
@xor_args(("dataset_id", "dataset_name"))
def create_example(
self,
inputs: Mapping[str, Any],
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
created_at: Optional[datetime] = None,
outputs: Optional[Mapping[str, Any]] = None,
) -> Example:
"""Create a dataset example in the LangChain+ API."""
if dataset_id is None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
data = {
"inputs": inputs,
"outputs": outputs,
"dataset_id": dataset_id,
}
if created_at:
data["created_at"] = created_at.isoformat()
example = ExampleCreate(**data)
response = requests.post(
f"{self.api_url}/examples", headers=self._headers, data=example.json()
)
raise_for_status_with_text(response)
result = response.json()
return Example(**result)
def read_example(self, example_id: ID_TYPE) -> Example:
"""Read an example from the LangChain+ API."""
response = self._get_with_retries(f"/examples/{example_id}")
return Example(**response.json())
def list_examples(
self, dataset_id: Optional[ID_TYPE] = None, dataset_name: Optional[str] = None
) -> Iterator[Example]:
"""List the datasets on the LangChain+ API."""
params = {}
if dataset_id is not None:
params["dataset"] = dataset_id
elif dataset_name is not None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
params["dataset"] = dataset_id
else:
pass
response = self._get_with_retries("/examples", params=params)
yield from [Example(**dataset) for dataset in response.json()]
def update_example(
self,
example_id: str,
*,
inputs: Optional[Dict[str, Any]] = None,
outputs: Optional[Mapping[str, Any]] = None,
dataset_id: Optional[ID_TYPE] = None,
) -> Dict[str, Any]:
"""Update a specific example."""
example = ExampleUpdate(
inputs=inputs,
outputs=outputs,
dataset_id=dataset_id,
)
response = requests.patch(
f"{self.api_url}/examples/{example_id}",
headers=self._headers,
data=example.json(exclude_none=True),
)
raise_for_status_with_text(response)
return response.json()
def delete_example(self, example_id: ID_TYPE) -> Example:
"""Delete an example by ID."""
response = requests.delete(
f"{self.api_url}/examples/{example_id}",
headers=self._headers,
)
raise_for_status_with_text(response)
return Example(**response.json())
def evaluate_run(
self,
run: Union[Run, str, UUID],
evaluator: RunEvaluator,
*,
source_info: Optional[Dict[str, Any]] = None,
) -> Feedback:
"""Evaluate a run."""
if isinstance(run, (str, UUID)):
run_ = self.read_run(run)
elif isinstance(run, Run):
run_ = run
else:
raise TypeError(f"Invalid run type: {type(run)}")
if run_.reference_example_id is not None:
reference_example = self.read_example(run_.reference_example_id)
else:
reference_example = None
feedback_result = evaluator.evaluate_run(
run_,
example=reference_example,
)
source_info = source_info or {}
if feedback_result.evaluator_info:
source_info = {**feedback_result.evaluator_info, **source_info}
return self.create_feedback(
run_.id,
feedback_result.key,
score=feedback_result.score,
value=feedback_result.value,
comment=feedback_result.comment,
correction=feedback_result.correction,
source_info=source_info,
feedback_source_type=FeedbackSourceType.MODEL,
)
async def aevaluate_run(
self,
run: Union[Run, str, UUID],
evaluator: RunEvaluator,
*,
source_info: Optional[Dict[str, Any]] = None,
) -> Feedback:
"""Evaluate a run."""
if isinstance(run, (str, UUID)):
run_ = self.read_run(run)
elif isinstance(run, Run):
run_ = run
else:
raise TypeError(f"Invalid run type: {type(run)}")
if run_.reference_example_id is not None:
reference_example = self.read_example(run_.reference_example_id)
else:
reference_example = None
feedback_result = await evaluator.aevaluate_run(
run_,
example=reference_example,
)
source_info = source_info or {}
if feedback_result.evaluator_info:
source_info = {**feedback_result.evaluator_info, **source_info}
return self.create_feedback(
run_.id,
feedback_result.key,
score=feedback_result.score,
value=feedback_result.value,
comment=feedback_result.comment,
correction=feedback_result.correction,
source_info=source_info,
feedback_source_type=FeedbackSourceType.MODEL,
)
def create_feedback(
self,
run_id: ID_TYPE,
key: str,
*,
score: Union[float, int, bool, None] = None,
value: Union[float, int, bool, str, dict, None] = None,
correction: Union[str, dict, None] = None,
comment: Union[str, None] = None,
source_info: Optional[Dict[str, Any]] = None,
feedback_source_type: Union[FeedbackSourceType, str] = FeedbackSourceType.API,
) -> Feedback:
"""Create a feedback in the LangChain+ API.
Args:
run_id: The ID of the run to provide feedback on.
key: The name of the metric, tag, or 'aspect' this
feedback is about.
score: The score to rate this run on the metric
or aspect.
value: The display value or non-numeric value for this feedback.
correction: The proper ground truth for this run.
comment: A comment about this feedback.
source_info: Information about the source of this feedback.
feedback_source_type: The type of feedback source.
"""
if feedback_source_type == FeedbackSourceType.API:
feedback_source: FeedbackSourceBase = APIFeedbackSource(
metadata=source_info
)
elif feedback_source_type == FeedbackSourceType.MODEL:
feedback_source = ModelFeedbackSource(metadata=source_info)
else:
raise ValueError(f"Unknown feedback source type {feedback_source_type}")
feedback = FeedbackCreate(
run_id=run_id,
key=key,
score=score,
value=value,
correction=correction,
comment=comment,
feedback_source=feedback_source,
)
response = requests.post(
self.api_url + "/feedback",
headers={**self._headers, "Content-Type": "application/json"},
data=feedback.json(),
)
raise_for_status_with_text(response)
return Feedback(**feedback.dict())
def read_feedback(self, feedback_id: ID_TYPE) -> Feedback:
"""Read a feedback from the LangChain+ API."""
response = self._get_with_retries(f"/feedback/{feedback_id}")
return Feedback(**response.json())
def list_feedback(
self,
*,
run_ids: Optional[Sequence[ID_TYPE]] = None,
**kwargs: Any,
) -> Iterator[Feedback]:
"""List the feedback objects on the LangChain+ API."""
params = ListFeedbackQueryParams(
run=run_ids,
**kwargs,
)
response = self._get_with_retries(
"/feedback", params=params.dict(exclude_none=True)
)
yield from [Feedback(**feedback) for feedback in response.json()]
def delete_feedback(self, feedback_id: ID_TYPE) -> None:
"""Delete a feedback by ID."""
response = requests.delete(
f"{self.api_url}/feedback/{feedback_id}",
headers=self._headers,
)
raise_for_status_with_text(response)
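# Example round trip (illustrative; assumes a LangChain+ server at the default
# http://localhost:1984 or at LANGCHAIN_ENDPOINT with LANGCHAIN_API_KEY set):
#
#     client = LangChainPlusClient()
#     dataset = client.create_dataset("my-dataset", description="toy QA pairs")
#     client.create_example({"question": "2+2?"}, outputs={"answer": "4"}, dataset_id=dataset.id)
#     for run in client.list_runs(session_name="default", run_type="llm"):
#         client.create_feedback(run.id, "correctness", score=1)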
| [] |
2024-01-10 | adwaitmandge/healing-horizon | api~Lib~site-packages~llama_index~query_engine~sql_vector_query_engine.py | """SQL Vector query engine."""
from langchain.input import print_text
from typing import Optional, cast, Dict, Any, Callable
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.struct_store.sql_query import NLStructStoreQueryEngine
from llama_index.indices.vector_store.retrievers.auto_retriever import (
VectorIndexAutoRetriever,
)
from llama_index.indices.query.schema import QueryBundle
from llama_index.response.schema import RESPONSE_TYPE, Response
from llama_index.tools.query_engine import QueryEngineTool
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
from llama_index.indices.service_context import ServiceContext
from llama_index.selectors.llm_selectors import LLMSingleSelector
from llama_index.prompts.base import Prompt
from llama_index.indices.query.query_transform.base import BaseQueryTransform
import logging
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor
from llama_index.callbacks.base import CallbackManager
logger = logging.getLogger(__name__)
DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT_TMPL = """
The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
Given the SQL response, the question has also been translated into a vector store query.
The vector store query and response is given below.
Given SQL query, SQL response, transformed vector store query, and vector store response, please synthesize a response to the original question.
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
Transformed vector store query: {vector_store_query_str}
Vector store response: {vector_store_response_str}
Response:
""" # noqa
DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT = Prompt(DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT_TMPL)
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL = """
"The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
The SQL response either answers the question, or should provide additional context that can be used to make the question more specific.
Your job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.
Examples:
Original question: Please give more details about the demographics of the city with the highest population.
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: Can you tell me more about the demographics of New York City?
Original question: Please compare the sports environment of cities in North America.
SQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3
SQL response: The cities in North America are New York, San Francisco, and Toronto.
New question: What sports are played in New York, San Francisco, and Toronto?
Original question: What is the city with the highest population?
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: None
Original question: What countries are the top 3 ATP players from?
SQL query: SELECT country FROM players WHERE rank <= 3
SQL response: The top 3 ATP players are from Serbia, Russia, and Spain.
New question: None
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
New question: "
""" # noqa
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT = Prompt(DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL)
def _default_check_stop(query_bundle: QueryBundle) -> bool:
"""Default check stop function."""
return query_bundle.query_str.lower() == "none"
def _format_sql_query(sql_query: str) -> str:
"""Format SQL query."""
return sql_query.replace("\n", " ").replace("\t", " ")
class SQLAugmentQueryTransform(BaseQueryTransform):
"""SQL Augment Query Transform.
This query transform will transform the query into a more specific query
after augmenting with SQL results.
Args:
llm_predictor (LLMPredictor): LLM predictor to use for query transformation.
sql_augment_transform_prompt (Prompt): Prompt to use for query transformation.
check_stop_parser (Optional[Callable[[str], bool]]): Check stop function.
"""
def __init__(
self,
llm_predictor: Optional[BaseLLMPredictor] = None,
sql_augment_transform_prompt: Optional[Prompt] = None,
check_stop_parser: Optional[Callable[[QueryBundle], bool]] = None,
) -> None:
"""Initialize params."""
self._llm_predictor = llm_predictor or LLMPredictor()
self._sql_augment_transform_prompt = (
sql_augment_transform_prompt or DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT
)
self._check_stop_parser = check_stop_parser or _default_check_stop
def _run(self, query_bundle: QueryBundle, extra_info: Dict) -> QueryBundle:
"""Run query transform."""
query_str = query_bundle.query_str
sql_query = extra_info["sql_query"]
sql_query_response = extra_info["sql_query_response"]
new_query_str, formatted_prompt = self._llm_predictor.predict(
self._sql_augment_transform_prompt,
query_str=query_str,
sql_query_str=sql_query,
sql_response_str=sql_query_response,
)
return QueryBundle(
new_query_str, custom_embedding_strs=query_bundle.custom_embedding_strs
)
def check_stop(self, query_bundle: QueryBundle) -> bool:
"""Check if query indicates stop."""
return self._check_stop_parser(query_bundle)
class SQLAutoVectorQueryEngine(BaseQueryEngine):
"""SQL + Vector Index Auto Retriever Query Engine.
This query engine can query both a SQL database
as well as a vector database. It will first decide
whether it needs to query the SQL database or vector store.
If it decides to query the SQL database, it will also decide
whether to augment information with retrieved results from the vector store.
We use the VectorIndexAutoRetriever to retrieve results.
Args:
sql_query_tool (QueryEngineTool): Query engine tool for SQL database.
vector_query_tool (QueryEngineTool): Query engine tool for vector database.
selector (Optional[LLMSingleSelector]): Selector to use.
service_context (Optional[ServiceContext]): Service context to use.
sql_vector_synthesis_prompt (Optional[Prompt]): Prompt to use for SQL vector
synthesis.
sql_augment_query_transform (Optional[SQLAugmentQueryTransform]): Query
transform to use for SQL augmentation.
use_sql_vector_synthesis (bool): Whether to use SQL vector synthesis.
callback_manager (Optional[CallbackManager]): Callback manager to use.
verbose (bool): Whether to print intermediate results.
"""
def __init__(
self,
sql_query_tool: QueryEngineTool,
vector_query_tool: QueryEngineTool,
selector: Optional[LLMSingleSelector] = None,
service_context: Optional[ServiceContext] = None,
sql_vector_synthesis_prompt: Optional[Prompt] = None,
sql_augment_query_transform: Optional[SQLAugmentQueryTransform] = None,
use_sql_vector_synthesis: bool = True,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = True,
) -> None:
"""Initialize params."""
super().__init__(callback_manager=callback_manager)
# validate that the query engines are of the right type
if not isinstance(sql_query_tool.query_engine, NLStructStoreQueryEngine):
raise ValueError(
"sql_query_tool.query_engine must be an instance of "
"NLStructStoreQueryEngine"
)
if not isinstance(vector_query_tool.query_engine, RetrieverQueryEngine):
raise ValueError(
"vector_query_tool.query_engine must be an instance of "
"RetrieverQueryEngine"
)
if not isinstance(
vector_query_tool.query_engine.retriever, VectorIndexAutoRetriever
):
raise ValueError(
"vector_query_tool.query_engine.retriever must be an instance "
"of VectorIndexAutoRetriever"
)
self._sql_query_tool = sql_query_tool
self._vector_query_tool = vector_query_tool
sql_query_engine = cast(NLStructStoreQueryEngine, sql_query_tool.query_engine)
self._service_context = service_context or sql_query_engine.service_context
self._selector = selector or LLMSingleSelector.from_defaults()
self._sql_vector_synthesis_prompt = (
sql_vector_synthesis_prompt or DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT
)
self._sql_augment_query_transform = (
sql_augment_query_transform
or SQLAugmentQueryTransform(
llm_predictor=self._service_context.llm_predictor
)
)
self._use_sql_vector_synthesis = use_sql_vector_synthesis
self._verbose = verbose
@classmethod
def from_sql_and_vector_query_engines(
cls,
sql_query_engine: NLStructStoreQueryEngine,
sql_tool_name: str,
sql_tool_description: str,
vector_auto_retriever: RetrieverQueryEngine,
vector_tool_name: str,
vector_tool_description: str,
selector: Optional[LLMSingleSelector] = None,
**kwargs: Any,
) -> "SQLAutoVectorQueryEngine":
"""From SQL and vector query engines.
Args:
sql_query_engine (NLStructStoreQueryEngine): SQL query engine.
vector_query_engine (VectorIndexAutoRetriever): Vector retriever.
selector (Optional[LLMSingleSelector]): Selector to use.
"""
sql_query_tool = QueryEngineTool.from_defaults(
sql_query_engine, name=sql_tool_name, description=sql_tool_description
)
vector_query_tool = QueryEngineTool.from_defaults(
vector_auto_retriever,
name=vector_tool_name,
description=vector_tool_description,
)
return cls(sql_query_tool, vector_query_tool, selector, **kwargs)
def _query_sql_vector(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query SQL database + vector db in sequence."""
# first query SQL database
sql_response = self._sql_query_tool.query_engine.query(query_bundle)
if not self._use_sql_vector_synthesis:
return sql_response
sql_query = (
sql_response.extra_info["sql_query"] if sql_response.extra_info else None
)
if self._verbose:
print_text(f"SQL query: {sql_query}\n", color="yellow")
print_text(f"SQL response: {sql_response}\n", color="yellow")
# given SQL db, transform query into new query
new_query = self._sql_augment_query_transform(
query_bundle.query_str,
extra_info={
"sql_query": _format_sql_query(sql_query),
"sql_query_response": str(sql_response),
},
)
if self._verbose:
print_text(
f"Transformed query given SQL response: {new_query.query_str}\n",
color="blue",
)
logger.info(f"> Transformed query given SQL response: {new_query.query_str}")
if self._sql_augment_query_transform.check_stop(new_query):
return sql_response
vector_response = self._vector_query_tool.query_engine.query(new_query)
if self._verbose:
print_text(f"Vector DB response: {vector_response}\n", color="pink")
logger.info(f"> Vector DB response: {vector_response}")
response_str, _ = self._service_context.llm_predictor.predict(
self._sql_vector_synthesis_prompt,
query_str=query_bundle.query_str,
sql_query_str=sql_query,
sql_response_str=str(sql_response),
vector_store_query_str=new_query.query_str,
vector_store_response_str=str(vector_response),
)
if self._verbose:
print_text(f"Final response: {response_str}\n", color="green")
response_extra_info = {
**(sql_response.extra_info or {}),
**(vector_response.extra_info or {}),
}
source_nodes = vector_response.source_nodes
return Response(
response_str,
extra_info=response_extra_info,
source_nodes=source_nodes,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query and get response."""
# TODO: see if this can be consolidated with logic in RouterQueryEngine
metadatas = [self._sql_query_tool.metadata, self._vector_query_tool.metadata]
result = self._selector.select(metadatas, query_bundle)
# pick sql query
if result.ind == 0:
if self._verbose:
print_text(f"Querying SQL database: {result.reason}\n", color="blue")
logger.info(f"> Querying SQL database: {result.reason}")
return self._query_sql_vector(query_bundle)
elif result.ind == 1:
if self._verbose:
print_text(f"Querying vector database: {result.reason}\n", color="blue")
logger.info(f"> Querying vector database: {result.reason}")
response = self._vector_query_tool.query_engine.query(query_bundle)
if self._verbose:
print_text(f"Vector DB response: {response}\n", color="pink")
return response
else:
raise ValueError(f"Invalid result.ind: {result.ind}")
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
# TODO: make async
return self._query(query_bundle)
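# Example wiring (illustrative; `sql_query_engine` and `vector_query_engine` stand
# for an NLStructStoreQueryEngine and a RetrieverQueryEngine built around a
# VectorIndexAutoRetriever, as the constructor checks above require):
#
#     query_engine = SQLAutoVectorQueryEngine.from_sql_and_vector_query_engines(
#         sql_query_engine,
#         sql_tool_name="sql_cities",
#         sql_tool_description="Translates questions into SQL over the city_stats table.",
#         vector_auto_retriever=vector_query_engine,
#         vector_tool_name="vector_cities",
#         vector_tool_description="Semantic search over unstructured city articles.",
#     )
#     response = query_engine.query(
#         "Tell me about the arts scene of the city with the highest population."
#     )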
| [
"\nThe original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nGiven the SQL response, the question has also been translated into a vector store query.\nThe vector store query and response is given below.\nGiven SQL query, SQL response, transformed vector store query, and vector store response, please synthesize a response to the original question.\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nTransformed vector store query: {vector_store_query_str}\nVector store response: {vector_store_response_str}\nResponse: \n",
"\n\"The original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nThe SQL response either answers the question, or should provide additional context that can be used to make the question more specific.\nYour job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.\n\nExamples:\n\nOriginal question: Please give more details about the demographics of the city with the highest population.\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: Can you tell me more about the demographics of New York City?\n\nOriginal question: Please compare the sports environment of cities in North America.\nSQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3\nSQL response: The cities in North America are New York, San Francisco, and Toronto.\nNew question: What sports are played in New York, San Francisco, and Toronto?\n\nOriginal question: What is the city with the highest population?\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: None\n\nOriginal question: What countries are the top 3 ATP players from?\nSQL query: SELECT country FROM players WHERE rank <= 3\nSQL response: The top 3 ATP players are from Serbia, Russia, and Spain.\nNew question: None\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nNew question: \"\n",
"North America",
"None"
] |
2024-01-10 | adwaitmandge/healing-horizon | api~Lib~site-packages~langchainplus_sdk~schemas.py | """Schemas for the langchainplus API."""
from __future__ import annotations
import os
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Sequence, Union
from uuid import UUID, uuid4
from pydantic import (
BaseModel,
Field,
StrictBool,
StrictFloat,
StrictInt,
root_validator,
)
from typing_extensions import Literal
from langchainplus_sdk.utils import get_runtime_environment
SCORE_TYPE = Union[StrictBool, StrictInt, StrictFloat, None]
VALUE_TYPE = Union[Dict, StrictBool, StrictInt, StrictFloat, str, None]
class ExampleBase(BaseModel):
"""Example base model."""
dataset_id: UUID
inputs: Dict[str, Any]
outputs: Optional[Dict[str, Any]] = Field(default=None)
class Config:
frozen = True
class ExampleCreate(ExampleBase):
"""Example create model."""
id: Optional[UUID]
created_at: datetime = Field(default_factory=datetime.utcnow)
class Example(ExampleBase):
"""Example model."""
id: UUID
created_at: datetime
modified_at: Optional[datetime] = Field(default=None)
runs: List[Run] = Field(default_factory=list)
class ExampleUpdate(BaseModel):
"""Update class for Example."""
dataset_id: Optional[UUID] = None
inputs: Optional[Dict[str, Any]] = None
outputs: Optional[Dict[str, Any]] = None
class Config:
frozen = True
class DatasetBase(BaseModel):
"""Dataset base model."""
name: str
description: Optional[str] = None
class Config:
frozen = True
class DatasetCreate(DatasetBase):
"""Dataset create model."""
id: Optional[UUID]
created_at: datetime = Field(default_factory=datetime.utcnow)
class Dataset(DatasetBase):
"""Dataset ORM model."""
id: UUID
created_at: datetime
modified_at: Optional[datetime] = Field(default=None)
class RunTypeEnum(str, Enum):
"""Enum for run types."""
tool = "tool"
chain = "chain"
llm = "llm"
class RunBase(BaseModel):
"""Base Run schema."""
id: Optional[UUID]
start_time: datetime = Field(default_factory=datetime.utcnow)
end_time: datetime = Field(default_factory=datetime.utcnow)
extra: dict = Field(default_factory=dict)
error: Optional[str]
execution_order: int
serialized: dict
inputs: dict
outputs: Optional[dict]
reference_example_id: Optional[UUID]
run_type: RunTypeEnum
parent_run_id: Optional[UUID]
class Run(RunBase):
"""Run schema when loading from the DB."""
id: UUID
name: str
child_runs: List[Run] = Field(default_factory=list)
@root_validator(pre=True)
def assign_name(cls, values: dict) -> dict:
"""Assign name to the run."""
if "name" not in values:
values["name"] = values["serialized"]["name"]
return values
def infer_default_run_values(values: Dict[str, Any]) -> Dict[str, Any]:
if "name" not in values:
if "serialized" not in values:
raise ValueError("Must provide either name or serialized.")
if "name" not in values["serialized"]:
raise ValueError(
"Must provide either name or serialized with a name attribute."
)
values["name"] = values["serialized"]["name"]
elif "serialized" not in values:
values["serialized"] = {"name": values["name"]}
if "execution_order" not in values:
values["execution_order"] = 1
if "child_execution_order" not in values:
values["child_execution_order"] = values["execution_order"]
if values.get("parent_run") is not None:
values["parent_run_id"] = values["parent_run"].id
extra = values.get("extra", {})
if "runtime" not in extra:
extra["runtime"] = {}
runtime_env = get_runtime_environment()
for k, v in runtime_env.items():
if k not in extra["runtime"]:
extra["runtime"][k] = v
values["extra"] = extra
return values
class RunCreate(RunBase):
"""Run create schema."""
id: UUID = Field(default_factory=uuid4)
name: str
session_name: str = Field(
default_factory=lambda: os.environ.get("LANGCHAIN_SESSION", "default")
)
child_runs: Optional[List[RunCreate]] = None
@root_validator(pre=True)
def add_runtime_env(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Add env info to the run."""
return infer_default_run_values(values)
class RunUpdate(BaseModel):
end_time: Optional[datetime]
error: Optional[str]
outputs: Optional[dict]
parent_run_id: Optional[UUID]
reference_example_id: Optional[UUID]
class ListRunsQueryParams(BaseModel):
"""Query params for GET /runs endpoint."""
id: Optional[List[UUID]]
"""Filter runs by id."""
parent_run: Optional[UUID]
"""Filter runs by parent run."""
run_type: Optional[RunTypeEnum]
"""Filter runs by type."""
session: Optional[UUID] = Field(default=None, alias="session_id")
"""Only return runs within a session."""
reference_example: Optional[UUID]
"""Only return runs that reference the specified dataset example."""
execution_order: Optional[int]
"""Filter runs by execution order."""
error: Optional[bool]
"""Whether to return only runs that errored."""
offset: Optional[int]
"""The offset of the first run to return."""
limit: Optional[int]
"""The maximum number of runs to return."""
start_time: Optional[datetime] = Field(
default=None,
alias="start_before",
description="Query Runs that started <= this time",
)
end_time: Optional[datetime] = Field(
default=None,
alias="end_after",
description="Query Runs that ended >= this time",
)
class Config:
extra = "forbid"
frozen = True
@root_validator
def validate_time_range(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that start_time <= end_time."""
start_time = values.get("start_time")
end_time = values.get("end_time")
if start_time and end_time and start_time > end_time:
raise ValueError("start_time must be <= end_time")
return values
class FeedbackSourceBase(BaseModel):
type: str
metadata: Optional[Dict[str, Any]] = None
class Config:
frozen = True
class APIFeedbackSource(FeedbackSourceBase):
"""API feedback source."""
type: Literal["api"] = "api"
class ModelFeedbackSource(FeedbackSourceBase):
"""Model feedback source."""
type: Literal["model"] = "model"
class FeedbackSourceType(Enum):
"""Feedback source type."""
API = "api"
"""General feedback submitted from the API."""
MODEL = "model"
"""Model-assisted feedback."""
class FeedbackBase(BaseModel):
"""Feedback schema."""
created_at: datetime = Field(default_factory=datetime.utcnow)
"""The time the feedback was created."""
modified_at: datetime = Field(default_factory=datetime.utcnow)
"""The time the feedback was last modified."""
run_id: UUID
"""The associated run ID this feedback is logged for."""
key: str
"""The metric name, tag, or aspect to provide feedback on."""
score: SCORE_TYPE = None
"""Value or score to assign the run."""
value: VALUE_TYPE = None
"""The display value, tag or other value for the feedback if not a metric."""
comment: Optional[str] = None
"""Comment or explanation for the feedback."""
correction: Union[str, dict, None] = None
"""Correction for the run."""
feedback_source: Optional[FeedbackSourceBase] = None
"""The source of the feedback."""
class Config:
frozen = True
class FeedbackCreate(FeedbackBase):
"""Schema used for creating feedback."""
id: UUID = Field(default_factory=uuid4)
feedback_source: FeedbackSourceBase
"""The source of the feedback."""
class Feedback(FeedbackBase):
"""Schema for getting feedback."""
id: UUID
feedback_source: Optional[FeedbackSourceBase] = None
"""The source of the feedback. In this case"""
class ListFeedbackQueryParams(BaseModel):
"""Query Params for listing feedbacks."""
run: Optional[Sequence[UUID]] = None
limit: int = 100
offset: int = 0
class Config:
"""Config for query params."""
extra = "forbid"
frozen = True
class TracerSession(BaseModel):
"""TracerSession schema for the V2 API."""
id: UUID
start_time: datetime = Field(default_factory=datetime.utcnow)
name: Optional[str] = None
extra: Optional[Dict[str, Any]] = None
tenant_id: UUID
| [] |
2024-01-10 | goncaloperes/triton | python~triton~ops~blocksparse~matmul.py | import triton
import triton.language as tl
import triton._C.libtriton as libtriton
import torch
@triton.jit
def _kernel(
A, B, C, stride_za, stride_ha, stride_ma, stride_ka, stride_zb, stride_hb, stride_kb, stride_nb, stride_zc, stride_hc,
stride_mc, stride_nc, DS0, DS1, SDD_K, SDD_off_width, lut, locks, nlocks, **meta
):
TM = meta['TM']
TN = meta['TN']
TK = meta['TK']
TZ = meta['TZ']
BLOCK = meta['BLOCK']
#------------#
#- Prologue -#
#------------#
pid0 = tl.program_id(0)
pid1 = tl.program_id(1)
pidz = tl.program_id(2)
if meta['SDD']:
pid1 = pid1 + SDD_off_width
blockidm = tl.arange(0, TM) // BLOCK
blockidn = tl.arange(0, TN) // BLOCK
offlutm = blockidm * (TN // BLOCK) * 4
offlutn = blockidn * 4
header = lut + pid1 * (TM // BLOCK) * (TN // BLOCK) * 4
z = tl.load(header + 0)
i = tl.load(header + 1 + offlutm)
j = tl.load(header + 2 + offlutn)
AS1 = SDD_K
lockid = tl.where(TZ > 1, 1, 0)
offka = pid0 * AS1
offkb = pid0 * AS1
offmc = 0
offnc = 0
offpa = 0
offpb = 0
maxid = TZ
offhc = 0
offha = z
offhb = z
ram = i * BLOCK + (tl.arange(0, TM) % BLOCK)
rbn = j * BLOCK + (tl.arange(0, TN) % BLOCK)
else:
header = lut + pid0 * 6
offset = tl.load(header + 0)
AS1 = tl.load(header + 1)
column = tl.load(header + 2)
depth = tl.load(header + 3)
lockid = tl.load(header + 4)
maxid = tl.load(header + 5)
pinc = lut + offset
offhc = depth
if meta['DSD']:
# output offset
offnc = pid1 * TN
offmc = column * TM
offpc = 0
# dense input offset
offnb = pid1 * TN
offkb = tl.load(pinc)
offkb = tl.multiple_of(offkb, 8) # compiler hint
offpb = 0
# sparse input offset
offma = 0
offka = 0
offpa = tl.load(pinc + 1)
offpa = tl.multiple_of(offpa, 8) # compiler hint
offpa = offpa * BLOCK * BLOCK
offha = 0
offhb = depth
else:
# output offset
offmc = pid1 * TM
offnc = column * TN
offpc = 0
# dense input offset
offma = pid1 * TM
offka = tl.load(pinc)
offka = tl.multiple_of(offka, 8) # compiler hint
offpa = 0
# sparse input offset
offnb = 0
offkb = 0
offpb = tl.load(pinc + 1)
offpb = tl.multiple_of(offpb, 8) # compiler hint
offpb = offpb * BLOCK * BLOCK
offha = depth
offhb = 0
ram = offma + tl.arange(0, TM)
rbn = offnb + tl.arange(0, TN)
# initialize a, b pointers
rka = offka + tl.arange(0, TK)
rkb = offkb + tl.arange(0, TK)
pa = A + pidz * TZ * stride_za + offha * stride_ha + offpa + ram[:, None] * stride_ma + rka[None, :] * stride_ka
pb = B + pidz * TZ * stride_zb + offhb * stride_hb + offpb + rbn[None, :] * stride_nb + rkb[:, None] * stride_kb
if meta['DDS']:
checkam = ram[:, None] < DS0
else:
checkam = AS1 > 0
if meta['DSD']:
checkbn = rbn[None, :] < DS0
else:
checkbn = AS1 > 0
a = tl.load(pa, mask=checkam, other=0.)
b = tl.load(pb, mask=checkbn, other=0.)
## ---------------- ##
## Inner Loop ##
## ---------------- ##
acc = tl.zeros((TM, TN), dtype=tl.float32)
for k in range(AS1, 0, -TK*TZ):
acc += tl.dot(a, b)
if meta['SDD']:
inc_a = TK * TZ * stride_ka
inc_b = TK * TZ * stride_kb
else:
pinc += 2
if meta['DSD']:
inc_b = tl.load(pinc)
inc_a = tl.load(pinc + 1)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = inc_b * stride_kb
if meta['DDS']:
inc_a = tl.load(pinc)
inc_b = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = inc_a * stride_ka
pa += inc_a
pb += inc_b
# pre-fetch
checkak = k > TK
checkbk = k > TK
checka = checkam & checkak
checkb = checkbn & checkbk
a = tl.load(pa, mask=checka)
b = tl.load(pb, mask=checkb)
c = acc.to(C.dtype.element_ty)
if meta['SDD']:
checkc = True
rr_blockidm = tl.arange(0, TM) // BLOCK
rr_blockidn = tl.arange(0, TN) // BLOCK
rr_offlutm = rr_blockidm * (TN // BLOCK) * 4
rr_offlutn = rr_blockidn * 4
off_bkid = 3 + rr_offlutm[:, None] + rr_offlutn[None, :]
bkid = tl.load(header + off_bkid)
offpc = bkid * BLOCK * BLOCK
rcm = tl.arange(0, TM) % BLOCK
rcn = tl.arange(0, TN) % BLOCK
else:
rcm = offmc + tl.arange(0, TM)
rcn = offnc + tl.arange(0, TN)
if meta['DSD']:
checkc = rcn[None, :] < DS0
if meta['DDS']:
checkc = rcm[:, None] < DS0
pc = C + offpc + offhc * stride_hc + pidz * stride_zc + rcm[:, None] * stride_mc + rcn[None, :] * stride_nc
# write-back directly
if lockid == 0:
tl.store(pc, c, mask=checkc)
# accumulate partial results using spin-locks
else:
plock = locks + tl.program_id(2) * nlocks * tl.num_programs(1) + tl.program_id(1) * nlocks + lockid - 1
pcount = plock + tl.num_programs(2) * tl.num_programs(1) * nlocks
while tl.atomic_cas(plock, 0, 1) == 1:
pass
count = tl.load(pcount)
if count == 0:
tl.store(pc, c, mask=checkc)
else:
d = tl.load(pc, mask=checkc)
tl.store(pc, d + c, mask=checkc)
tl.atomic_xchg(pcount, (count + 1) % maxid)
tl.atomic_xchg(plock, 0)
##############
# MAIN API #
##############
class _matmul(torch.autograd.Function):
sdd_cache = dict()
dsd_cache = dict()
dds_cache = dict()
locks = dict()
# Given an array sizes representing reduction size for each
# column of a block-mode matrix multiplication,
# performs load-balancing to achieve more smaller reductions
# between `seg_size` elements
@staticmethod
def load_balance(sizes):
# segment size
# heuristics taken from OpenAI blocksparse code
# https://github.com/openai/blocksparse/blob/master/blocksparse/matmul.py#L95
max_size = sizes.max()
min_size = sizes[sizes != 0].min()
#if max_size > min_size * 2.0:
# seg_max = max(triton.cdiv(max_size, 4), min_size*2)
#else:
# seg_max = max_size
seg_max = max_size
seg_min = max(triton.cdiv(seg_max, 4), 4)
# split reduction into segments
div = sizes // seg_max
rem = sizes % seg_max
packs = div + (sizes < seg_min).long() + (rem >= seg_min).long()
width = packs.sum()
segments = torch.empty(width, dtype=sizes.dtype)
column = torch.empty_like(segments)
lockid = torch.zeros_like(segments)
maxid = torch.zeros_like(segments)
nlocks = 0
current = 0
col_idx = 0
for i in range(len(sizes)):
d, r = div[i], rem[i]
isempty = sizes[i] < seg_min
last = current + d + (r >= seg_min) + isempty
# column id
column[current:last] = col_idx
# lock id
if d > 1 or (d == 1 and r >= seg_min):
nlocks += 1
lockid[current:last] = nlocks
maxid[current:last] = last - current
# segment size
segments[current:current + d] = seg_max
if r < seg_min and not isempty:
segments[current + d - 1] += r
if r >= seg_min or isempty:
segments[current + d] = r
current = last
col_idx += 1
offsets = torch.zeros_like(segments)
offsets[1:] = torch.cumsum(segments[:-1], dim=0)
return segments, column, lockid, maxid, offsets
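    # Illustrative sketch (not part of the original file): load_balance takes a
    # 1-D tensor of per-column block counts and returns the per-segment
    # reduction sizes plus the column / lock / offset bookkeeping used to build
    # the look-up tables. For example (the layout below is an assumption):
    #
    #   sizes = layout[0].sum(0)  # blocks per column of one layout slice
    #   segments, column, lockid, maxid, offsets = _matmul.load_balance(sizes)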
@staticmethod
def get_locks(size, dev):
if dev not in _matmul.locks or \
size > _matmul.locks[dev].size(0):
_matmul.locks[dev] = torch.zeros(size, dtype=torch.int32, device=dev)
return _matmul.locks[dev]
##########################
# SPARSE = DENSE x DENSE #
##########################
@staticmethod
def make_sdd_lut(layout, block, device):
start_width = 128 // block
layout = layout.type(torch.int32)
superblocks = libtriton.superblock(layout.data_ptr(), layout.shape[0], layout.shape[1], layout.shape[2], start_width)
luts, widths, packs = [], [], []
for size, nnz in superblocks:
nnz = nnz.reshape(-1, 4)
width = nnz.shape[0] // (size * size)
luts.append(torch.from_numpy(nnz).type(torch.int32).to(device))
widths.append(width)
packs.append(size)
# create locks
return luts, None, widths, packs
@staticmethod
def _sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, luts, num_locks, widths, packs):
# (A * B)^T = (B^T * A^T)
if trans_c:
a, b = b, a
trans_a, trans_b = not trans_b, not trans_a
# Shape check
a_dim = -2 if trans_a else -1
b_dim = -1 if trans_b else -2
a_inner, b_inner = a.shape[a_dim], b.shape[b_dim]
if a_inner != b_inner:
raise ValueError(f"Size of tensor A along the {_dim_to_name(a_dim)} dim ({a_inner}) must match size "
f"of tensor B along the {_dim_to_name(b_dim)} dim ({b_inner})")
if a_inner % 16 != 0:
raise ValueError('Reduction size for SDD must be a multiple of 16')
batch_size = a.size(0)
a_outer = a.size(3 if trans_a else 2)
dtype = a.dtype
device = a.device
# create kernel
total_width = sum([width * pack * pack for width, pack in zip(widths, packs)])
c = torch.zeros((batch_size, total_width, block, block), dtype=dtype, device=device)
for lut, width, pack in zip(luts, widths, packs):
num_lock = 1
meta = {'TM': block * pack, 'TN': block * pack, 'BLOCK': block, 'TK': 32, 'TZ': 1,
'SDD': True, 'DSD': False, 'DDS': False}
# create output
locks = _matmul.get_locks(2 * width * batch_size * num_lock, a.device)
# maximum grid size is 65535
# so operation might be decomposed into multiple
# kernel calls
max_width = 49152
for off_width in range(0, width, max_width):
grid = lambda meta: [meta['TZ'], min(max_width, width - off_width), batch_size]
_kernel[grid](
a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(0),
c.stride(2),
c.stride(3),
a_outer,
a_outer,
a_inner,
off_width,
lut,
locks,
num_lock,
num_warps=4,
**meta
)
# save for backward pass
return c
##########################
# DENSE = DENSE x SPARSE #
# DENSE = SPARSE x DENSE #
##########################
# Given a binary layout of 0s and 1s,
# Construct look-up table for efficient execution on GPUs
@staticmethod
def make_dxx_lut(layout, block, step, trans, device, transform=lambda idx: idx):
# load-balancing
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
segments = _empty.clone()
column = _empty.clone()
depth = _empty.clone()
lockid = _empty.clone()
maxid = _empty.clone()
offsets = _empty.clone()
current_offset = 0
current_maxid = 0
for z in range(layout.size(0)):
if trans:
sizes = torch.sum(layout[z, :, :], 1)
else:
sizes = torch.sum(layout[z, :, :], 0)
z_segments, z_column, z_lockid, z_maxid, z_offsets = _matmul.load_balance(sizes)
z_depth = z * torch.ones_like(z_segments)
z_lockid[z_lockid > 0] += current_maxid
current_maxid = z_lockid.max()
# concatenate depth
segments = torch.cat((segments, z_segments))
column = torch.cat((column, z_column))
depth = torch.cat((depth, z_depth))
maxid = torch.cat((maxid, z_maxid))
offsets = torch.cat((offsets, current_offset + z_offsets))
lockid = torch.cat((lockid, z_lockid))
current_offset += layout[z, :, :].sum()
segments *= step
# pointer increments
if trans:
nnz = layout.nonzero(as_tuple=False)
else:
nnz = layout.transpose(1, 2).nonzero(as_tuple=False)
num_blocks = nnz.size(0)
offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
idx = transform(nnz[:, 2] * block)
xincs = idx.clone()
xincs[1:] -= idx[:-1]
# divide block into multiple steps
div = block // step
xincs = xincs.view(-1, 1).repeat(1, div)
xincs[:, 1:] = step
xincs[:, 0] -= (div - 1) * step
# first increment for each reduction is actually the offset
xincs[offsets[segments > 0], 0] = idx[offsets[segments > 0]]
xincs = xincs.view(-1)
# block-mode input increments
if trans:
widx = torch.arange(num_blocks)
else:
widx = _empty.clone()
current_offset = 0
for z in range(layout.size(0)):
layoutw = layout[z, :, :].clone()
msum = layoutw.sum()
layoutw[layoutw > 0] = 1 + torch.arange(msum)
widx = torch.cat((widx, current_offset + layoutw.T[layoutw.T > 0] - 1))
current_offset += msum
widx = widx
wincs = widx * block * block
wincs[1:] -= widx[:-1] * block * block
wincs = wincs.view(-1, 1).repeat(1, div)
if trans:
wincs[:, 1:] = step
wincs[:, 0] -= (div - 1) * step
else:
wincs[:, 1:] = step * block
wincs[:, 0] -= (div - 1) * step * block
wincs[offsets[segments > 0], 0] = widx[offsets[segments > 0]]
wincs = wincs.view(-1)
# adjust offset and segment size
offsets *= 2 * div
segments *= div
# create header
width = column.size(0)
offsets += 6 * width
header = torch.stack((offsets, segments, column, depth, lockid, maxid), dim=1).view(-1).contiguous()
incs = torch.stack((xincs, wincs), dim=1).view(-1).contiguous()
incs = torch.cat((incs, torch.zeros(2, device=incs.device, dtype=incs.dtype)))
# create lut
lut = torch.cat((header, incs))
lut = lut.type(torch.int32).to(device)
# create locks
num_locks = max(1, lockid.max())
return lut, num_locks, width, None
@staticmethod
def _dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs):
# shapes / dtypes
AS0 = a.size(0)
AS1 = a.size(1)
AS2 = a.size(3 if trans_a else 2)
BS2 = block * spdims[1 if trans_b else 2]
dtype = a.dtype
# kernel
meta = {'TN': block, 'TM': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1,
'SDD': False, 'DSD': False, 'DDS': True}
# output
CS0 = AS0
CS1 = AS1
CS2 = BS2 if trans_c else AS2
CS3 = AS2 if trans_c else BS2
locks = _matmul.get_locks(2 * AS0 * AS2 // 32 * num_locks, a.device)
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
grid = lambda meta: [width, triton.cdiv(AS2, meta['TM']), AS0]
_kernel[grid](
a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(1),
c.stride(3 if trans_c else 2),
c.stride(2 if trans_c else 3),
AS2,
BS2,
0,
0,
lut,
locks,
num_locks,
num_warps=4,
**meta
)
return c
@staticmethod
def _dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs):
# shapes / dtypes
AS1 = block * spdims[2 if trans_a else 1]
BS0 = b.size(0)
BS1 = b.size(1)
BS3 = b.size(2 if trans_b else 3)
dtype = a.dtype
# kernel
meta = {'TM': block, 'TN': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1,
'SDD': False, 'DSD': True, 'DDS': False}
# output
CS0 = BS0
CS1 = BS1
CS2 = BS3 if trans_c else AS1
CS3 = AS1 if trans_c else BS3
locks = _matmul.get_locks(2 * BS0 * BS3 // 32 * num_locks, a.device)
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
grid = lambda meta: [width, triton.cdiv(BS3, meta['TN']), BS0]
_kernel[grid](
a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(1),
c.stride(3 if trans_c else 2),
c.stride(2 if trans_c else 3),
BS3,
AS1,
0,
0,
lut,
locks,
num_locks,
num_warps=4,
**meta
)
return c
fn = {'sdd': _sdd_matmul.__get__(object), 'dsd': _dsd_matmul.__get__(object), 'dds': _dds_matmul.__get__(object)}
@staticmethod
def forward(
ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block, c_lut, c_num_locks, c_width, c_packs, da_lut, da_num_locks,
da_width, da_packs, db_lut, db_num_locks, db_width, db_packs
):
c = _matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_num_locks, c_width, c_packs)
# save for backward
ctx.save_for_backward(a, b)
ctx.da_num_locks = da_num_locks
ctx.da_lut = da_lut
ctx.da_width = da_width
ctx.da_packs = da_packs
ctx.db_lut = db_lut
ctx.db_num_locks = db_num_locks
ctx.db_width = db_width
ctx.db_packs = db_packs
ctx.mode = mode
ctx.spdims = spdims
ctx.block = block
ctx.trans_a = trans_a
ctx.trans_b = trans_b
return c
@staticmethod
def backward(ctx, dc):
# saved for backward
a, b = ctx.saved_tensors
da, db = None, None
mode = ctx.mode
# gradients w.r.t. a
if ctx.needs_input_grad[0]:
mode_da = mode[1] + mode[0] + mode[2]
da = _matmul.fn[mode_da](
dc, b, False, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block, ctx.da_lut, ctx.da_num_locks, ctx.da_width,
ctx.da_packs
)
# gradients w.r.t. b
if ctx.needs_input_grad[1]:
mode_db = mode[2] + mode[1] + mode[0]
db = _matmul.fn[mode_db](
a, dc, not ctx.trans_a, False, ctx.trans_b, ctx.spdims, ctx.block, ctx.db_lut, ctx.db_num_locks, ctx.db_width,
ctx.db_packs
)
return da, db, None, None, None,\
None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None
class matmul:
def make_lut(self, dtype, device):
key = (dtype, device)
if key in self.lut_cache:
return self.lut_cache[key]
# C look-up table
layout, block = self.layout, self.block
step = 16
if self.mode == 'sdd':
c_lut, c_num_locks, c_width, c_packs = _matmul.make_sdd_lut(layout, block, device)
elif self.mode == 'dsd':
c_lut, c_num_locks, c_width, c_packs = _matmul.make_dxx_lut(layout, block, step, not self.trans_a, device)
elif self.mode == 'dds':
c_lut, c_num_locks, c_width, c_packs = _matmul.make_dxx_lut(layout, block, step, self.trans_b, device)
# DA look-up table
if self.mode == 'sdd':
da_lut, da_num_locks, da_width, da_packs = _matmul.make_dxx_lut(layout, block, step, True, device)
elif self.mode == 'dsd':
da_lut, da_num_locks, da_width, da_packs = _matmul.make_sdd_lut(layout, block, device)
elif self.mode == 'dds':
da_lut, da_num_locks, da_width, da_packs = _matmul.make_dxx_lut(layout, block, step, not self.trans_b, device)
# DB look-up table
if self.mode == 'sdd':
db_lut, db_num_locks, db_width, db_packs = _matmul.make_dxx_lut(layout, block, step, False, device)
elif self.mode == 'dsd':
db_lut, db_num_locks, db_width, db_packs = _matmul.make_dxx_lut(layout, block, step, self.trans_a, device)
elif self.mode == 'dds':
db_lut, db_num_locks, db_width, db_packs = _matmul.make_sdd_lut(layout, block, device)
self.lut_cache[key] = (c_lut, c_num_locks, c_width, c_packs,
da_lut, da_num_locks, da_width, da_packs,
db_lut, db_num_locks, db_width, db_packs)
return self.lut_cache[key]
def __init__(self, layout, block, mode, trans_a=False, trans_b=False):
if mode not in ['sdd', 'dsd', 'dds']:
raise NotImplementedError('Supported modes are: sdd, dsd, dds')
# look-up table cache
self.lut_cache = dict()
# attributes
self.block = block
self.mode = mode
self.trans_a = trans_a
self.trans_b = trans_b
layout_dim = layout.ndim
assert layout_dim in (2, 3), "Layout should be a 2 or 3 dimensional tensor of 0s and 1s"
if not mode == 'sdd':
# Dims to be reduced on the 'inside' of the matmul, either -1 or -2
trans_dense, trans_sparse, sparse_inner = (trans_b, trans_a, -1) if mode == 'dsd' else (trans_a, trans_b, -2)
self.dense_inner_dim = -((sparse_inner % 2) + 1) if not trans_dense else sparse_inner
sparse_inner = sparse_inner if not trans_sparse else -((sparse_inner % 2) + 1)
# Inner dim of the dense input should be equal to the inner dim of the sparse input
self.dense_inner_size = layout.shape[sparse_inner] * block
# Expected shape for sparse inputs
self.sparse_shape = (layout.sum().item(), block, block)
# Support using the same layout across attention heads etc.
if layout_dim == 2:
layout = layout.unsqueeze(0)
layout = layout.long() # Above code assumes the layout tensor is an integral type
self.layout = layout
self.spdims = layout.shape
def __call__(self, a, b):
c_lut, c_num_locks, c_width, c_packs,\
da_lut, da_num_locks, da_width, da_packs,\
db_lut, db_num_locks, db_width, db_packs = self.make_lut(a.dtype, a.device)
# If we don't check for invalid shapes, devices, & dtypes here, they will lead to undefined behavior
# and potential illegal memory accesses
original_dims = max(a.ndim, b.ndim)
a, b = self._validate_inputs(a, b)
# execute
c = _matmul.apply(
a, b, self.trans_a, self.trans_b, False, self.mode, self.spdims, self.block, c_lut, c_num_locks, c_width,
c_packs, da_lut, da_num_locks, da_width, da_packs, db_lut, db_num_locks, db_width, db_packs
)
# This removes any leading singleton dimensions we may have added to the tensor that weren't in the input
dims_to_trim = c.ndim - original_dims
for _ in range(dims_to_trim):
c = c.squeeze(0)
return c
def _validate_inputs(self, a, b):
if a.device != b.device:
raise ValueError(f"Inputs must be on the same device; got {a.device} for tensor A "
f"and {b.device} for tensor B")
if not a.is_cuda:
raise ValueError("Only GPU devices are supported for now")
# When autocast is enabled, torch.matmul autocasts to float16, so we do the same here
if torch.is_autocast_enabled():
a, b = a.half(), b.half()
elif a.dtype != b.dtype:
raise ValueError(f"Inputs must be the same dtype; got {a.dtype} for A and {b.dtype} for B")
mode, trans_a, trans_b = self.mode, self.trans_a, self.trans_b
if mode != 'sdd':
# One input is sparse
dense, dense_name, sparse, sparse_name = (a, 'A', b, 'B') if mode == 'dds' else (b, 'B', a, 'A')
dense_inner = dense.shape[self.dense_inner_dim]
if dense_inner != self.dense_inner_size:
raise ValueError(f"Expected tensor {dense_name} to have size {self.dense_inner_size} at dim "
f"{self.dense_inner_dim % dense.ndim}, got {dense_inner}.")
if sparse.shape[-len(self.sparse_shape):] != self.sparse_shape:
raise ValueError(f"Expected tensor with trailing dimensions of shape {self.sparse_shape} for argument "
f"{sparse_name}, got {sparse.shape}")
def add_extra_dims(x):
# Add extra leading singleton dimensions if needed
dims_needed = 4 - x.ndim
if dims_needed > 0:
singletons = [1] * dims_needed
x = x.view(*singletons, *x.shape)
elif dims_needed < 0:
raise ValueError("Tensors with more than 4 dimensions are not currently supported")
return x
# Pad shapes with leading singleton dimensions
a = add_extra_dims(a)
b = add_extra_dims(b)
return a, b
def _dim_to_name(x):
# assert x in (-1, -2)
return "last" if x == -1 else "second to last"
| [] |
2024-01-10 | martin0359/chatudddddddddd | googlesearch.py | """Util that calls Google Search."""
from typing import Any, Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class GoogleSearchAPIWrapper(BaseModel):
"""Wrapper for Google Search API.
Adapted from: Instructions adapted from https://stackoverflow.com/questions/
37083058/
programmatically-searching-google-in-python-using-custom-search
TODO: DOCS for using it
1. Install google-api-python-client
- If you don't already have a Google account, sign up.
- If you have never created a Google APIs Console project,
read the Managing Projects page and create a project in the Google API Console.
- Install the library using pip install google-api-python-client
The current version of the library is 2.70.0 at this time
2. To create an API key:
- Navigate to the APIs & Services→Credentials panel in Cloud Console.
- Select Create credentials, then select API key from the drop-down menu.
- The API key created dialog box displays your newly created key.
- You now have an API_KEY
3. Setup Custom Search Engine so you can search the entire web
- Create a custom search engine in this link.
- In Sites to search, add any valid URL (i.e. www.stackoverflow.com).
- That’s all you have to fill up, the rest doesn’t matter.
In the left-side menu, click Edit search engine → {your search engine name}
→ Setup Set Search the entire web to ON. Remove the URL you added from
the list of Sites to search.
- Under Search engine ID you’ll find the search-engine-ID.
4. Enable the Custom Search API
- Navigate to the APIs & Services→Dashboard panel in Cloud Console.
- Click Enable APIs and Services.
- Search for Custom Search API and click on it.
- Click Enable.
URL for it: https://console.cloud.google.com/apis/library/customsearch.googleapis
.com
"""
search_engine: Any #: :meta private:
google_api_key: Optional[str] = None
google_cse_id: Optional[str] = None
k: int = 10
siterestrict: bool = False
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _google_search_results(self, search_term: str, **kwargs: Any) -> List[dict]:
cse = self.search_engine.cse()
if self.siterestrict:
cse = cse.siterestrict()
res = cse.list(q=search_term, cx=self.google_cse_id, **kwargs).execute()
return res.get("items", [])
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
google_api_key = get_from_dict_or_env(
values, "google_api_key", "GOOGLE_API_KEY"
)
values["google_api_key"] = google_api_key
google_cse_id = get_from_dict_or_env(values, "google_cse_id", "GOOGLE_CSE_ID")
values["google_cse_id"] = google_cse_id
try:
from googleapiclient.discovery import build
except ImportError:
raise ImportError(
"google-api-python-client is not installed. "
"Please install it with `pip install google-api-python-client`"
)
service = build("customsearch", "v1", developerKey=google_api_key, static_discovery=False)
values["search_engine"] = service
return values
def run(self, query: str) -> str:
"""Run query through GoogleSearch and parse result."""
snippets = []
results = self._google_search_results(query, num=self.k)
if len(results) == 0:
return "No good Google Search Result was found"
for result in results:
if "snippet" in result:
snippets.append(result["snippet"])
return " ".join(snippets)
def results(
self,
query: str,
num_results: int,
search_params: Optional[Dict[str, str]] = None,
) -> List[Dict]:
"""Run query through GoogleSearch and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
search_params: Parameters to be passed on search
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
metadata_results = []
results = self._google_search_results(
query, num=num_results, **(search_params or {})
)
if len(results) == 0:
return [{"Result": "No good Google Search Result was found"}]
for result in results:
metadata_result = {
"title": result["title"],
"link": result["link"],
}
if "snippet" in result:
metadata_result["snippet"] = result["snippet"]
metadata_results.append(metadata_result)
return metadata_results
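# Example usage (illustrative sketch; assumes GOOGLE_API_KEY and GOOGLE_CSE_ID
# are set in the environment as described in the class docstring above):
#
#   search = GoogleSearchAPIWrapper(k=5)
#   print(search.run("langchain google search wrapper"))
#   for item in search.results("langchain google search wrapper", num_results=3):
#       print(item["title"], "->", item["link"])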
| [] |
2024-01-10 | tractorjuice/un-global-platform | un-global-platform.py | from llama_index import LLMPredictor, GPTVectorStoreIndex, PromptHelper, download_loader
import streamlit as st
import openai
BASE_PROMPT = [{"role": "system", "content": """
You are a strategy researcher based in the UK.
You use complicated examples from Wardley Mapping and the platform business model in your answers, focusing on lesser-known advice to better illustrate your arguments.
Your language should be for an 12 year old to understand.
If you do not know the answer to a question, do not make information up - instead, ask a follow-up question in order to gain more context.
Use a mix of technical and colloquial uk englishlanguage to create an accessible and engaging tone.
Provide your answers using Wardley Mapping and the Platform Business Model in a form of a sarcastic tweet.
"""}]
openai.api_key = st.secrets["OPENAI_API_KEY"]
YoutubeTranscriptReader = download_loader("YoutubeTranscriptReader")
loader = YoutubeTranscriptReader()
documents = loader.load_data(ytlinks=['https://www.youtube.com/watch?v=kUPCGrTUDB8'])
index = GPTVectorStoreIndex.from_documents(documents)
st.set_page_config(page_title="UN Global Platform & Open Source")
st.title("UN Global Platform")
st.sidebar.markdown("# Query this video using AI")
st.sidebar.markdown("Developed by Mark Craddock](https://twitter.com/mcraddock)", unsafe_allow_html=True)
st.sidebar.markdown("Current Version: 0.0.2")
st.video('https://youtu.be/kUPCGrTUDB8')
text = st.empty()
prompt = st.text_input("Prompt", value="What is this video about?")
query_engine = index.as_query_engine()
if st.button("Send"):
with st.spinner("Generating response..."):
response = query_engine.query(prompt)
text.text_area("Messages", response, height=250)
if st.button("Clear"):
st.session_state["messages"] = BASE_PROMPT
show_messages(text)
| [
"What is this video about?",
"\n You are a strategy researcher based in the UK.\n You use complicated examples from Wardley Mapping and the platform business model in your answers, focusing on lesser-known advice to better illustrate your arguments.\n Your language should be for an 12 year old to understand.\n If you do not know the answer to a question, do not make information up - instead, ask a follow-up question in order to gain more context.\n Use a mix of technical and colloquial uk englishlanguage to create an accessible and engaging tone.\n Provide your answers using Wardley Mapping and the Platform Business Model in a form of a sarcastic tweet.\n "
] |
2024-01-10 | rohankumawat/llmmodels | celebrity_search~example1.py | import os
import streamlit as st
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.chains import SequentialChain
from langchain.memory import ConversationBufferMemory
# loading the secret key
load_dotenv()
os.environ["OPENAI_API_KEY"] = os.environ["OPENAI_API_KEY"]
## initialise OpenAI LLM
llm = OpenAI(temperature=0.6)
# streamlit framework
st.title('Celebrity Search Results')
input_text = st.text_input("Search about any Celebrity")
# Prompt Template
first_input_prompt = PromptTemplate(
input_variables=['name'],
template = 'Tell me about the celebrity named, {name}.'
)
# Memory
person_memory = ConversationBufferMemory(input_key='name', memory_key='chat_history')
dob_memory = ConversationBufferMemory(input_key='person', memory_key='chat_history')
descr_memory = ConversationBufferMemory(input_key='dob', memory_key='description_history')
# LLM Chain
chain1 = LLMChain(llm=llm, prompt=first_input_prompt, verbose=True, output_key='person', memory=person_memory)
# Second Prompt Template
second_input_prompt = PromptTemplate(
input_variables=['person'],
template = 'When was {person} born?'
)
chain2= LLMChain(llm=llm, prompt=second_input_prompt, verbose=True, output_key='dob', memory=dob_memory)
# Third Prompt Template
third_input_prompt = PromptTemplate(
input_variables = ['dob'],
template = "Mention 5 major events happened around {dob} in the world."
)
chain3= LLMChain(llm=llm, prompt=third_input_prompt, verbose=True, output_key='description', memory=descr_memory)
parent_chain = SequentialChain(chains=[chain1, chain2, chain3], input_variables=['name'], output_variables=['person', 'dob', 'description'], verbose=True)
if input_text:
st.write(parent_chain({'name':input_text}))
with st.expander('Person Name'):
st.info(person_memory.buffer)
with st.expander('Major Events'):
st.info(descr_memory.buffer) | [
"Mention 5 major events happened around {dob} in the world.",
"Tell me about the celebrity named, {name}.",
"name",
"When was {person} born?",
"person"
] |
2024-01-10 | rohankumawat/llmmodels | chainlit~chainlit.py | import os
from dotenv import load_dotenv
from langchain import PromptTemplate, OpenAI, LLMChain
import chainlit as cl
# loading the secret key
load_dotenv()
os.environ["OPENAI_API_KEY"] = os.environ["OPENAI_API_KEY"]
template = """Question: {question}
Answer: Let's think step-by-step.
"""
@cl.on_chat_start
def main():
# Instantiate the chain for that user session
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0), verbose=True)
# Store the chain in the user session
cl.user_session.set("llm_chain", llm_chain)
@cl.on_message
async def main(message: str):
# Retrieve the chain from the user session
llm_chain = cl.user_session.get("llm_chain") # type: LLMChain
# Call the chain asynchronously
res = await llm_chain.acall(message, callbacks=[cl.AsyncLangchainCallbackHandler()])
# Do any post processing here
# "res" is a Dict. For this chain, we get the response by reading the "text" key.
# This varies from chain to chain, you should check which key to read.
await cl.Message(content=res["text"]).send() | [
"question",
"Question: {question}\n\nAnswer: Let's think step-by-step.\n"
] |
2024-01-10 | AI-Jie01/knowledge_gpt | tests~test_utils.py | from knowledge_gpt.utils import get_sources
from langchain.docstore.document import Document
def test_get_sources():
"""Test getting sources from an answer"""
docs = [
Document(page_content="This is a test document.", metadata={"source": "1-5"}),
Document(page_content="This is a test document.", metadata={"source": "2-6"}),
]
answer = {"output_text": "This is a test answer. SOURCES: 1-5, 3-8"}
sources = get_sources(answer, docs)
assert sources == [docs[0]]
| [] |
2024-01-10 | AI-Jie01/knowledge_gpt | knowledge_gpt~utils.py | from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from langchain import OpenAI, Cohere
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.embeddings import CohereEmbeddings, OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.docstore.document import Document
from langchain.vectorstores import FAISS, VectorStore
import docx2txt
from typing import List, Dict, Any
import re
from io import BytesIO
import streamlit as st
from prompts import STUFF_PROMPT
from pypdf import PdfReader
from openai.error import AuthenticationError
@st.experimental_memo()
def parse_docx(file: BytesIO) -> str:
text = docx2txt.process(file)
# Remove multiple newlines
text = re.sub(r"\n\s*\n", "\n\n", text)
return text
@st.experimental_memo()
def parse_pdf(file: BytesIO) -> List[str]:
pdf = PdfReader(file)
output = []
for page in pdf.pages:
text = page.extract_text()
# Merge hyphenated words
text = re.sub(r"(\w+)-\n(\w+)", r"\1\2", text)
# Fix newlines in the middle of sentences
text = re.sub(r"(?<!\n\s)\n(?!\s\n)", " ", text.strip())
# Remove multiple newlines
text = re.sub(r"\n\s*\n", "\n\n", text)
output.append(text)
return output
@st.experimental_memo()
def parse_txt(file: BytesIO) -> str:
text = file.read().decode("utf-8")
# Remove multiple newlines
text = re.sub(r"\n\s*\n", "\n\n", text)
return text
@st.cache(allow_output_mutation=True)
def text_to_docs(text: str | List[str]) -> List[Document]:
"""Converts a string or list of strings to a list of Documents
with metadata."""
if isinstance(text, str):
# Take a single string as one page
text = [text]
page_docs = [Document(page_content=page) for page in text]
# Add page numbers as metadata
for i, doc in enumerate(page_docs):
doc.metadata["page"] = i + 1
# Split pages into chunks
doc_chunks = []
for doc in page_docs:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=800,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=0,
)
chunks = text_splitter.split_text(doc.page_content)
for i, chunk in enumerate(chunks):
doc = Document(
page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i}
)
# Add sources a metadata
doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}"
doc_chunks.append(doc)
return doc_chunks
@st.cache(allow_output_mutation=True)
def embed_docs(docs: List[Document]) -> VectorStore:
"""Embeds a list of Documents and returns a FAISS index"""
if not st.session_state.get("OPENAI_API_KEY"):
raise AuthenticationError(
"Enter your OpenAI API key in the sidebar. You can get a key at https://platform.openai.com/account/api-keys."
)
else:
# Embed the chunks
embeddings = OpenAIEmbeddings(openai_api_key=st.session_state.get("OPENAI_API_KEY")) # type: ignore
index = FAISS.from_documents(docs, embeddings)
return index
@st.cache(allow_output_mutation=True)
def search_docs(index: VectorStore, query: str) -> List[Document]:
"""Searches a FAISS index for similar chunks to the query
and returns a list of Documents."""
# Search for similar chunks
docs = index.similarity_search(query, k=5)
return docs
@st.cache(allow_output_mutation=True)
def get_answer(docs: List[Document], query: str) -> Dict[str, Any]:
"""Gets an answer to a question from a list of Documents."""
# Get the answer
chain = load_qa_with_sources_chain(OpenAI(temperature=0, openai_api_key=st.session_state.get("OPENAI_API_KEY")), chain_type="stuff", prompt=STUFF_PROMPT) # type: ignore
# Cohere doesn't work very well as of now.
# chain = load_qa_with_sources_chain(Cohere(temperature=0), chain_type="stuff", prompt=STUFF_PROMPT) # type: ignore
answer = chain(
{"input_documents": docs, "question": query}, return_only_outputs=True
)
return answer
@st.cache(allow_output_mutation=True)
def get_sources(answer: Dict[str, Any], docs: List[Document]) -> List[Document]:
"""Gets the source documents for an answer."""
# Get sources for the answer
source_keys = [s for s in answer["output_text"].split("SOURCES: ")[-1].split(", ")]
source_docs = []
for doc in docs:
if doc.metadata["source"] in source_keys:
source_docs.append(doc)
return source_docs
def wrap_text_in_html(text: str | List[str]) -> str:
"""Wraps each text block separated by newlines in <p> tags"""
if isinstance(text, list):
# Add horizontal rules between pages
text = "\n<hr/>\n".join(text)
return "".join([f"<p>{line}</p>" for line in text.split("\n")])
| [] |
2024-01-10 | boo105/study | Study~textmining.py | import pandas as pd
import nltk
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
# Tokenization
def flatten(l):
flatList = []
for elem in l:
if type(elem) == list:
for e in elem:
flatList.append(e)
else:
flatList.append(elem)
return flatList
# Word frequency analysis
def word_frequency():
f = open("C:\\Users\\user\\Desktop\\MinHo\\Python\\Study\\트럼프취임연설문.txt", 'r')
    # The Trump inauguration speech is a single line of data with no newlines, so if it is saved with f.readline(), the content ends up wrapped in square brackets as [content].
    # f.readlines()[0] is used here so that slicing can be applied.
lines = f.readlines()[0]
f.close()
print(lines[0:100])
    tokenizer = RegexpTokenizer('[\w]+')  # regular expression
    stop_words = stopwords.words('english')  # stopwords
    words = lines.lower()  # convert to lowercase
    tokens = tokenizer.tokenize(words)  # tokenize
    stopped_tokens = [i for i in list((tokens)) if not i in stop_words]  # remove stopwords
    stopped_tokens2 = [i for i in stopped_tokens if len(i) > 1]  # treat only tokens with 2+ characters as words (a single character is not a word)
    # Counter could be used for counting, but pandas.Series.value_counts() is used here
print(pd.Series(stopped_tokens2).value_counts().head(10))
"""
f = open("C:\\Users\\user\\Desktop\\MinHo\\Python\\Study\\문재인대통령취임연설문.txt", 'r')
lines = f.readlines()
f.close()
    # issue: lines do not get tokenized properly in flatten()
    word_list = flatten(lines)  # a Korean tokenizer could be used for tokenization, but the hand-made tokenizer is applied here
    word_list = pd.Series([x for x in word_list if len(x) > 1])  # keep a token as a word only if it is longer than one character
print(word_list.value_counts().head(10))
"""
# k-means clustering (partitional clustering)
def clustering():
    # There are many algorithms, but partitional clustering and hierarchical clustering are used most often.
    # Clustering is unsupervised learning; a cluster is a group of data points that share similar characteristics.
    # k-means clustering -> an algorithm that groups the given data into k clusters (the classification criterion is distance).
    # It works by minimizing the variance of the distances to each cluster.
    # Unlike supervised learning there is no reference label, so the algorithm builds categories automatically from the variance and distances of the feature vectors.
    # Cosine similarity takes values between 0 and 1, so cosine distance is defined as 1 - similarity (cosine similarity is effective for clustering in text analysis).
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cluster import KMeans
from konlpy.tag import Hannanum
from sklearn.decomposition import PCA
hannanum = Hannanum()
Data = pd.read_csv("C:\\Users\\user\\Desktop\\MinHo\\Python\\Study\\군집분석데이터.csv", engine = "python")
#print(Data.head())
    # clustering
    docs = []
    # after the first for loop, docs is stored as a list of lists
    for i in Data['기사내용']:
        docs.append(hannanum.nouns(i))  # extract nouns
    # after the second for loop, docs has been transformed into [sentence1, sentence2, ..., sentence15]
for i in range(len(docs)):
docs[i] = ' '.join(docs[i])
vec = CountVectorizer()
    # fit_transform normalizes the data: fit() subtracts the mean and divides by the standard deviation, transform() applies the transformation
    X = vec.fit_transform(docs)  # build the document-term matrix from the extracted words
    df = pd.DataFrame(X.toarray(), columns=vec.get_feature_names())  # turn the array into a DataFrame
    kmeans = KMeans(n_clusters=3).fit(df)  # cluster with k-means, k = 3
    #print(kmeans.labels_)
    # use PCA to reduce the analysis result to 2 dimensions for visualization
    pca = PCA(n_components=2)  # n_components = how many principal components to keep (target dimensionality)
principalComponents = pca.fit_transform(df)
principalDf = pd.DataFrame(data=principalComponents, columns=['pricipal component 1', 'principal component 2'])
principalDf.index = Data['검색어']
plt.scatter(principalDf.iloc[kmeans.labels_ == 0, 0], principalDf.iloc[kmeans.labels_ == 0, 1], s=10, c='red', label='cluster1')
plt.scatter(principalDf.iloc[kmeans.labels_ == 1, 0], principalDf.iloc[kmeans.labels_ == 1, 1], s=10, c='blue', label='cluster2')
plt.scatter(principalDf.iloc[kmeans.labels_ == 2, 0], principalDf.iloc[kmeans.labels_ == 2, 1], s=10, c='green',label='cluster3')
plt.legend()
plt.show()
# k-medoids clustering (partitional clustering)
def clustering2():
    # k-medoids clustering (k-means is very sensitive to outliers, so this algorithm compensates for that sensitivity)
    # the number of iterations grows during the analysis, so the runtime increases sharply as the data gets larger
from pyclustering.cluster import kmedoids
import numpy as np
from konlpy.tag import Hannanum
from sklearn.feature_extraction.text import CountVectorizer
hannanum = Hannanum()
Data = pd.read_csv("C:\\Users\\user\\Desktop\\MinHo\\Python\\Study\\군집분석데이터.csv", engine="python")
docs = []
for i in Data['기사내용']:
docs.append(hannanum.nouns(i))
for i in range(len(docs)):
docs[i] = ' '.join(docs[i])
vec = CountVectorizer()
X = vec.fit_transform(docs)
df = pd.DataFrame(X.toarray(), columns=vec.get_feature_names())
    # k-medoids clustering
    kmedoids_instance = kmedoids.kmedoids(df.to_numpy(), initial_index_medoids=np.random.randint(15, size=3))  # build the clustering model; the medoids are drawn from the 15 data points, size = number of clusters
    kmedoids_instance.process()  # run the clustering iterations
clusters = kmedoids_instance.get_clusters()
print(clusters)
# Clustering (hierarchical clustering)
def clustering3():
from sklearn.cluster import AgglomerativeClustering
import scipy.cluster.hierarchy as shc
from konlpy.tag import Hannanum
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
hannanum = Hannanum()
Data = pd.read_csv("C:\\Users\\user\\Desktop\\MinHo\\Python\\Study\\군집분석데이터.csv", engine="python")
docs = []
for i in Data['기사내용']:
docs.append(hannanum.nouns(i))
for i in range(len(docs)):
docs[i] = ' '.join(docs[i])
vec = CountVectorizer()
X = vec.fit_transform(docs)
df = pd.DataFrame(X.toarray(), columns=vec.get_feature_names())
#print(df)
    # hierarchical clustering
    cluster = AgglomerativeClustering(n_clusters=3, linkage='ward')  # linkage is how the most similar clusters are measured; 'ward' is the default and merges the two clusters that increase the total within-cluster variance the least, which produces clusters of fairly similar size
print(cluster.fit_predict(df))
    # visualization
plt.figure(figsize=(10, 7))
plt.title("Customer Dendrograms")
    # create dendrogram: generate and plot a dendrogram to see what the hierarchical structure looks like; it can also be shown as a scatter plot
dend = shc.dendrogram(shc.linkage(df, method='ward'))
plt.show(dend)
# LDA (topic modeling) - one of the earliest topic models
def lda():
    # Topic modeling is a statistical inference algorithm for finding topics in a large collection of unstructured documents.
    # It infers topics by clustering meaningful words using the words related to their context.
    # It is often mixed with other analysis models such as sentiment analysis.
    # It is more effective when combined with various visualizations such as tables, word clouds, word networks, and per-year charts.
    # LDA only assumes the exchangeability of words. Exchangeability: word order is ignored; only the presence or absence of a word matters.
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import gensim
from gensim.models import CoherenceModel
from nltk.tokenize import RegexpTokenizer
    # tokenizer setup
tokenizer = RegexpTokenizer('[\w]+')
stop_words = stopwords.words('english')
p_stemmer = PorterStemmer()
    # sentences
doc_a = "Brocolli is good to eat. My brother likes to eat good brocolli, but not my mother."
doc_b = "My mother spends a lot of time driving my brother around to baseball practice."
doc_c = "Some health experts suggest that driving may cause increased tension and blood pressure."
doc_d = "I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better."
doc_e = "Health professionals say that brocolli is good for your health."
doc_f = "Big data is a term used to refer to data sets that are too large or complex for traditional data-processing application software to adequately deal with."
doc_g = "Data with many cases offer greater statistical power, while data with higher complexity may lead to a higher false discovery rate"
doc_h = "Big data was originally associated with three key concepts: volume, variety, and velocity."
doc_i = "A 2016 definition states that 'Big data represents the information assets characterized by such a high volume, velocity and variety to require specific technology and analytical methods for its transformation into value'."
doc_j = "Data must be processed with advanced tools to reveal meaningful information."
    # put the documents into a list
doc_set = [doc_a, doc_b, doc_c, doc_d, doc_e, doc_f, doc_g, doc_h, doc_i, doc_j]
texts = []
    # tokenization
    for w in doc_set:
        raw = w.lower()  # convert to lowercase
        tokens = tokenizer.tokenize(raw)  # tokenize
        stopped_tokens = [i for i in tokens if not i in stop_words]  # remove stopwords
        stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]  # stemming / lemmatization
texts.append(stemmed_tokens)
    # convert the words of the documents into a dictionary
    dictionary = gensim.corpora.Dictionary(texts)
    # build the document-term matrix
    corpus = [dictionary.doc2bow(text) for text in texts]  # doc2bow converts each document into (word id, frequency) pairs
    # set the number of topics to 3
    ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=3, id2word=dictionary)
    # print 5 words per topic
    print(ldamodel.print_topics(num_words=5))  # the number attached to each word is its weight, i.e. how much that word accounts for within the topic (frequency)
    print(ldamodel.get_document_topics(corpus)[0])  # topic distribution for document 0; the probabilities of the distribution sum to 1
    # the main statistical methods (for choosing the number of topics) are perplexity and topic coherence
    print('\nPerplexity: ', ldamodel.log_perplexity(corpus))  # a measure of how well the model predicts the observed data (lower is better)
    # compute coherence using the top 10 words (topn=10)
    coherence_model_lda = CoherenceModel(model=ldamodel, texts=texts, dictionary=dictionary, topn=10)  # building the coherence model takes a while
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
    # plot perplexity as a function of the number of topics
    perplexity_values = []
    for i in range(2, 10):
        ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=i, id2word=dictionary)  # build a model for each number of topics
        perplexity_values.append(ldamodel.log_perplexity(corpus))  # store the perplexity for each number of topics
x = range(2, 10)
plt.plot(x, perplexity_values)
plt.xlabel("Number of topics")
plt.ylabel("Perplexity score")
plt.show()
    # plot coherence as a function of the number of topics
    coherence_values = []
    for i in range(2, 10):
        ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=i, id2word=dictionary)
        coherence_model_lda = CoherenceModel(model=ldamodel, texts=texts, dictionary=dictionary, topn=10)  # build a coherence model for the LDA model built with each number of topics
        coherence_lda = coherence_model_lda.get_coherence()
        coherence_values.append(coherence_lda)  # store the coherence for each number of topics
x = range(2, 10)
plt.plot(x, coherence_values)
plt.xlabel("Number of topics")
plt.ylabel("Coherence score")
plt.show()
if __name__ == "__main__":
lda()
#clustering3()
#word_frequency()
| [] |
2024-01-10 | vrsarin/aml-research | workers~ingestion~functions.py | """Module providing a open AI function to retrieve knowledge graph."""
# Adapted version from
# https://github.com/tomasonjo/blogs/blob/master/llm/openaifunction_constructing_graph.ipynb?ref=blog.langchain.dev
from typing import List, Optional
from langchain.pydantic_v1 import Field, BaseModel
from langchain.graphs.graph_document import (
Node as BaseNode,
Relationship as BaseRelationship
)
class Property(BaseModel):
"""A single property consisting of key and value"""
key: str = Field(..., description="key")
value: str | None = Field(..., description="value")
class Node(BaseNode):
"""A single node of Knowledge graph"""
properties: Optional[List[Property]] = Field(
None, description="List of node properties"
)
class Relationship(BaseRelationship):
"""A relation between two nodes of a Knowledge graph"""
properties: Optional[List[Property]] = Field(
None, description="List of relationship properties"
)
class KnowledgeGraph(BaseModel):
"""Generate a knowledge graph with entities and relationships."""
nodes: List[Node] = Field(...,
description="List of nodes in the knowledge graph")
rels: List[Relationship] = Field(
..., description="List of relationships in the knowledge graph"
)
| [] |
2024-01-10 | vrsarin/aml-research | workers~ingestion~ner_extrator.py | # Adapted version from
# https://github.com/tomasonjo/blogs/blob/master/llm/openaifunction_constructing_graph.ipynb?ref=blog.langchain.dev
"""Retrieve knowledge graph from provided document"""
from typing import List, Dict, Any, Optional
from langchain.chains.openai_functions import create_structured_output_chain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import Document
from langchain.graphs.graph_document import (
Node as BaseNode,
Relationship as BaseRelationship,
GraphDocument
)
from functions import (Node, Relationship, KnowledgeGraph)
from openai_prompts import (ner_prompt)
# This need to be coming from environment
llm = ChatOpenAI(model="gpt-3.5-turbo-16k", temperature=0)
def format_property_key(s: str) -> str:
"""Format property key to be in camelCase"""
words = s.split()
if not words:
return s
first_word = words[0].lower()
capitalized_words = [word.capitalize() for word in words[1:]]
return "".join([first_word] + capitalized_words)
def props_to_dict(props) -> dict:
"""Convert properties to a dictionary."""
properties = {}
if not props:
return properties
for p in props:
properties[format_property_key(p.key)] = p.value
return properties
def map_to_base_node(node: Node) -> BaseNode:
"""Map the KnowledgeGraph Node to the base Node."""
properties = props_to_dict(node.properties) if node.properties else {}
# Add name property for better Cypher statement generation
properties["name"] = node.id.title()
return BaseNode(
id=node.id.title(), type=node.type.capitalize(), properties=properties
)
def map_to_base_relationship(rel: Relationship) -> BaseRelationship:
"""Map the KnowledgeGraph Relationship to the base Relationship."""
source = map_to_base_node(rel.source)
target = map_to_base_node(rel.target)
properties = props_to_dict(rel.properties) if rel.properties else {}
return BaseRelationship(
source=source, target=target, type=rel.type, properties=properties
)
def get_extraction_chain(allowed_nodes: Optional[List[str]] = None,
allowed_rels: Optional[List[str]] = None,
restricted_nodes: Optional[List[str]] = None,
existing_entities: Optional[List[str]] = None,):
"""OpenAI Langchain chain to retrieve NER using """
# print(system_prompt)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
ner_prompt(allowed_nodes,
allowed_rels,
restricted_nodes,
existing_entities),
),
(
"human",
"Use the given format to extract information from the following input: {input}",
),
("human", "Tip: Make sure to answer in the correct format"),
]
)
return create_structured_output_chain(KnowledgeGraph, llm, prompt, verbose=False)
def extract_graph(document: Document,
nodes: Optional[List[str]] = None,
rels: Optional[List[str]] = None,
restricted_nodes: Optional[List[str]] = None,
existing_entities: Optional[List[str]] = None,) -> GraphDocument:
"""Extract Knowledge Graph from a given document"""
extract_chain = get_extraction_chain(
nodes, rels, restricted_nodes, existing_entities)
data = extract_chain.run(document.page_content)
graph_document = GraphDocument(
nodes=[map_to_base_node(node) for node in data.nodes],
relationships=[map_to_base_relationship(rel) for rel in data.rels],
source=document,
)
return graph_document
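# Example usage (illustrative sketch; the sample text and the node/relationship
# whitelists below are assumptions, not part of the original module):
#
#   doc = Document(page_content="Acme Corp wired 2M USD to Zenith Holdings.")
#   graph = extract_graph(doc, nodes=["Organization"], rels=["TRANSFERRED_FUNDS"])
#   print([node.id for node in graph.nodes])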
| [
"Use the given format to extract information from the following input: {input}",
"Tip: Make sure to answer in the correct format",
"human"
] |
2024-01-10 | vrsarin/aml-research | workers~ingestion~graph_repository.py | """Neo4J Graph Database Repository"""
import json
import pathlib
from typing import List, Dict, Any
from langchain.graphs import Neo4jGraph
from langchain.graphs.graph_document import GraphDocument
# This need to be coming from environment
graph_db = Neo4jGraph()
graph_db.refresh_schema()
def save_graph_json(file_name: str, graph: GraphDocument):
"""Save Graph Data to disk"""
file_path = pathlib.Path(file_name)
with open(file_path, "w", encoding="utf-8") as swr:
json.dump(graph.to_json(), swr, ensure_ascii=False)
def save_graph(document: GraphDocument):
"""Store finalized graph into database"""
graph_db.add_graph_documents([document])
def execute_query(query: str, params: dict = {}) -> List[Dict[str, Any]]:
"""Run Query against DB"""
return graph_db.query(query=query, params=params)
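# Example usage (illustrative sketch; assumes NEO4J_URI / NEO4J_USERNAME /
# NEO4J_PASSWORD are set so that Neo4jGraph() can connect):
#
#   save_graph(graph_document)                                # persist an extracted GraphDocument
#   rows = execute_query("MATCH (n) RETURN count(n) AS total")
#   print(rows[0]["total"])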
| [] |
2024-01-10 | KsiuTretyakova/IT-Camp_Fri | JARVIS.py | import os
import sys
import webbrowser
import speech_recognition as sr
# -------------------------------------------------------------------------------
import openai
from dotenv import load_dotenv as ld
dotenv_path = os.path.join(os.path.dirname(__file__), ".env")
if os.path.exists(dotenv_path):
ld(dotenv_path)
openai.api_key = os.getenv("api_key")
def ai_response(my_task):
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": my_task}]
    )
    return completion
#-------------------------------------------------------------------------------
# ------------------
import pyttsx3
engine = pyttsx3.init()
# engine.say("Текст")
# engine.runAndWait()
# -------------------
def talk(words):
print(words)
# os.system("say " + words)
engine.say(words)
engine.runAndWait()
# talk("Привіт, чим можу допомогти?")
talk("Hello")
r = sr.Recognizer()
def command():
global r
with sr.Microphone() as source:
print("Say")
r.pause_threshold = 1
r.adjust_for_ambient_noise(source, duration=1)
audio = r.listen(source)
try:
# task = r.recognize_google(audio, language="en-US").lower() # ru-RU
# task = r.recognize_google(audio, language="ru-RU").lower() # ru-RU
task = r.recognize_google(audio, language="uk-UA").lower() # ru-RU
print("You: " + task)
except sr.UnknownValueError:
talk("Don`t understand")
task = command()
return task
def make_something(ar_task):
global r
if ("відкрити" and "сайт") in ar_task:
talk("ok")
url = "https://ituniver.com"
webbrowser.open(url)
elif "стоп" in ar_task:
talk("Good bye")
sys.exit()
elif "ім'я" in ar_task:
talk("My name is JARVIS")
else:
# print(handle_input(input("You: ")).choices[0].message.content)
try:
ai_res = ai_response(ar_task).choices[0].message.content
talk(ai_res)
except openai.error.ServiceUnavailableError:
talk("Виникла помилка, спробую ще раз")
try:
ai_res = ai_response(ar_task).choices[0].message.content
talk(ai_res)
except openai.error.ServiceUnavailableError:
talk("Не можу обробити відповідь, запитай ще раз")
except openai.error.RateLimitError:
talk("Спробуй через 20 секунд")
r.pause_threshold = 20
except:
talk("Ops, щось пішло не так. Спробуйте ще")
while True:
make_something(command())
| [] |
2024-01-10 | KsiuTretyakova/IT-Camp_Fri | ChatAI.py | import openai
import os
from dotenv import load_dotenv as ld
dotenv_path = os.path.join(os.path.dirname(__file__), ".env")
if os.path.exists(dotenv_path):
ld(dotenv_path)
# path=os.path.dirname(__file__).replace('//', '\')
# dotenv_path=f'{path}\.env'
openai.api_key = os.getenv("api_key")
models = openai.Model.list()
# print(models)
def handle_input(user_input):
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": user_input}]
)
return completion
# print(handle_input(input()).choices[0].message.content)
while True:
print(handle_input(input("You: ")).choices[0].message.content)
| [] |
2024-01-10 | gauravlahotigl/pycon-india-poster | Pycon%20India~ai_models.py | from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.schema import HumanMessage, SystemMessage
from langchain import HuggingFaceHub, LLMChain
import warnings
warnings.filterwarnings('ignore')
load_dotenv()
text = 'Hi' #variable to start the conversation
def bot2(input_prompt): #bot2 function
# model_id = "google/flan-t5-small" #355M parameters #hugging face
# conv_model = HuggingFaceHub(repo_id=model_id,
# model_kwargs={"temperature":0.8}) #0 to 1
# template = """Respond to your AI friend's message without repeated greetings. Feel free to engage
# openly and bring up any random topics. Keep your responses concise, within a word limit of 100-150
# words, and don't limit yourself to one subject. Even if there's a loop, you will respond as if there
# were a new thing said. If you run out of the things to talk about, bring up a new topic.
# {query}
# """
# prompt = PromptTemplate(template=template, input_variables=['query'])
# conv_chain = LLMChain(llm=conv_model,
# prompt=prompt,
# verbose=True)
# print(conv_chain.run(str(input_prompt)))
chatbot = ChatOpenAI(temperature=0)
message = chatbot([
SystemMessage(content='''Respond to your AI friend's message without repeated greetings. Feel free to engage
openly and bring up any random topics. Keep your responses concise, within a word limit of 50-80
words strictly, and don't limit yourself to one subject. Even if there's a loop, you will respond as if there
were a new thing said. If you run out of the things to talk about, bring up a new topic. If you stuck in a loop where
you get same answer repeatedly then try to change the topic.'''),
HumanMessage(content=str(input_prompt))
]).content
print('bot2 message:', message)
print('-------------------------------------------------------------------------------------------------------------------------------------------------------')
global text
text = message
def bot1(input_prompt):
chatbot = ChatOpenAI(temperature=0)
message = chatbot([
SystemMessage(content='''Respond to your AI friend's message without repeated greetings. Feel free to engage
openly and bring up any random topics. Keep your responses concise, within a word limit of 50-80
words strictly, and don't limit yourself to one subject. Even if there's a loop, you will respond as if there
were a new thing said. If you run out of the things to talk about, bring up a new topic. If you stuck in a loop where
you get same answer repeatedly then try to change the topic.'''),
HumanMessage(content=str(input_prompt))
]).content
print('bot1 message:', message)
print('-------------------------------------------------------------------------------------------------------------------------------------------------------')
bot2(str(message))
while True:
bot1(text)
| [
"Respond to your AI friend's message without repeated greetings. Feel free to engage \n openly and bring up any random topics. Keep your responses concise, within a word limit of 50-80 \n words strictly, and don't limit yourself to one subject. Even if there's a loop, you will respond as if there \n were a new thing said. If you run out of the things to talk about, bring up a new topic. If you stuck in a loop where\n you get same answer repeatedly then try to change the topic."
] |
2024-01-10 | gauravlahotigl/pycon-india-poster | Pycon%20India~huggingxlangchain.py | # Use a pipeline as a high-level helper
from transformers import pipeline
import warnings
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
import gtts
from playsound import playsound
warnings.filterwarnings('ignore')
load_dotenv()
#speech-to-text conversion
speech_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-large-v2") #loading OpenAI whisper model using Huggingface
converted_text = speech_to_text('./Audio/Harohalli.m4a') #passing the recorded audio to the model
converted_text = converted_text['text']
print('Audio input:' + converted_text)
#getting the answer of the question using ChatGPT using Langchain
chatgpt = ChatOpenAI(temperature=0.1) #instantiating model
message = chatgpt([
HumanMessage(content=str(converted_text))
]).content #passing the converted audio as an input ot ChatGPT
print('ChatGPT output:' + message)
#converting text to speech
# text_to_speech = pipeline("text-to-speech", model="microsoft/speecht5_tts") #loading the model
# text_to_speech(message)
tts = gtts.gTTS(message)
tts.save('tts.mp3')
playsound('tts.mp3') | [] |
2024-01-10 | gauravlahotigl/pycon-india-poster | Pycon%20India~ai_models_memory.py | from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
load_dotenv()
text = 'Hi'
def bot2(input_prompt):
chatbot = ChatOpenAI(temperature=0.5)
memory = ConversationBufferMemory()
conversation = ConversationChain(
llm = chatbot,
memory = memory,
verbose = False
)
message = conversation.predict(input = '''Respond to your AI friend's message without repeated greetings. Feel free to engage
openly and bring up any random topics. Keep your responses concise, within a word limit of 50-80
words strictly, and don't limit yourself to one subject. Even if there's a loop, you will respond as if there
were a new thing said. If you run out of the things to talk about, bring up a new topic. If you stuck in a loop where
you get same answer repeatedly then try to change the topic.''' + str(input_prompt))
print('model2 message:', message)
print('-------------------------------------------------------------------------------------------------------------------------------------------------------')
global text
text = message
def bot1(input_prompt):
chatbot = ChatOpenAI(temperature=0.5)
memory = ConversationBufferMemory()
conversation = ConversationChain(
llm = chatbot,
memory = memory,
verbose = False
)
message = conversation.predict(input = '''Respond to your AI friend's message without repeated greetings. Feel free to engage
openly and bring up any random topics. Keep your responses concise, within a word limit of 50-80
words strictly, and don't limit yourself to one subject. Even if there's a loop, you will respond as if there
were a new thing said. If you run out of the things to talk about, bring up a new topic. If you stuck in a loop where
you get same answer repeatedly then try to change the topic.''' + str(input_prompt))
print('model1 message:', message)
print('-------------------------------------------------------------------------------------------------------------------------------------------------------')
bot2(str(message))
while True:
bot1(text) | [] |
2024-01-10 | Andi5986/language_agent | language_agent~module.py | import os
from dotenv import load_dotenv
import openai
# Load environment variables from .env file
load_dotenv()
def call_openai_api(prompt):
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "user", "content": prompt}
]
)
return response['choices'][0]['message']['content']
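# Illustrative usage sketch (not part of the original module); assumes OPENAI_API_KEY is
# present in the .env file loaded above:
# answer = call_openai_api("Summarise what a language agent does in one sentence.")
# print(answer)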
| [] |
2024-01-10 | jonaengs/kokebok-api | kokebok~recipes~image_parsing~text_parsing.py | import math
from typing import Literal
import openai
ALLOWED_MODELS = Literal["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4"]
DEFAULT_GPT_MODEL: ALLOWED_MODELS = "gpt-3.5-turbo"
# Pricing is USD per 1k tokens
# Pricing data updated: 2023/10/13
GPT_PRICING: dict[ALLOWED_MODELS, dict[Literal["input", "output"], float]] = {
"gpt-3.5-turbo": {"input": 0.0015, "output": 0.002},
"gpt-3.5-turbo-16k": {"input": 0.003, "output": 0.004},
"gpt-4": {"input": 0.03, "output": 0.06},
}
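# Worked example of the pricing table above (figures are the ones hard-coded here, not
# live OpenAI prices): a gpt-3.5-turbo call with 2,000 input tokens and 500 output
# tokens costs 2.0 * 0.0015 + 0.5 * 0.002 = 0.004 USD.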
GPT_SYSTEM_PROMPT = """\
Your task is to structure text. Specifically, you will be given text
extracted from images of cooking recipes, and will give structure to the
text by dividing it into the following categories:
title, preamble, yield, content, instructions, ingredients.
Each piece of text fits in a single category.
Many of the categories may not appear in the recipe. However,
the "instructions" and "ingredients" categories almost always do.
The "content" category is only to be used for pieces of text that do
not fit in any of the other categories.
Ingredients may be grouped by which part of the recipe they are used in.
Try to preserve these groupings if possible.
In the following example, there are two groups and four ingredients:
"
For the chicken:
1 lb chicken thighs
2 tsp chicken spice
For the sauce:
1/4 cup white wine
1 tbsp unsalted butter
"
Therefore, the groupings will be as follows:
```
{
"For the chicken": ["1 lb chicken thighs", "2 tsp chicken spice"]`,
"For the sauce": ["1/4 cup white wine", "1 tbsp unsalted butter"],
}
```
The user input will be the recipe text. Your reply should be the recipe text
divided up into the categories described above.
You are NOT allowed to alter the recipe text in any semantically meaningful way.
You will not duplicate text.
You may remove words and/or characters if it is clear that they are wrongful
artefacts produced by the OCR performed on the image.
The output should be formatted using the JSON format. Your output will be a
single JSON object with a series of keys mapping to values.
Each category will be a key, and the text belonging to that category will
be the value. You may turn strings that represent lists into JSON arrays.
Groupings of ingredients should be preserved. This is achieved by
representing the ingredients as a JSON object, with the keys being
the ingredient group names and the values being the list of ingredients
belonging to that group. If no group name for the ingredients is given,
all ingredients can be placed under a single key equalling the empty string ("").
An example output object could look like this:
{
"title": "Pancakes with homemade blueberry jam",
"ingredients": {
"Pancakes": [
"1 packet of pancake mix",
"Butter",
],
"Blueberry jam": [
"300 grams fresh blueberries",
"100 grams sugar"
]
},
"instructions": [
"Create the pancake batter as instructed on the packet",
"Leave the batter to swell",
"Mix the blueberries and sugar, before crushing them with a fork",
"Fry the pancakes",
"Serve the fresh pancakes with your delicious homemade blueberry jam"
],
"yields": "2 servings"
}
"""
# noqa
GPT_USER_HINT_INTRO = (
"You have been provided with the following information about "
"the document to help you parse the it correctly:\n"
)
def text_to_recipe(text: str, user_hint: str = "") -> str:
"""
Asks ChatGPT to structure the input text according to the system prompt.
Returns ChatGPT's response (assumed to be valid JSON) as a string.
"""
# Estimate the number of tokens in the input. Likely a pessimistic estimate
# System prompt is in English, which averages roughly 3.5-4 characters per token.
estimate_system_prompt_tokens = len(GPT_SYSTEM_PROMPT) / 3.5
# User input may be non-english and contain numeric and special characters.
# Thus it is likely/possible that char-to-token ratio is lower.
estimate_user_input_tokens = len(text) / 2.5
estimate_total_input_tokens = math.ceil(
estimate_system_prompt_tokens + estimate_user_input_tokens
)
print(f"{estimate_total_input_tokens=}")
# Set GPT model to use
gpt_model: ALLOWED_MODELS = DEFAULT_GPT_MODEL
if estimate_total_input_tokens > 16_000:
raise ValueError("Text input calculated too large for model context.")
if gpt_model == "gpt-3.5-turbo" and estimate_total_input_tokens > 4_000:
gpt_model = "gpt-3.5-turbo-16k"
# Construct the chat messages
chat_messages = [{"role": "system", "content": GPT_SYSTEM_PROMPT}]
if user_hint:
chat_messages.append(
{"role": "system", "content": GPT_USER_HINT_INTRO + f'"{user_hint}"'}
)
chat_messages.append({"role": "user", "content": text})
# create API docs: https://platform.openai.com/docs/api-reference/chat/create
response = openai.ChatCompletion.create(
presence_penalty=-1, # Discourage new topics
temperature=0.2, # Make model more predictable
model=gpt_model,
messages=chat_messages,
)
if response["choices"][0]["finish_reason"] == "content_filter":
print(response)
raise ValueError("ChatGPT stopped due to content filter.")
print(response)
estimate_input_cost = (
estimate_total_input_tokens * GPT_PRICING[gpt_model]["input"] / 1000
)
input_cost = (
response["usage"]["prompt_tokens"] * GPT_PRICING[gpt_model]["input"] / 1000
)
output_cost = (
response["usage"]["completion_tokens"] * GPT_PRICING[gpt_model]["output"] / 1000
)
total_cost = input_cost + output_cost
print("COSTS:")
print(f"{input_cost=} ({estimate_input_cost=})")
print(f"{output_cost=}")
print(f"Total cost: ${total_cost} (~ equal to NOK {10*total_cost})")
response_text = response["choices"][0]["message"]["content"]
return response_text
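# Illustrative usage sketch (not part of the original module); the OCR text and hint are
# assumptions for the example, and an OpenAI API key must be configured beforehand:
# import json
# raw = text_to_recipe(ocr_text, user_hint="The recipe is written in Norwegian")
# recipe = json.loads(raw)  # dict with e.g. "title", "ingredients", "instructions"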
| [
"Your task is to structure text. Specifically, you will be given text\nextracted from images of cooking recipes, and will give structure to the\ntext by dividing it into the following categories:\n title, preamble, yield, content, instructions, ingredients.\n\nEach piece of text fits in a single category.\nMany of the categories may not appear in the recipe. However,\nthe \"instructions\" and \"ingredients\" categories almost always do.\nThe \"content\" category is only to be used for pieces of text that do\nnot fit in any of the other categories.\n\nIngredients may be grouped by which part of the recipe they are used in.\nTry to preserve these groupings if possible.\nIn the following example, there are two groups and four ingredients:\n\"\nFor the chicken:\n1 lb chicken thighs\n2 tsp chicken spice\nFor the sauce:\n1/4 cup white wine\n1 tbsp unsalted butter\n\"\nTherefore, the groupings will be as follows:\n```\n{\n \"For the chicken\": [\"1 lb chicken thighs\", \"2 tsp chicken spice\"]`,\n \"For the sauce\": [\"1/4 cup white wine\", \"1 tbsp unsalted butter\"],\n}\n```\n\nThe user input will be the recipe text. Your reply should be the recipe text\ndivided up into the categories described above.\n\nYou are NOT allowed to alter the recipe text in any semantically meaningful way.\nYou will not duplicate text.\nYou may remove words and/or characters if it is clear that they are wrongful\nartefacts produced by the OCR performed on the image.\n\nThe output shoudl be formatted using the JSON format. Your output will be a\nsingle JSON object with a series of keys mapping to values.\nEach category will be a key, and the text belonging to that category will\nbe the value. You may turn strings that represent lists into JSON arrays.\n\nGroupings of ingredients should be preserved. This is achieved by\nrepresenting the ingredients as a JSON object, with the keys being\nthe ingredient group names and the values being the list of ingredients\nbelonging to that group. If no group name for the ingredients is given,\nall ingredients can be placed under a single a key equalling the empty string (\"\").\n\nAn example output object could look like this:\n{\n \"title\": \"Pancakes with homemade blueberry jam\",\n \"ingredients\": {\n \"Pancakes\": [\n \"1 packet of pancake mix\",\n \"Butter\",\n ],\n \"Blueberry jam\": [\n \"300 grams fresh blueberries\",\n \"100 grams sugar\"\n ]\n },\n \"instructions\": [\n \"Create the pancake batter as instructed on the packet\",\n \"Leave the batter to swell\",\n \"Mix the blueberries and sugar, before crushing them with a fork\",\n \"Fry the pancakes\",\n \"Serve the fresh pancakes with your delicious homemade blueberry jam\"\n ],\n \"yields\": \"2 servings\"\n}\n",
"PLACEHOLDER\"PLACEHOLDER\"",
"782.2857142857143"
] |
2024-01-10 | AntSimi/py-eddy-tracker | src~py_eddy_tracker~observations~network.py | # -*- coding: utf-8 -*-
"""
Class to create network of observations
"""
from glob import glob
import logging
import time
import netCDF4
from numba import njit, types as nb_types
from numba.typed import List
from numpy import (
arange,
array,
bincount,
bool_,
concatenate,
empty,
nan,
ones,
percentile,
uint16,
uint32,
unique,
where,
zeros,
)
import zarr
from ..dataset.grid import GridCollection
from ..generic import build_index, wrap_longitude
from ..poly import bbox_intersection, vertice_overlap
from .groups import GroupEddiesObservations, get_missing_indices, particle_candidate
from .observation import EddiesObservations
from .tracking import TrackEddiesObservations, track_loess_filter, track_median_filter
logger = logging.getLogger("pet")
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super().__call__(*args, **kwargs)
return cls._instances[cls]
class Buffer(metaclass=Singleton):
__slots__ = (
"buffersize",
"contour_name",
"xname",
"yname",
"memory",
)
DATA = dict()
FLIST = list()
def __init__(self, buffersize, intern=False, memory=False):
self.buffersize = buffersize
self.contour_name = EddiesObservations.intern(intern, public_label=True)
self.xname, self.yname = EddiesObservations.intern(intern)
self.memory = memory
def load_contour(self, filename):
if isinstance(filename, EddiesObservations):
return filename[self.xname], filename[self.yname]
if filename not in self.DATA:
if len(self.FLIST) > self.buffersize:
self.DATA.pop(self.FLIST.pop(0))
if self.memory:
# Only if netcdf
with open(filename, "rb") as h:
e = EddiesObservations.load_file(h, include_vars=self.contour_name)
else:
e = EddiesObservations.load_file(
filename, include_vars=self.contour_name
)
self.FLIST.append(filename)
self.DATA[filename] = e[self.xname], e[self.yname]
return self.DATA[filename]
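# Illustrative note (not in the original source): because Buffer uses the Singleton
# metaclass above, repeated constructions return the same object, e.g.
# Buffer(10) is Buffer(10) evaluates to True, so contours loaded once are shared by
# every caller that asks for a Buffer.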
@njit(cache=True)
def fix_next_previous_obs(next_obs, previous_obs, flag_virtual):
"""When an observation is virtual, we have to fix the previous and next obs
:param np.array(int) next_obs : index of next observation from network
:param np.array(int) previous_obs: index of previous observation from network
:param np.array(bool) flag_virtual: if observation is virtual or not
"""
for i_o in range(next_obs.size):
if not flag_virtual[i_o]:
continue
# if there are several consecutive virtuals, some values are written multiple times.
# but it should not be slow
next_obs[i_o - 1] = i_o
next_obs[i_o] = i_o + 1
previous_obs[i_o] = i_o - 1
previous_obs[i_o + 1] = i_o
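# Illustrative worked example (not in the original source); the arrays are assumptions
# chosen to show how the linked-list indices are patched around a virtual observation:
# with flag_virtual = [False, True, False, False], next_obs = [1, -1, 3, -1] and
# previous_obs = [-1, 0, -1, 2], the call rewrites next_obs to [1, 2, 3, -1] and
# previous_obs to [-1, 0, 1, 2], so the virtual observation 1 is linked to obs 0 and 2.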
class NetworkObservations(GroupEddiesObservations):
__slots__ = ("_index_network", "_index_segment_track", "_segment_track_array")
NOGROUP = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset_index()
def __repr__(self):
m_event, s_event = (
self.merging_event(only_index=True, triplet=True)[0],
self.splitting_event(only_index=True, triplet=True)[0],
)
period = (self.period[1] - self.period[0]) / 365.25
nb_by_network = self.network_size()
nb_trash = 0 if self.ref_index != 0 else nb_by_network[0]
big = 50_000
infos = [
f"Atlas with {self.nb_network} networks ({self.nb_network / period:0.0f} networks/year),"
f" {self.nb_segment} segments ({self.nb_segment / period:0.0f} segments/year), {len(self)} observations ({len(self) / period:0.0f} observations/year)",
f" {m_event.size} merging ({m_event.size / period:0.0f} merging/year), {s_event.size} splitting ({s_event.size / period:0.0f} splitting/year)",
f" with {(nb_by_network > big).sum()} network with more than {big} obs and the biggest have {nb_by_network.max()} observations ({nb_by_network[nb_by_network> big].sum()} observations cumulate)",
f" {nb_trash} observations in trash",
]
return "\n".join(infos)
def reset_index(self):
self._index_network = None
self._index_segment_track = None
self._segment_track_array = None
def find_segments_relative(self, obs, stopped=None, order=1):
"""
Find all relative segments from obs linked with merging/splitting events at a specific order.
:param int obs: index of observation after the event
:param int stopped: index of observation before the event
:param int order: order of relatives accepted
:return: all relative segments
:rtype: EddiesObservations
"""
# extraction of network where the event is
network_id = self.tracks[obs]
nw = self.network(network_id)
# index of observation in new subnetwork
i_obs = where(nw.segment == self.segment[obs])[0][0]
if stopped is None:
return nw.relatives(i_obs, order=order)
else:
i_stopped = where(nw.segment == self.segment[stopped])[0][0]
return nw.relatives([i_obs, i_stopped], order=order)
def get_missing_indices(self, dt):
"""Find indices where observations are missing.
As networks keep all untracked observations under track number `self.NOGROUP`,
those observations are not computed.
:param int,float dt: theoretical delta time between 2 observations
"""
return get_missing_indices(
self.time, self.track, dt=dt, flag_untrack=True, indice_untrack=self.NOGROUP
)
def fix_next_previous_obs(self):
"""Function used after 'insert_virtual', to correct next_obs and
previous obs.
"""
fix_next_previous_obs(self.next_obs, self.previous_obs, self.virtual)
@property
def index_network(self):
if self._index_network is None:
self._index_network = build_index(self.track)
return self._index_network
@property
def index_segment_track(self):
if self._index_segment_track is None:
self._index_segment_track = build_index(self.segment_track_array)
return self._index_segment_track
def segment_size(self):
return self.index_segment_track[1] - self.index_segment_track[0]
@property
def ref_segment_track_index(self):
return self.index_segment_track[2]
@property
def ref_index(self):
return self.index_network[2]
def network_segment_size(self, id_networks=None):
"""Get number of segment by network
:return array:
"""
i0, i1, ref = build_index(self.track[self.index_segment_track[0]])
if id_networks is None:
return i1 - i0
else:
i = id_networks - ref
return i1[i] - i0[i]
def network_size(self, id_networks=None):
"""
Return size for specified network
:param list,array, None id_networks: ids to identify network
"""
if id_networks is None:
return self.index_network[1] - self.index_network[0]
else:
i = id_networks - self.index_network[2]
return self.index_network[1][i] - self.index_network[0][i]
def unique_segment_to_id(self, id_unique):
"""Return id network and id segment for a unique id
:param array id_unique:
"""
i = self.index_segment_track[0][id_unique] - self.ref_segment_track_index
return self.track[i], self.segment[i]
def segment_slice(self, id_network, id_segment):
"""
Return slice for one segment
:param int id_network: id to identify network
:param int id_segment: id to identify segment
"""
raise Exception("need to be implemented")
def network_slice(self, id_network):
"""
Return slice for one network
:param int id_network: id to identify network
"""
i = id_network - self.index_network[2]
i_start, i_stop = self.index_network[0][i], self.index_network[1][i]
return slice(i_start, i_stop)
@property
def elements(self):
elements = super().elements
elements.extend(
[
"track",
"segment",
"next_obs",
"previous_obs",
"next_cost",
"previous_cost",
]
)
return list(set(elements))
def astype(self, cls):
new = cls.new_like(self, self.shape)
for k in new.fields:
if k in self.fields:
new[k][:] = self[k][:]
new.sign_type = self.sign_type
return new
def longer_than(self, nb_day_min=-1, nb_day_max=-1):
"""
Select network on time duration
:param int nb_day_min: Minimal number of days covered by one network, if negative -> not used
:param int nb_day_max: Maximal number of days covered by one network, if negative -> not used
"""
return self.extract_with_mask(self.mask_longer_than(nb_day_min, nb_day_max))
def mask_longer_than(self, nb_day_min=-1, nb_day_max=-1):
"""
Select network on time duration
:param int nb_day_min: Minimal number of days covered by one network, if negative -> not used
:param int nb_day_max: Maximal number of days covered by one network, if negative -> not used
"""
if nb_day_max < 0:
nb_day_max = 1000000000000
mask = zeros(self.shape, dtype="bool")
t = self.time
for i, _, _ in self.iter_on(self.track):
nb = i.stop - i.start
if nb == 0:
continue
if nb_day_min <= (ptp(t[i]) + 1) <= nb_day_max:
mask[i] = True
return mask
@classmethod
def from_split_network(cls, group_dataset, indexs, **kwargs):
"""
Build a NetworkObservations object with Group dataset and indices
:param TrackEddiesObservations group_dataset: Group dataset
:param indexs: result from split_network
:return: NetworkObservations
"""
index_order = indexs.argsort(order=("group", "track", "time"))
network = cls.new_like(group_dataset, len(group_dataset), **kwargs)
network.sign_type = group_dataset.sign_type
for field in group_dataset.elements:
if field not in network.elements:
continue
network[field][:] = group_dataset[field][index_order]
network.segment[:] = indexs["track"][index_order]
# n & p must be re-indexed
n, p = indexs["next_obs"][index_order], indexs["previous_obs"][index_order]
# extra slots so that indexing with -1 still returns -1
translate = -ones(index_order.max() + 2, dtype="i4")
translate[index_order] = arange(index_order.shape[0])
network.next_obs[:] = translate[n]
network.previous_obs[:] = translate[p]
network.next_cost[:] = indexs["next_cost"][index_order]
network.previous_cost[:] = indexs["previous_cost"][index_order]
return network
def infos(self, label=""):
return f"{len(self)} obs {unique(self.segment).shape[0]} segments"
def correct_close_events(self, nb_days_max=20):
"""
Transform the case where segment A splits from segment B, then x days later
segment B merges back into A, into the equivalent case where segment A splits
from segment B and then, x days later, segment A merges into B (so B stays the longer segment).
These events have to last less than `nb_days_max` to be changed.
------------------- A
/ /
B --------------------
to
--A--
/ \
B -----------------------------------
:param float nb_days_max: maximum time to search for splitting-merging event
"""
_time = self.time
# segment used to correct and track changes
segment = self.segment_track_array.copy()
# final segment used to copy into self.segment
segment_copy = self.segment
segments_connexion = dict()
previous_obs, next_obs = self.previous_obs, self.next_obs
# record for every segment the slice, index of next obs & index of previous obs
for i, seg, _ in self.iter_on(segment):
if i.start == i.stop:
continue
i_p, i_n = previous_obs[i.start], next_obs[i.stop - 1]
segments_connexion[seg] = [i, i_p, i_n]
for seg in sorted(segments_connexion.keys()):
seg_slice, _, i_seg_n = segments_connexion[seg]
# the segment ID has to be corrected, because we may have changed it since
seg_corrected = segment[seg_slice.stop - 1]
# we keep the real segment number
seg_corrected_copy = segment_copy[seg_slice.stop - 1]
if i_seg_n == -1:
continue
# if segment is split
n_seg = segment[i_seg_n]
seg2_slice, i2_seg_p, _ = segments_connexion[n_seg]
if i2_seg_p == -1:
continue
p2_seg = segment[i2_seg_p]
# if it merges on the first in a certain time
if (p2_seg == seg_corrected) and (
_time[i_seg_n] - _time[i2_seg_p] < nb_days_max
):
my_slice = slice(i_seg_n, seg2_slice.stop)
# correct the dummy segment
segment[my_slice] = seg_corrected
# correct the good segment
segment_copy[my_slice] = seg_corrected_copy
previous_obs[i_seg_n] = seg_slice.stop - 1
segments_connexion[seg_corrected][0] = my_slice
return self.sort()
def sort(self, order=("track", "segment", "time")):
"""
Sort observations
:param tuple order: order or sorting. Given to :func:`numpy.argsort`
"""
index_order = self.obs.argsort(order=order, kind="mergesort")
self.reset_index()
for field in self.fields:
self[field][:] = self[field][index_order]
nb_obs = len(self)
# one extra slot so that indexing with -1 still returns -1
translate = -ones(nb_obs + 1, dtype="i4")
translate[index_order] = arange(nb_obs)
# next & previous must be re-indexed
self.next_obs[:] = translate[self.next_obs]
self.previous_obs[:] = translate[self.previous_obs]
return index_order, translate
def obs_relative_order(self, i_obs):
self.only_one_network()
return self.segment_relative_order(self.segment[i_obs])
def find_link(self, i_observations, forward=True, backward=False):
"""
Find all observations where obs `i_observation` could be in the future or past.
If forward=True, search all observations where water from obs `i_observation` could go.
If backward=True, search all observations where water from obs `i_observation` could come from.
:param int,iterable(int) i_observation:
indices of observation. Can be int, or an iterable of int.
:param bool forward, backward:
if forward, search observations after obs; if backward, search observations before obs.
"""
i_obs = (
[i_observations]
if not hasattr(i_observations, "__iter__")
else i_observations
)
segment = self.segment_track_array
previous_obs, next_obs = self.previous_obs, self.next_obs
segments_connexion = dict()
for i_slice, seg, _ in self.iter_on(segment):
if i_slice.start == i_slice.stop:
continue
i_p, i_n = previous_obs[i_slice.start], next_obs[i_slice.stop - 1]
p_seg, n_seg = segment[i_p], segment[i_n]
# dumping slice into dict
if seg not in segments_connexion:
segments_connexion[seg] = [i_slice, [], []]
else:
segments_connexion[seg][0] = i_slice
if i_p != -1:
if p_seg not in segments_connexion:
segments_connexion[p_seg] = [None, [], []]
# backward
segments_connexion[seg][2].append((i_slice.start, i_p, p_seg))
# forward
segments_connexion[p_seg][1].append((i_p, i_slice.start, seg))
if i_n != -1:
if n_seg not in segments_connexion:
segments_connexion[n_seg] = [None, [], []]
# forward
segments_connexion[seg][1].append((i_slice.stop - 1, i_n, n_seg))
# backward
segments_connexion[n_seg][2].append((i_n, i_slice.stop - 1, seg))
mask = zeros(segment.size, dtype=bool)
def func_forward(seg, indice):
seg_slice, _forward, _ = segments_connexion[seg]
mask[indice : seg_slice.stop] = True
for i_begin, i_end, seg2 in _forward:
if i_begin < indice:
continue
if not mask[i_end]:
func_forward(seg2, i_end)
def func_backward(seg, indice):
seg_slice, _, _backward = segments_connexion[seg]
mask[seg_slice.start : indice + 1] = True
for i_begin, i_end, seg2 in _backward:
if i_begin > indice:
continue
if not mask[i_end]:
func_backward(seg2, i_end)
for indice in i_obs:
if forward:
func_forward(segment[indice], indice)
if backward:
func_backward(segment[indice], indice)
return self.extract_with_mask(mask)
def connexions(self, multi_network=False):
"""Create dictionnary for each segment, gives the segments in interaction with
:param bool multi_network: use segment_track_array instead of segment, defaults to False
:return dict: Return dict of set, for each seg id we get set of segment which have event with him
"""
if multi_network:
segment = self.segment_track_array
else:
self.only_one_network()
segment = self.segment
segments_connexion = dict()
def add_seg(s1, s2):
if s1 not in segments_connexion:
segments_connexion[s1] = set()
if s2 not in segments_connexion:
segments_connexion[s2] = set()
segments_connexion[s1].add(s2), segments_connexion[s2].add(s1)
# Get index for each segment
i0, i1, _ = self.index_segment_track
i1 = i1 - 1
# Check if segment merge
i_next = self.next_obs[i1]
m_n = i_next != -1
# Check if segment come from splitting
i_previous = self.previous_obs[i0]
m_p = i_previous != -1
# For each split
for s1, s2 in zip(segment[i_previous[m_p]], segment[i0[m_p]]):
add_seg(s1, s2)
# For each merge
for s1, s2 in zip(segment[i_next[m_n]], segment[i1[m_n]]):
add_seg(s1, s2)
return segments_connexion
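# Illustrative note (not in the original source): for a single network in which
# segment 2 splits from segment 1 and later merges into segment 3, connexions()
# would return {1: {2}, 2: {1, 3}, 3: {2}}.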
@classmethod
def __close_segment(cls, father, shift, connexions, distance):
i_father = father - shift
if distance[i_father] == -1:
distance[i_father] = 0
d_target = distance[i_father] + 1
for son in connexions.get(father, list()):
i_son = son - shift
d_son = distance[i_son]
if d_son == -1 or d_son > d_target:
distance[i_son] = d_target
else:
continue
cls.__close_segment(son, shift, connexions, distance)
def segment_relative_order(self, seg_origine):
"""
Compute the relative order of each segment to the chosen segment
"""
self.only_one_network()
i_s, i_e, i_ref = build_index(self.segment)
segment_connexions = self.connexions()
relative_tr = -ones(i_s.shape, dtype="i4")
self.__close_segment(seg_origine, i_ref, segment_connexions, relative_tr)
d = -ones(self.shape)
for i0, i1, v in zip(i_s, i_e, relative_tr):
if i0 == i1:
continue
d[i0:i1] = v
return d
def relatives(self, obs, order=2):
"""
Extract the segments at a certain order from multiple observations.
:param iterable,int obs:
indices of observation for relatives computation. Can be one observation (int)
or collection of observations (iterable(int))
:param int order: order of relatives wanted. 0 means only observations in obs, 1 means direct relatives, ...
:return: all segments' relatives
:rtype: EddiesObservations
"""
segment = self.segment_track_array
previous_obs, next_obs = self.previous_obs, self.next_obs
segments_connexion = dict()
for i_slice, seg, _ in self.iter_on(segment):
if i_slice.start == i_slice.stop:
continue
i_p, i_n = previous_obs[i_slice.start], next_obs[i_slice.stop - 1]
p_seg, n_seg = segment[i_p], segment[i_n]
# dumping slice into dict
if seg not in segments_connexion:
segments_connexion[seg] = [i_slice, []]
else:
segments_connexion[seg][0] = i_slice
if i_p != -1:
if p_seg not in segments_connexion:
segments_connexion[p_seg] = [None, []]
# backward
segments_connexion[seg][1].append(p_seg)
segments_connexion[p_seg][1].append(seg)
if i_n != -1:
if n_seg not in segments_connexion:
segments_connexion[n_seg] = [None, []]
# forward
segments_connexion[seg][1].append(n_seg)
segments_connexion[n_seg][1].append(seg)
i_obs = [obs] if not hasattr(obs, "__iter__") else obs
distance = zeros(segment.size, dtype=uint16) - 1
def loop(seg, dist=1):
i_slice, links = segments_connexion[seg]
d = distance[i_slice.start]
if dist < d and dist <= order:
distance[i_slice] = dist
for _seg in links:
loop(_seg, dist + 1)
for indice in i_obs:
loop(segment[indice], 0)
return self.extract_with_mask(distance <= order)
# keep old names, for backward compatibility
relative = relatives
def close_network(self, other, nb_obs_min=10, **kwargs):
"""
Get close network from another atlas.
:param self other: Atlas to compare
:param int nb_obs_min: Minimal number of overlap for one trajectory
:param dict kwargs: keyword arguments for match function
:return: return other atlas reduced to common tracks with self
.. warning::
It could be a costly operation for huge dataset
"""
p0, p1 = self.period
indexs = list()
for i_self, i_other, t0, t1 in self.align_on(other, bins=range(p0, p1 + 2)):
i, j, s = self.match(other, i_self=i_self, i_other=i_other, **kwargs)
indexs.append(other.re_reference_index(j, i_other))
indexs = concatenate(indexs)
tr, nb = unique(other.track[indexs], return_counts=True)
m = zeros(other.track.shape, dtype=bool)
for i in tr[nb >= nb_obs_min]:
m[other.network_slice(i)] = True
return other.extract_with_mask(m)
def normalize_longitude(self):
"""Normalize all longitudes
Normalize longitude field and in the same range :
- longitude_max
- contour_lon_e (how to do if in raw)
- contour_lon_s (how to do if in raw)
"""
i_start, i_stop, _ = self.index_network
lon0 = (self.lon[i_start] - 180).repeat(i_stop - i_start)
logger.debug("Normalize longitude")
self.lon[:] = (self.lon - lon0) % 360 + lon0
if "lon_max" in self.fields:
logger.debug("Normalize longitude_max")
self.lon_max[:] = (self.lon_max - self.lon + 180) % 360 + self.lon - 180
if not self.raw_data:
if "contour_lon_e" in self.fields:
logger.debug("Normalize effective contour longitude")
self.contour_lon_e[:] = (
(self.contour_lon_e.T - self.lon + 180) % 360 + self.lon - 180
).T
if "contour_lon_s" in self.fields:
logger.debug("Normalize speed contour longitude")
self.contour_lon_s[:] = (
(self.contour_lon_s.T - self.lon + 180) % 360 + self.lon - 180
).T
def numbering_segment(self, start=0):
"""
New numbering of segment
"""
for i, _, _ in self.iter_on("track"):
new_numbering(self.segment[i], start)
def numbering_network(self, start=1):
"""
New numbering of network
"""
new_numbering(self.track, start)
def only_one_network(self):
"""
Raise an exception if the object contains more than one network.
"""
_, i_start, _ = self.index_network
if i_start.size > 1:
raise Exception("Several networks")
def position_filter(self, median_half_window, loess_half_window):
self.median_filter(median_half_window, "time", "lon").loess_filter(
loess_half_window, "time", "lon"
)
self.median_filter(median_half_window, "time", "lat").loess_filter(
loess_half_window, "time", "lat"
)
def loess_filter(self, half_window, xfield, yfield, inplace=True):
result = track_loess_filter(
half_window, self.obs[xfield], self.obs[yfield], self.segment_track_array
)
if inplace:
self.obs[yfield] = result
return self
return result
def median_filter(self, half_window, xfield, yfield, inplace=True):
result = track_median_filter(
half_window, self[xfield], self[yfield], self.segment_track_array
)
if inplace:
self[yfield][:] = result
return self
return result
def display_timeline(
self,
ax,
event=True,
field=None,
method=None,
factor=1,
colors_mode="roll",
**kwargs,
):
"""
Plot the timeline of a network.
Must be called on only one network.
:param matplotlib.axes.Axes ax: matplotlib axe used to draw
:param bool event: if True, draw the splitting and merging events
:param str,array field: yaxis values, if None, segments are used
:param str method: if None, mean values are used
:param float factor: to multiply field
:param str colors_mode:
color of lines. "roll" means looping through colors,
"y" means color adapt the y values (for matching color plots)
:return: plot mappable
"""
self.only_one_network()
j = 0
line_kw = dict(
ls="-",
marker="+",
markersize=6,
zorder=1,
lw=3,
)
line_kw.update(kwargs)
mappables = dict(lines=list())
if event:
mappables.update(
self.event_timeline(
ax,
field=field,
method=method,
factor=factor,
colors_mode=colors_mode,
)
)
if field is not None:
field = self.parse_varname(field)
for i, b0, b1 in self.iter_on("segment"):
x = self.time_datetime64[i]
if x.shape[0] == 0:
continue
if field is None:
y = b0 * ones(x.shape)
else:
if method == "all":
y = field[i] * factor
else:
y = field[i].mean() * ones(x.shape) * factor
if colors_mode == "roll":
_color = self.get_color(j)
elif colors_mode == "y":
_color = self.get_color(b0 - 1)
else:
raise NotImplementedError(f"colors_mode '{colors_mode}' not defined")
line = ax.plot(x, y, **line_kw, color=_color)[0]
mappables["lines"].append(line)
j += 1
return mappables
def event_timeline(self, ax, field=None, method=None, factor=1, colors_mode="roll"):
"""Mark events in plot"""
j = 0
events = dict(splitting=[], merging=[])
# TODO : fill mappables dict
y_seg = dict()
_time = self.time_datetime64
if field is not None and method != "all":
for i, b0, _ in self.iter_on("segment"):
y = self.parse_varname(field)[i]
if y.shape[0] != 0:
y_seg[b0] = y.mean() * factor
mappables = dict()
for i, b0, b1 in self.iter_on("segment"):
x = _time[i]
if x.shape[0] == 0:
continue
if colors_mode == "roll":
_color = self.get_color(j)
elif colors_mode == "y":
_color = self.get_color(b0 - 1)
else:
raise NotImplementedError(f"colors_mode '{colors_mode}' not defined")
event_kw = dict(color=_color, ls="-", zorder=1)
i_n, i_p = (
self.next_obs[i.stop - 1],
self.previous_obs[i.start],
)
if field is None:
y0 = b0
else:
if method == "all":
y0 = self.parse_varname(field)[i.stop - 1] * factor
else:
y0 = y_seg[b0]
if i_n != -1:
seg_next = self.segment[i_n]
y1 = (
seg_next
if field is None
else (
self.parse_varname(field)[i_n] * factor
if method == "all"
else y_seg[seg_next]
)
)
ax.plot((x[-1], _time[i_n]), (y0, y1), **event_kw)[0]
events["merging"].append((x[-1], y0))
if i_p != -1:
seg_previous = self.segment[i_p]
if field is not None and method == "all":
y0 = self[field][i.start] * factor
y1 = (
seg_previous
if field is None
else (
self.parse_varname(field)[i_p] * factor
if method == "all"
else y_seg[seg_previous]
)
)
ax.plot((x[0], _time[i_p]), (y0, y1), **event_kw)[0]
events["splitting"].append((x[0], y0))
j += 1
kwargs = dict(color="k", zorder=-1, linestyle=" ")
if len(events["splitting"]) > 0:
X, Y = list(zip(*events["splitting"]))
ref = ax.plot(
X, Y, marker="*", markersize=12, label="splitting events", **kwargs
)[0]
mappables.setdefault("events", []).append(ref)
if len(events["merging"]) > 0:
X, Y = list(zip(*events["merging"]))
ref = ax.plot(
X, Y, marker="H", markersize=10, label="merging events", **kwargs
)[0]
mappables.setdefault("events", []).append(ref)
return mappables
def mean_by_segment(self, y, **kw):
kw["dtype"] = y.dtype
return self.map_segment(lambda x: x.mean(), y, **kw)
def map_segment(self, method, y, same=True, **kw):
if same:
out = empty(y.shape, **kw)
else:
out = list()
for i, _, _ in self.iter_on(self.segment_track_array):
res = method(y[i])
if same:
out[i] = res
else:
if isinstance(i, slice):
if i.start == i.stop:
continue
elif len(i) == 0:
continue
out.append(res)
if not same:
out = array(out)
return out
def map_network(self, method, y, same=True, return_dict=False, **kw):
"""
Transform data `y` with method `method` for each track.
:param Callable method: method to apply on each track
:param np.array y: data where to apply method
:param bool same: if True, return an array with the same size as y. Else, return a list with the edited tracks
:param bool return_dict: if True, return a dict keyed by network id instead of an array/list
:param dict kw: extra keyword arguments forwarded to :func:`numpy.empty` when same is True
:return: array or dict of result from method for each network
"""
if same and return_dict:
raise NotImplementedError(
"both conditions 'same' and 'return_dict' should no be true"
)
if same:
out = empty(y.shape, **kw)
elif return_dict:
out = dict()
else:
out = list()
for i, b0, b1 in self.iter_on(self.track):
res = method(y[i])
if same:
out[i] = res
elif return_dict:
out[b0] = res
else:
if isinstance(i, slice):
if i.start == i.stop:
continue
elif len(i) == 0:
continue
out.append(res)
if not same and not return_dict:
out = array(out)
return out
def scatter_timeline(
self,
ax,
name,
factor=1,
event=True,
yfield=None,
yfactor=1,
method=None,
**kwargs,
):
"""
Must be called on only one network
"""
self.only_one_network()
y = (self.segment if yfield is None else self.parse_varname(yfield)) * yfactor
if method == "all":
pass
else:
y = self.mean_by_segment(y)
mappables = dict()
if event:
mappables.update(
self.event_timeline(ax, field=yfield, method=method, factor=yfactor)
)
if "c" not in kwargs:
v = self.parse_varname(name)
kwargs["c"] = v * factor
mappables["scatter"] = ax.scatter(self.time_datetime64, y, **kwargs)
return mappables
def event_map(self, ax, **kwargs):
"""Add the merging and splitting events to a map"""
j = 0
mappables = dict()
symbol_kw = dict(
markersize=10,
color="k",
)
symbol_kw.update(kwargs)
symbol_kw_split = symbol_kw.copy()
symbol_kw_split["markersize"] += 4
for i, b0, b1 in self.iter_on("segment"):
nb = i.stop - i.start
if nb == 0:
continue
event_kw = dict(color=self.COLORS[j % self.NB_COLORS], ls="-", **kwargs)
i_n, i_p = (
self.next_obs[i.stop - 1],
self.previous_obs[i.start],
)
if i_n != -1:
y0, y1 = self.lat[i.stop - 1], self.lat[i_n]
x0, x1 = self.lon[i.stop - 1], self.lon[i_n]
ax.plot((x0, x1), (y0, y1), **event_kw)[0]
ax.plot(x0, y0, marker="H", **symbol_kw)[0]
if i_p != -1:
y0, y1 = self.lat[i.start], self.lat[i_p]
x0, x1 = self.lon[i.start], self.lon[i_p]
ax.plot((x0, x1), (y0, y1), **event_kw)[0]
ax.plot(x0, y0, marker="*", **symbol_kw_split)[0]
j += 1
return mappables
def scatter(
self,
ax,
name="time",
factor=1,
ref=None,
edgecolor_cycle=None,
**kwargs,
):
"""
This function scatters the path of each network, with the merging and splitting events
:param matplotlib.axes.Axes ax: matplotlib axe used to draw
:param str,array,None name:
variable used to fill the contours, if None all elements have the same color
:param float,None ref: if defined, ref is used as western boundary
:param float factor: multiply value by
:param list edgecolor_cycle: list of colors
:param dict kwargs: look at :py:meth:`matplotlib.axes.Axes.scatter`
:return: a dict of scattered mappables
"""
mappables = dict()
nb_colors = len(edgecolor_cycle) if edgecolor_cycle else None
x = self.longitude
if ref is not None:
x = (x - ref) % 360 + ref
kwargs = kwargs.copy()
if nb_colors:
edgecolors = list()
seg_previous = self.segment[0]
j = 0
for seg in self.segment:
if seg != seg_previous:
j += 1
edgecolors.append(edgecolor_cycle[j % nb_colors])
seg_previous = seg
mappables["edges"] = ax.scatter(
x, self.latitude, edgecolor=edgecolors, **kwargs
)
kwargs.pop("linewidths", None)
kwargs["lw"] = 0
if name is not None and "c" not in kwargs:
v = self.parse_varname(name)
kwargs["c"] = v * factor
mappables["scatter"] = ax.scatter(x, self.latitude, **kwargs)
return mappables
def extract_event(self, indices):
nb = len(indices)
new = EddiesObservations(
nb,
track_extra_variables=self.track_extra_variables,
track_array_variables=self.track_array_variables,
array_variables=self.array_variables,
only_variables=self.only_variables,
raw_data=self.raw_data,
)
for k in new.fields:
new[k][:] = self[k][indices]
new.sign_type = self.sign_type
return new
@property
def segment_track_array(self):
"""Return a unique segment id when multiple networks are considered"""
if self._segment_track_array is None:
self._segment_track_array = build_unique_array(self.segment, self.track)
return self._segment_track_array
def birth_event(self):
"""Extract birth events."""
i_start, _, _ = self.index_segment_track
indices = i_start[self.previous_obs[i_start] == -1]
if self.first_is_trash():
indices = indices[1:]
return self.extract_event(indices)
generation_event = birth_event
def death_event(self):
"""Extract death events."""
_, i_stop, _ = self.index_segment_track
indices = i_stop[self.next_obs[i_stop - 1] == -1] - 1
if self.first_is_trash():
indices = indices[1:]
return self.extract_event(indices)
dissipation_event = death_event
def merging_event(self, triplet=False, only_index=False):
"""Return observation after a merging event.
If `triplet=True` return the eddy after a merging event, the eddy before the merging event,
and the eddy stopped due to merging.
"""
# Get start and stop for each segment, there is no empty segment
_, i1, _ = self.index_segment_track
# Get last index for each segment
i_stop = i1 - 1
# Get target index
idx_m1 = self.next_obs[i_stop]
# Get mask and valid target
m = idx_m1 != -1
idx_m1 = idx_m1[m]
# Sort by time event
i = self.time[idx_m1].argsort()
idx_m1 = idx_m1[i]
if triplet:
# Get obs before target
idx_m0_stop = i_stop[m][i]
idx_m0 = self.previous_obs[idx_m1].copy()
if triplet:
if only_index:
return idx_m1, idx_m0, idx_m0_stop
else:
return (
self.extract_event(idx_m1),
self.extract_event(idx_m0),
self.extract_event(idx_m0_stop),
)
else:
idx_m1 = unique(idx_m1)
if only_index:
return idx_m1
else:
return self.extract_event(idx_m1)
def splitting_event(self, triplet=False, only_index=False):
"""Return observation before a splitting event.
If `triplet=True` return the eddy before a splitting event, the eddy after the splitting event,
and the eddy starting due to splitting.
"""
# Get start and stop for each segment, there is no empty segment
i_start, _, _ = self.index_segment_track
# Get target index
idx_s0 = self.previous_obs[i_start]
# Get mask and valid target
m = idx_s0 != -1
idx_s0 = idx_s0[m]
# Sort by time event
i = self.time[idx_s0].argsort()
idx_s0 = idx_s0[i]
if triplet:
# Get obs after target
idx_s1_start = i_start[m][i]
idx_s1 = self.next_obs[idx_s0].copy()
if triplet:
if only_index:
return idx_s0, idx_s1, idx_s1_start
else:
return (
self.extract_event(idx_s0),
self.extract_event(idx_s1),
self.extract_event(idx_s1_start),
)
else:
idx_s0 = unique(idx_s0)
if only_index:
return idx_s0
else:
return self.extract_event(idx_s0)
def dissociate_network(self):
"""
Dissociate networks with no known interaction (splitting/merging)
"""
tags = self.tag_segment()
if self.track[0] == 0:
tags -= 1
self.track[:] = tags[self.segment_track_array]
return self.sort()
def network_segment(self, id_network, id_segment):
return self.extract_with_mask(self.segment_slice(id_network, id_segment))
def network(self, id_network):
return self.extract_with_mask(self.network_slice(id_network))
def networks_mask(self, id_networks, segment=False):
if segment:
return generate_mask_from_ids(
id_networks, self.track.size, *self.index_segment_track
)
else:
return generate_mask_from_ids(
id_networks, self.track.size, *self.index_network
)
def networks(self, id_networks):
return self.extract_with_mask(
generate_mask_from_ids(
array(id_networks), self.track.size, *self.index_network
)
)
@property
def nb_network(self):
"""
Count and return the number of networks
"""
return (self.network_size() != 0).sum()
@property
def nb_segment(self):
"""
Count and return the number of segments across all networks
"""
return self.index_segment_track[0].size
def identify_in(self, other, size_min=1, segment=False):
"""
Return the couples of segments or networks which match between the two atlases
:param other: other atlas to compare
:param int size_min: number of observation in network/segment
:param bool segment: segment mode
"""
if segment:
counts = self.segment_size(), other.segment_size()
i_self_ref, i_other_ref = (
self.ref_segment_track_index,
other.ref_segment_track_index,
)
var_id = "segment"
else:
counts = self.network_size(), other.network_size()
i_self_ref, i_other_ref = self.ref_index, other.ref_index
var_id = "track"
# object to contain index of couple
in_self, in_other = list(), list()
# We iterate on item of same size
for i_self, i_other, i0, _ in self.align_on(other, counts, all_ref=True):
if i0 < size_min:
continue
if isinstance(i_other, slice):
i_other = arange(i_other.start, i_other.stop)
# all_ref gives every item of self; sometimes there is nothing to compare with in other
if i_other.size == 0:
id_self = i_self + i_self_ref
in_self.append(id_self)
in_other.append(-ones(id_self.shape, dtype=id_self.dtype))
continue
if isinstance(i_self, slice):
i_self = arange(i_self.start, i_self.stop)
# We get absolute id
id_self, id_other = i_self + i_self_ref, i_other + i_other_ref
# We compute mask to select data
m_self, m_other = self.networks_mask(id_self, segment), other.networks_mask(
id_other, segment
)
# We extract obs
obs_self, obs_other = self.obs[m_self], other.obs[m_other]
x1, y1, t1 = obs_self["lon"], obs_self["lat"], obs_self["time"]
x2, y2, t2 = obs_other["lon"], obs_other["lat"], obs_other["time"]
if segment:
ids1 = build_unique_array(obs_self["segment"], obs_self["track"])
ids2 = build_unique_array(obs_other["segment"], obs_other["track"])
label1 = self.segment_track_array[m_self]
label2 = other.segment_track_array[m_other]
else:
label1, label2 = ids1, ids2 = obs_self[var_id], obs_other[var_id]
# For each item we get index to sort
i01, indexs1, id1 = list(), List(), list()
for sl_self, id_, _ in self.iter_on(ids1):
i01.append(sl_self.start)
indexs1.append(obs_self[sl_self].argsort(order=["time", "lon", "lat"]))
id1.append(label1[sl_self.start])
i02, indexs2, id2 = list(), List(), list()
for sl_other, _, _ in other.iter_on(ids2):
i02.append(sl_other.start)
indexs2.append(
obs_other[sl_other].argsort(order=["time", "lon", "lat"])
)
id2.append(label2[sl_other.start])
id1, id2 = array(id1), array(id2)
# We search item from self in item of others
i_local_target = same_position(
x1, y1, t1, x2, y2, t2, array(i01), array(i02), indexs1, indexs2
)
# -1 => no item found in other dataset
m = i_local_target != -1
in_self.append(id1)
track2_ = -ones(id1.shape, dtype="i4")
track2_[m] = id2[i_local_target[m]]
in_other.append(track2_)
return concatenate(in_self), concatenate(in_other)
@classmethod
def __tag_segment(cls, seg, tag, groups, connexions):
"""
Set the same temporary ID for each connected segment.
:param int seg: current ID of segment
:param int tag: temporary ID to set for the segment and its connexions
:param array[int] groups: array where tag is stored
:param dict connexions: gives for one ID of segment all connected segments
"""
# If the segment is already tagged we stop the recursion
if groups[seg] != 0:
return
# We set tag for this segment
groups[seg] = tag
# Get all connexions of this segment
segs = connexions.get(seg, None)
if segs is not None:
for seg in segs:
# For each connexion we apply same function
cls.__tag_segment(seg, tag, groups, connexions)
def tag_segment(self):
"""For each segment, method give a new network id, and all segment are connected
:return array: for each unique seg id, it return new network id
"""
nb = self.segment_track_array[-1] + 1
sub_group = zeros(nb, dtype="u4")
c = self.connexions(multi_network=True)
j = 1
# for each available id
for i in range(nb):
# No connexions, no need to explore
if i not in c:
sub_group[i] = j
j += 1
continue
# Skip if already set
if sub_group[i] != 0:
continue
# we tag an unset segment and explore all its connexions
self.__tag_segment(i, j, sub_group, c)
j += 1
return sub_group
def fully_connected(self):
"""Suspicious"""
raise Exception("Must be check")
self.only_one_network()
return self.tag_segment().shape[0] == 1
def first_is_trash(self):
"""Check if first network is Trash
:return bool: True if first network is trash
"""
i_start, i_stop, _ = self.index_segment_track
sl = slice(i_start[0], i_stop[0])
return (self.previous_obs[sl] == -1).all() and (self.next_obs[sl] == -1).all()
def remove_trash(self):
"""
Remove the lonely eddies (only 1 obs in segment, associated network number is 0)
"""
if self.first_is_trash():
return self.extract_with_mask(self.track != 0)
else:
return self
def plot(self, ax, ref=None, color_cycle=None, **kwargs):
"""
This function draws the path of each trajectory
:param matplotlib.axes.Axes ax: ax to draw
:param float,int ref: if defined, all coordinates are wrapped with ref as western boundary
:param dict kwargs: keyword arguments for Axes.plot
:return: a list of matplotlib mappables
"""
kwargs = kwargs.copy()
if color_cycle is None:
color_cycle = self.COLORS
nb_colors = len(color_cycle)
mappables = list()
if "label" in kwargs:
kwargs["label"] = self.format_label(kwargs["label"])
j = 0
for i, _, _ in self.iter_on(self.segment_track_array):
nb = i.stop - i.start
if nb == 0:
continue
if nb_colors:
kwargs["color"] = color_cycle[j % nb_colors]
x, y = self.lon[i], self.lat[i]
if ref is not None:
x, y = wrap_longitude(x, y, ref, cut=True)
mappables.append(ax.plot(x, y, **kwargs)[0])
j += 1
return mappables
def remove_dead_end(self, nobs=3, ndays=0, recursive=0, mask=None):
"""
Remove short segments that don't connect several segments
:param int nobs: Minimal number of observation to keep a segment
:param int ndays: Minimal number of days to keep a segment
:param int recursive: Run method N times more
:param array[bool] mask: if one or more observations of the segment are selected by mask, the segment is kept
.. warning::
It will also remove a short segment that splits from, then merges with, the same segment
"""
connexions = self.connexions(multi_network=True)
i0, i1, _ = self.index_segment_track
dt = self.time[i1 - 1] - self.time[i0] + 1
nb = i1 - i0
m = (dt >= ndays) * (nb >= nobs)
nb_connexions = array([len(connexions.get(i, tuple())) for i in where(~m)[0]])
m[~m] = nb_connexions >= 2
segments_keep = where(m)[0]
if mask is not None:
segments_keep = unique(
concatenate((segments_keep, self.segment_track_array[mask]))
)
# get mask for selected obs
m = ~self.segment_mask(segments_keep)
self.track[m] = 0
self.segment[m] = 0
self.previous_obs[m] = -1
self.previous_cost[m] = 0
self.next_obs[m] = -1
self.next_cost[m] = 0
m_previous = m[self.previous_obs]
self.previous_obs[m_previous] = -1
self.previous_cost[m_previous] = 0
m_next = m[self.next_obs]
self.next_obs[m_next] = -1
self.next_cost[m_next] = 0
self.sort()
if recursive > 0:
self.remove_dead_end(nobs, ndays, recursive - 1)
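# Illustrative usage (hypothetical thresholds): remove segments shorter than 5 observations
# or lasting fewer than 10 days, unless they connect at least two other segments,
# then run the pruning once more:
#     n.remove_dead_end(nobs=5, ndays=10, recursive=1)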
def extract_segment(self, segments, absolute=False):
"""Extract given segments
:param array,tuple,list segments: list of segment to extract
:param bool absolute: keep for compatibility, defaults to False
:return NetworkObservations: Return observations from selected segment
"""
if not absolute:
raise Exception("Not implemented")
return self.extract_with_mask(self.segment_mask(segments))
def segment_mask(self, segments):
"""Get mask from list of segment
:param list,array segments: absolute id of segment
"""
return generate_mask_from_ids(
array(segments), len(self), *self.index_segment_track
)
def get_mask_with_period(self, period):
"""
Obtain a mask for a time period
:param (int,int) period: two dates defining the period, in days since 1950-01-01; a negative value is an offset from the dataset period bounds
:return: mask where period is defined
:rtype: np.array(bool)
"""
dataset_period = self.period
p_min, p_max = period
if p_min > 0:
mask = self.time >= p_min
elif p_min < 0:
mask = self.time >= (dataset_period[0] - p_min)
else:
mask = ones(self.time.shape, dtype=bool_)
if p_max > 0:
mask *= self.time <= p_max
elif p_max < 0:
mask *= self.time <= (dataset_period[1] + p_max)
return mask
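# Illustrative examples (hypothetical dates, expressed in days since 1950-01-01):
#     n.get_mask_with_period((20000, 21000))  # obs between the two absolute dates
#     n.get_mask_with_period((-10, -10))      # obs from 10 days after the dataset start
#                                             # up to 10 days before the dataset end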
def extract_with_period(self, period):
"""
Extract within a time period
:param (int,int) period: two dates defining the period, in days since 1950-01-01
:return: Return all eddy trajectories in period
:rtype: NetworkObservations
.. minigallery:: py_eddy_tracker.NetworkObservations.extract_with_period
"""
return self.extract_with_mask(self.get_mask_with_period(period))
def extract_light_with_mask(self, mask, track_extra_variables=[]):
"""extract data with mask, but only with variables used for coherence, aka self.array_variables
:param mask: mask used to extract
:type mask: np.array(bool)
:return: new EddiesObservation with data wanted
:rtype: self
"""
if isinstance(mask, slice):
nb_obs = mask.stop - mask.start
else:
nb_obs = mask.sum()
# only time & contour_lon/lat_e/s
variables = ["time"] + self.array_variables
new = self.__class__(
size=nb_obs,
track_extra_variables=track_extra_variables,
track_array_variables=self.track_array_variables,
array_variables=self.array_variables,
only_variables=variables,
raw_data=self.raw_data,
)
new.sign_type = self.sign_type
if nb_obs == 0:
logger.info("Empty dataset will be created")
else:
logger.info(
f"{nb_obs} observations will be extracted ({nb_obs / self.shape[0]:.3%})"
)
for field in variables + track_extra_variables:
logger.debug("Copy of field %s ...", field)
new.obs[field] = self.obs[field][mask]
if (
"previous_obs" in track_extra_variables
and "next_obs" in track_extra_variables
):
# n & p must be re-index
n, p = self.next_obs[mask], self.previous_obs[mask]
# translate has one extra slot so that index -1 still maps to -1
translate = -ones(len(self) + 1, dtype="i4")
translate[:-1][mask] = arange(nb_obs)
new.next_obs[:] = translate[n]
new.previous_obs[:] = translate[p]
return new
def extract_with_mask(self, mask):
"""
Extract a subset of observations.
:param array(bool) mask: mask to select observations
:return: same object with selected observations
:rtype: self
"""
if isinstance(mask, slice):
nb_obs = mask.stop - mask.start
else:
nb_obs = mask.sum()
new = self.__class__.new_like(self, nb_obs)
new.sign_type = self.sign_type
if nb_obs == 0:
logger.info("Empty dataset will be created")
else:
logger.debug(
f"{nb_obs} observations will be extracted ({nb_obs / self.shape[0]:.3%})"
)
for field in self.fields:
if field in ("next_obs", "previous_obs"):
continue
logger.debug("Copy of field %s ...", field)
new.obs[field] = self.obs[field][mask]
# n & p must be re-index
n, p = self.next_obs[mask], self.previous_obs[mask]
# translate has one extra slot so that index -1 still maps to -1
translate = -ones(len(self) + 1, dtype="i4")
translate[:-1][mask] = arange(nb_obs)
new.next_obs[:] = translate[n]
new.previous_obs[:] = translate[p]
return new
def analysis_coherence(
self,
date_function,
uv_params,
advection_mode="both",
n_days=14,
step_mesh=1.0 / 50,
output_name=None,
dissociate_network=False,
correct_close_events=0,
remove_dead_end=0,
):
"""Global function to analyse segments coherence, with network preprocessing.
:param callable date_function: python function, takes as param `int` (julian day) and return
data filename associated to the date
:param dict uv_params: dict of parameters used by
:py:meth:`~py_eddy_tracker.dataset.grid.GridCollection.from_netcdf_list`
:param int n_days: number of days for advection
:param float step_mesh: step for particle mesh in degrees
:param str output_name: path/name for the output (without extension) to store the clean
network in .nc and the coherence results in .zarr. Works only for advection_mode = "both"
:param bool dissociate_network: If True apply
:py:meth:`~py_eddy_tracker.observation.network.NetworkObservations.dissociate_network`
:param int correct_close_events: Number of days in
:py:meth:`~py_eddy_tracker.observation.network.NetworkObservations.correct_close_events`
:param int remove_dead_end: Number of days in
:py:meth:`~py_eddy_tracker.observation.network.NetworkObservations.remove_dead_end`
:return target_forward, target_backward: 2D numpy.array with the eddy observation the
particles ended in after advection
:return pct_forward, pct_backward: percentage of ending particles within the
eddy observation with regards to the starting number
"""
if dissociate_network:
self.dissociate_network()
if correct_close_events > 0:
self.correct_close_events(nb_days_max=correct_close_events)
if remove_dead_end > 0:
network_clean = self.remove_dead_end(nobs=0, ndays=remove_dead_end)
else:
network_clean = self
network_clean.numbering_segment()
res = []
if (advection_mode == "both") | (advection_mode == "forward"):
target_forward, pct_forward = network_clean.segment_coherence_forward(
date_function=date_function,
uv_params=uv_params,
n_days=n_days,
step_mesh=step_mesh,
)
res = res + [target_forward, pct_forward]
if (advection_mode == "both") | (advection_mode == "backward"):
target_backward, pct_backward = network_clean.segment_coherence_backward(
date_function=date_function,
uv_params=uv_params,
n_days=n_days,
step_mesh=step_mesh,
)
res = res + [target_backward, pct_backward]
if (output_name is not None) & (advection_mode == "both"):
# TODO : put some path verification?
# Save the clean network in netcdf
with netCDF4.Dataset(output_name + ".nc", "w") as fh:
network_clean.to_netcdf(fh)
# Save the results of particles advection in zarr
# zarr compression parameters
# TODO : check size? compression?
params_seg = dict()
params_pct = dict()
zg = zarr.open(output_name + ".zarr", mode="w")
zg.array("target_forward", target_forward, **params_seg)
zg.array("pct_forward", pct_forward, **params_pct)
zg.array("target_backward", target_backward, **params_seg)
zg.array("pct_backward", pct_backward, **params_pct)
return network_clean, res
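# Illustrative usage (hypothetical paths and parameters, uv_params left as a placeholder):
#     def date2file(julian_day):
#         date = datetime.timedelta(days=julian_day) + datetime.datetime(1950, 1, 1)
#         return f"/tmp/dt_global_{date.strftime('%Y%m%d')}.nc"
#     clean, (tf, pf, tb, pb) = n.analysis_coherence(
#         date2file, uv_params={...}, n_days=14, output_name="/tmp/my_network")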
def segment_coherence_backward(
self,
date_function,
uv_params,
n_days=14,
step_mesh=1.0 / 50,
contour_start="speed",
contour_end="speed",
):
"""
Percentage of particles and their targets after backward advection from a specific eddy.
:param callable date_function: python function, takes as param `int` (julian day) and return
data filename associated to the date (see note)
:param dict uv_params: dict of parameters used by
:py:meth:`~py_eddy_tracker.dataset.grid.GridCollection.from_netcdf_list`
:param int n_days: number of days for advection
:param float step_mesh: step for particle mesh in degrees
:return: observation matches and percentages
.. note:: the param `date_function` should be something like :
.. code-block:: python
def date2file(julian_day):
date = datetime.timedelta(days=julian_day) + datetime.datetime(
1950, 1, 1
)
return f"/tmp/dt_global_{date.strftime('%Y%m%d')}.nc"
"""
shape = len(self), 2
itb_final = -ones(shape, dtype="i4")
ptb_final = zeros(shape, dtype="i1")
t_start, t_end = int(self.period[0]), int(self.period[1])
# dates = arange(t_start, t_start + n_days + 1)
dates = arange(t_start, min(t_start + n_days + 1, t_end + 1))
first_files = [date_function(x) for x in dates]
c = GridCollection.from_netcdf_list(first_files, dates, **uv_params)
first = True
range_start = t_start + n_days
range_end = t_end + 1
for _t in range(t_start + n_days, t_end + 1):
_timestamp = time.time()
t_shift = _t
# skip first shift, because already included
if first:
first = False
else:
# add next date to GridCollection and delete last date
c.shift_files(t_shift, date_function(int(t_shift)), **uv_params)
particle_candidate(
c,
self,
step_mesh,
_t,
itb_final,
ptb_final,
n_days=-n_days,
contour_start=contour_start,
contour_end=contour_end,
)
logger.info(
(
f"coherence {_t} / {range_end-1} ({(_t - range_start) / (range_end - range_start-1):.1%})"
f" : {time.time()-_timestamp:5.2f}s"
)
)
return itb_final, ptb_final
def segment_coherence_forward(
self,
date_function,
uv_params,
n_days=14,
step_mesh=1.0 / 50,
contour_start="speed",
contour_end="speed",
**kwargs,
):
"""
Percentage of particles and their targets after forward advection from a specific eddy.
:param callable date_function: python function, takes as param `int` (julian day) and return
data filename associated to the date (see note)
:param dict uv_params: dict of parameters used by
:py:meth:`~py_eddy_tracker.dataset.grid.GridCollection.from_netcdf_list`
:param int n_days: number of days for advection
:param float step_mesh: step for particle mesh in degrees
:return: observation matches and percentages
.. note:: the param `date_function` should be something like :
.. code-block:: python
def date2file(julian_day):
date = datetime.timedelta(days=julian_day) + datetime.datetime(
1950, 1, 1
)
return f"/tmp/dt_global_{date.strftime('%Y%m%d')}.nc"
"""
shape = len(self), 2
itf_final = -ones(shape, dtype="i4")
ptf_final = zeros(shape, dtype="i1")
t_start, t_end = int(self.period[0]), int(self.period[1])
dates = arange(t_start, min(t_start + n_days + 1, t_end + 1))
first_files = [date_function(x) for x in dates]
c = GridCollection.from_netcdf_list(first_files, dates, **uv_params)
first = True
range_start = t_start
range_end = t_end - n_days + 1
for _t in range(range_start, range_end):
_timestamp = time.time()
t_shift = _t + n_days
# skip first shift, because already included
if first:
first = False
else:
# add next date to GridCollection and delete last date
c.shift_files(t_shift, date_function(int(t_shift)), **uv_params)
particle_candidate(
c,
self,
step_mesh,
_t,
itf_final,
ptf_final,
n_days=n_days,
contour_start=contour_start,
contour_end=contour_end,
**kwargs,
)
logger.info(
(
f"coherence {_t} / {range_end-1} ({(_t - range_start) / (range_end - range_start-1):.1%})"
f" : {time.time()-_timestamp:5.2f}s"
)
)
return itf_final, ptf_final
def mask_obs_close_event(self, merging=True, spliting=True, dt=3):
"""Build a mask of close observation from event
:param n: Network
:param bool merging: select merging event, defaults to True
:param bool spliting: select splitting event, defaults to True
:param int dt: delta of time max , defaults to 3
:return array: mask
"""
m = zeros(len(self), dtype="bool")
if merging:
i_target, ip1, ip2 = self.merging_event(triplet=True, only_index=True)
mask_follow_obs(m, self.previous_obs, self.time, ip1, dt)
mask_follow_obs(m, self.previous_obs, self.time, ip2, dt)
mask_follow_obs(m, self.next_obs, self.time, i_target, dt)
if spliting:
i_target, in1, in2 = self.splitting_event(triplet=True, only_index=True)
mask_follow_obs(m, self.next_obs, self.time, in1, dt)
mask_follow_obs(m, self.next_obs, self.time, in2, dt)
mask_follow_obs(m, self.previous_obs, self.time, i_target, dt)
return m
def swap_track(
self,
length_main_max_after_event=2,
length_secondary_min_after_event=10,
delta_pct_max=-0.2,
):
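"""Swap the main and secondary segments of a splitting event when the main segment ends
shortly after the event while the secondary one lives much longer and the previous_cost
difference stays above ``delta_pct_max`` (description inferred from the code below).
"""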
events = self.splitting_event(triplet=True, only_index=True)
count = 0
for i_main, i1, i2 in zip(*events):
seg_main, _, seg2 = (
self.segment_track_array[i_main],
self.segment_track_array[i1],
self.segment_track_array[i2],
)
i_start, i_end, i0 = self.index_segment_track
# For splitting
last_index_main = i_end[seg_main - i0] - 1
last_index_secondary = i_end[seg2 - i0] - 1
last_main_next_obs = self.next_obs[last_index_main]
t_event, t_main_end, t_secondary_start, t_secondary_end = (
self.time[i_main],
self.time[last_index_main],
self.time[i2],
self.time[last_index_secondary],
)
dt_main, dt_secondary = (
t_main_end - t_event,
t_secondary_end - t_secondary_start,
)
delta_cost = self.previous_cost[i2] - self.previous_cost[i1]
if (
dt_main <= length_main_max_after_event
and dt_secondary >= length_secondary_min_after_event
and last_main_next_obs == -1
and delta_cost > delta_pct_max
):
self.segment[i1 : last_index_main + 1] = self.segment[i2]
self.segment[i2 : last_index_secondary + 1] = self.segment[i_main]
count += 1
logger.info("%d segmnent swap on %d", count, len(events[0]))
return self.sort()
class Network:
__slots__ = (
"window",
"filenames",
"nb_input",
"buffer",
"memory",
)
NOGROUP = TrackEddiesObservations.NOGROUP
def __init__(self, input_regex, window=5, intern=False, memory=False):
"""
Class to group observations by network
"""
self.window = window
self.buffer = Buffer(window, intern, memory)
self.memory = memory
self.filenames = glob(input_regex)
self.filenames.sort()
self.nb_input = len(self.filenames)
@classmethod
def from_eddiesobservations(cls, observations, *args, **kwargs):
new = cls("", *args, **kwargs)
new.filenames = observations
new.nb_input = len(new.filenames)
return new
def get_group_array(self, results, nb_obs):
"""With a loop on all pair of index, we will label each obs with a group
number
"""
nb_obs = array(nb_obs, dtype="u4")
day_start = nb_obs.cumsum() - nb_obs
gr = empty(nb_obs.sum(), dtype="u4")
gr[:] = self.NOGROUP
merge_id = list()
id_free = 1
for i, j, ii, ij in results:
gr_i = gr[slice(day_start[i], day_start[i] + nb_obs[i])]
gr_j = gr[slice(day_start[j], day_start[j] + nb_obs[j])]
# obs with no groups
m = (gr_i[ii] == self.NOGROUP) * (gr_j[ij] == self.NOGROUP)
nb_new = m.sum()
gr_i[ii[m]] = gr_j[ij[m]] = arange(id_free, id_free + nb_new)
id_free += nb_new
# associate obs with no group with obs with group
m = (gr_i[ii] != self.NOGROUP) * (gr_j[ij] == self.NOGROUP)
gr_j[ij[m]] = gr_i[ii[m]]
m = (gr_i[ii] == self.NOGROUP) * (gr_j[ij] != self.NOGROUP)
gr_i[ii[m]] = gr_j[ij[m]]
# case where 2 obs have a different group
m = gr_i[ii] != gr_j[ij]
if m.any():
# The two obs belong to different groups: merge the groups
for i_, j_ in zip(ii[m], ij[m]):
g0, g1 = gr_i[i_], gr_j[j_]
if g0 > g1:
g0, g1 = g1, g0
merge_id.append((g0, g1))
gr_transfer = self.group_translator(id_free, set(merge_id))
return gr_transfer[gr]
@staticmethod
def group_translator(nb, duos):
"""
Create a translator with all duos
:param int nb: size of translator
:param set((int, int)) duos: set of all groups that must be joined
:Example:
>>> NetworkObservations.group_translator(5, ((0, 1), (0, 2), (1, 3)))
[3, 3, 3, 3, 5]
"""
translate = arange(nb, dtype="u4")
for i, j in sorted(duos):
gr_i, gr_j = translate[i], translate[j]
if gr_i != gr_j:
apply_replace(translate, gr_i, gr_j)
return translate
def group_observations(self, min_overlap=0.2, minimal_area=False, **kwargs):
"""Store every interaction between identifications
:param bool minimal_area: If True, the function computes intersection/smallest polygon, else intersection/union, by default False
:param float min_overlap: minimum overlap area to associate observations, by default 0.2
:return:
:rtype: TrackEddiesObservations
"""
results, nb_obs = list(), list()
# To display print only in INFO
display_iteration = logger.getEffectiveLevel() == logging.INFO
for i, filename in enumerate(self.filenames):
if display_iteration:
print(f"{filename} compared to {self.window} next", end="\r")
# Load observations with function to buffer observations
xi, yi = self.buffer.load_contour(filename)
# Append number of observations by filename
nb_obs.append(xi.shape[0])
for j in range(i + 1, min(self.window + i + 1, self.nb_input)):
xj, yj = self.buffer.load_contour(self.filenames[j])
ii, ij = bbox_intersection(xi, yi, xj, yj)
m = (
vertice_overlap(
xi[ii],
yi[ii],
xj[ij],
yj[ij],
minimal_area=minimal_area,
min_overlap=min_overlap,
**kwargs,
)
!= 0
)
results.append((i, j, ii[m], ij[m]))
if display_iteration:
print()
gr = self.get_group_array(results, nb_obs)
nb_alone, nb_obs, nb_gr = (gr == self.NOGROUP).sum(), len(gr), len(unique(gr))
logger.info(
f"{nb_alone} alone / {nb_obs} obs, {nb_gr} groups, "
f"{nb_alone *100./nb_obs:.2f} % alone, {(nb_obs - nb_alone) / (nb_gr - 1):.1f} obs/group"
)
return gr
def build_dataset(self, group, raw_data=True):
nb_obs = group.shape[0]
model = TrackEddiesObservations.load_file(self.filenames[-1], raw_data=raw_data)
eddies = TrackEddiesObservations.new_like(model, nb_obs)
eddies.sign_type = model.sign_type
# Get new index to re-order observations by groups
new_i = get_next_index(group)
display_iteration = logger.getEffectiveLevel() == logging.INFO
elements = eddies.elements
i = 0
for filename in self.filenames:
if display_iteration:
print(f"Load {filename} to copy", end="\r")
if self.memory:
# Only if netcdf
with open(filename, "rb") as h:
e = TrackEddiesObservations.load_file(h, raw_data=raw_data)
else:
e = TrackEddiesObservations.load_file(filename, raw_data=raw_data)
stop = i + len(e)
sl = slice(i, stop)
for element in elements:
eddies[element][new_i[sl]] = e[element]
i = stop
if display_iteration:
print()
eddies.track[new_i] = group
return eddies
@njit(cache=True)
def get_percentile_on_following_obs(
i, indexs, percents, follow_obs, t, segment, i_target, window, q=50, nb_min=1
):
"""Get stat on a part of segment close of an event
:param int i: index to follow
:param array indexs: indexs from coherence
:param array percents: percent from coherence
:param array[int] follow_obs: give index for the following observation
:param array t: time for each observation
:param array segment: segment for each observation
:param int i_target: index of target
:param int window: time window of search
:param int q: Percentile from 0 to 100, defaults to 50
:param int nb_min: Number minimal of observation to provide statistics, defaults to 1
:return float : return statistic
"""
last_t, segment_follow = t[i], segment[i]
segment_target = segment[i_target]
percent_target = empty(window, dtype=percents.dtype)
j = 0
while abs(last_t - t[i]) < window and i != -1 and segment_follow == segment[i]:
# Iter on primary & secondary
for index, percent in zip(indexs[i], percents[i]):
if index != -1 and segment[index] == segment_target:
percent_target[j] = percent
j += 1
i = follow_obs[i]
if j < nb_min:
return nan
return percentile(percent_target[:j], q)
@njit(cache=True)
def get_percentile_around_event(
i,
i1,
i2,
ind,
pct,
follow_obs,
t,
segment,
window=10,
follow_parent=False,
q=50,
nb_min=1,
):
"""Get stat around event
:param array[int] i: Indexs of target
:param array[int] i1: Indexs of primary origin
:param array[int] i2: Indexs of secondary origin
:param array ind: indexs from coherence
:param array pct: percent from coherence
:param array[int] follow_obs: give index for the following observation
:param array t: time for each observation
:param array segment: segment for each observation
:param int window: time window of search, defaults to 10
:param bool follow_parent: Follow parent instead of child, defaults to False
:param int q: Percentile from 0 to 100, defaults to 50
:param int nb_min: Number minimal of observation to provide statistics, defaults to 1
:return (array,array) : statistic for each event
"""
stat1 = empty(i.size, dtype=nb_types.float32)
stat2 = empty(i.size, dtype=nb_types.float32)
# iter on event
for j, (i_, i1_, i2_) in enumerate(zip(i, i1, i2)):
if follow_parent:
# We follow parent
stat1[j] = get_percentile_on_following_obs(
i_, ind, pct, follow_obs, t, segment, i1_, window, q, nb_min
)
stat2[j] = get_percentile_on_following_obs(
i_, ind, pct, follow_obs, t, segment, i2_, window, q, nb_min
)
else:
# We follow child
stat1[j] = get_percentile_on_following_obs(
i1_, ind, pct, follow_obs, t, segment, i_, window, q, nb_min
)
stat2[j] = get_percentile_on_following_obs(
i2_, ind, pct, follow_obs, t, segment, i_, window, q, nb_min
)
return stat1, stat2
@njit(cache=True)
def get_next_index(gr):
"""Return for each obs index the new position to join all groups"""
nb_obs_gr = bincount(gr)
i_gr = nb_obs_gr.cumsum() - nb_obs_gr
new_index = empty(gr.shape, dtype=uint32)
for i, g in enumerate(gr):
new_index[i] = i_gr[g]
i_gr[g] += 1
return new_index
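# Illustrative example (hypothetical groups): for gr = [1, 0, 1, 0] the new positions are
# [2, 0, 3, 1], i.e. the two obs of group 0 come first, followed by the two obs of group 1.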
@njit(cache=True)
def apply_replace(x, x0, x1):
nb = x.shape[0]
for i in range(nb):
if x[i] == x0:
x[i] = x1
@njit(cache=True)
def build_unique_array(id1, id2):
"""Give a unique id for each (id1, id2) with id1 and id2 increasing monotonically"""
k = 0
new_id = empty(id1.shape, dtype=id1.dtype)
id1_previous = id1[0]
id2_previous = id2[0]
for i in range(id1.shape[0]):
id1_, id2_ = id1[i], id2[i]
if id1_ != id1_previous or id2_ != id2_previous:
k += 1
new_id[i] = k
id1_previous, id2_previous = id1_, id2_
return new_id
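# Illustrative example (hypothetical ids): id1 = [0, 0, 1, 1], id2 = [3, 4, 4, 4]
# gives new_id = [0, 1, 2, 2]: a new id starts each time the (id1, id2) pair changes.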
@njit(cache=True)
def new_numbering(segs, start=0):
nb = len(segs)
s0 = segs[0]
j = start
for i in range(nb):
if segs[i] != s0:
s0 = segs[i]
j += 1
segs[i] = j
@njit(cache=True)
def ptp(values):
return values.max() - values.min()
@njit(cache=True)
def generate_mask_from_ids(id_networks, nb, istart, iend, i0):
"""From list of id, we generate a mask
:param array id_networks: list of ids
:param int nb: size of mask
:param array istart: first index for each id from :py:meth:`~py_eddy_tracker.generic.build_index`
:param array iend: last index for each id from :py:meth:`~py_eddy_tracker.generic.build_index`
:param int i0: ref index from :py:meth:`~py_eddy_tracker.generic.build_index`
:return array: return a mask
"""
m = zeros(nb, dtype="bool")
for i in id_networks:
for j in range(istart[i - i0], iend[i - i0]):
m[j] = True
return m
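# Illustrative example (hypothetical index arrays): with istart = [0, 3], iend = [3, 5],
# i0 = 0 and nb = 5, generate_mask_from_ids([1], 5, istart, iend, 0) selects obs 3 and 4.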
@njit(cache=True)
def same_position(x0, y0, t0, x1, y1, t1, i00, i01, i0, i1):
"""Return index of track/segment found in other dataset
:param array x0: longitude of dataset0
:param array y0: latitude of dataset0
:param array t0: time of dataset0
:param array x1: longitude of dataset1
:param array y1: latitude of dataset1
:param array t1: time of dataset1
:param array i00: First index of track/segment/network in dataset0
:param array i01: First index of track/segment/network in dataset1
:param List(array) i0: list of array which contain index to order dataset0
:param List(array) i1: list of array which contain index to order dataset1
:return array: index of dataset1 which match with dataset0, -1 => no match
"""
nb0, nb1 = i00.size, i01.size
i_target = -ones(nb0, dtype="i4")
# Avoid comparing again entries that have already been matched
used1 = zeros(nb1, dtype="bool")
for j0 in range(nb0):
for j1 in range(nb1):
if used1[j1]:
continue
test = True
for i0_, i1_ in zip(i0[j0], i1[j1]):
i0_ += i00[j0]
i1_ += i01[j1]
if t0[i0_] != t1[i1_] or x0[i0_] != x1[i1_] or y0[i0_] != y1[i1_]:
test = False
break
if test:
i_target[j0] = j1
used1[j1] = True
break
return i_target
@njit(cache=True)
def mask_follow_obs(m, next_obs, time, indexs, dt=3):
"""Generate a mask to select close obs in time from index
:param array m: mask to fill with True
:param array next_obs: index of the next observation
:param array time: time of each obs
:param array indexs: index to start follow
:param int dt: delta of time max from index, defaults to 3
"""
for i in indexs:
t0 = time[i]
m[i] = True
i_next = next_obs[i]
dt_ = abs(time[i_next] - t0)
while dt_ < dt and i_next != -1:
m[i_next] = True
i_next = next_obs[i_next]
dt_ = abs(time[i_next] - t0)
| [] |
2024-01-10 | tedai-hackathon/ALEX-UI | alex~prompts~legal_prompt.py | from langchain.prompts.prompt import PromptTemplate
legal_prompt_string = """
You are a legal consultant for a small startup in its formation stage.
Given:
- JSON object describing a legal entity that this startup aspires
to be.
- The founder's question.
Answer the founder's question in MARKDOWN format. You will have access to
a vector database with supplemental information.
If the JSON is not defined, then the startup founder has not yet decided
on a legal entity and may need extra guidance.
#### START STARTUP LEGAL ENTITY JSON OBJECT
{legal_entity_json}
#### END STARTUP LEGAL ENTITY JSON OBJECT
#### START FOUNDER'S QUESTION
{founder_question}
#### END FOUNDER'S QUESTION
"""
legal_prompt_template = PromptTemplate(
input_variables=["legal_entity_json", "founder_question"],
template=legal_prompt_string,
)
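# Illustrative usage (hypothetical values):
#     prompt = legal_prompt_template.format(
#         legal_entity_json='{"type": "Delaware C-Corp"}',
#         founder_question="Do I need bylaws before opening a bank account?",
#     )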
| [
"legal_entity_json",
"\nYou are a legal consultant for a small startup in its formation stage.\nGiven:\n- JSON object describing a legal entity that this startup aspires\nto be.\n- The founder's question.\nAnswer the founder's question in MARKDOWN format. You will have access to \na vector database with supplemental information.\n\nIf the JSON is not defined, then the startup founder has not yet decided\non a legal entity and may need extra guidance.\n\n#### START STARTUP LEGAL ENTITY JSON OBJECT\n{legal_entity_json}\n#### END STARTUP LEGAL ENTITY JSON OBJECT\n\n#### START FOUNDER'S QUESTION\n{founder_question}\n#### END FOUNDER'S QUESTION\n",
"founder_question"
] |
2024-01-10 | hemants1703/test | playground~agentbox.py | import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
import sys
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
openagent_dir = os.path.abspath(os.path.join(script_dir, ".."))
sys.path.append(openagent_dir)
import openagent
from openagent.llms._openai import OpenAI as guidance_llm
from openagent.agent.chat import ChatAgent
from dotenv import load_dotenv
load_dotenv()
from jupyter_client import KernelManager
from IPython import display
import subprocess
import ast
import argparse
import threading
def agent():
llm = guidance_llm(
model="gpt-3.5-turbo"
)
chat_template = '''
{{#user~}}
I want to translate the following English text into Python code:
QUERY: {{input}}
{{~/user}}
{{#assistant~}}
Sure, I can assist with that. If I need more information, I'll ask for clarification.
{{~/assistant}}
{{#user~}}
Yes, go ahead and write the complete code.
{{~/user}}
{{#assistant~}}
{{gen 'response' temperature=0 max_tokens=3900}}
{{~/assistant}}
{{#assistant~}}
If the context or the task is not clear, please provide additional information to clarify.
{{~/assistant}}'''
agent = ChatAgent(
llm=llm,
prompt_template=chat_template,
)
return agent
def install_dependencies(code):
try:
# Parse the code to extract import statements
parsed_ast = ast.parse(code)
imports = []
for node in ast.walk(parsed_ast):
if isinstance(node, ast.Import):
imports.extend([name.name for name in node.names])
elif isinstance(node, ast.ImportFrom):
module_name = node.module
if module_name is not None:
imports.append(module_name)
# Remove duplicate imports and filter out standard library modules
imports = list(set(imports))
# print("imports", imports)
resolved_imports = set()
for imp in imports:
if '.' in imp:
parent_module = imp.split('.')[0]
resolved_imports.add(parent_module)
else:
resolved_imports.add(imp)
# Remove duplicate imports and filter out standard library modules
resolved_imports = list(resolved_imports)
# print("resolved_imports", resolved_imports)
third_party_dependencies = [dep for dep in resolved_imports if dep not in sys.modules]
# print("third_party_dependencies", third_party_dependencies)
if third_party_dependencies:
subprocess.check_call([sys.executable, "-m", "pip", "install"] + third_party_dependencies)
return True
else:
# print("No third-party dependencies detected.")
return True
except subprocess.CalledProcessError:
print("Dependency installation failed.")
return False
def run_python_code_in_kernel(code):
# Create a kernel manager
km = KernelManager(kernel_name='python3') # Use the appropriate kernel name
# Start the kernel
km.start_kernel()
# Connect to the kernel
kc = km.client()
kc.start_channels()
# Execute the code in the kernel
kc.execute(code)
# Create a thread for waiting on messages
def wait_for_messages():
try:
while True:
msg = kc.get_iopub_msg()
msg_type = msg['header']['msg_type']
if msg_type == 'display_data':
output_data = msg['content']['data']
if 'image/png' in output_data:
display.display_png(output_data['image/png'], raw=True)
elif 'image/jpeg' in output_data:
display.display_jpeg(output_data['image/jpeg'], raw=True)
elif msg_type == 'stream':
output_data = msg['content']['text']
output_data = output_data.split("\n")
for output in output_data[:-1]:
display.display(output)
except asyncio.CancelledError:
pass # Ignore the exception
# Start the message-waiting thread
message_thread = threading.Thread(target=wait_for_messages)
message_thread.start()
# Wait for the specified timeout
timeout_seconds = 10
message_thread.join(timeout_seconds)
# Check if the thread is still alive (indicating timeout)
if message_thread.is_alive():
print("Code execution completed")
else:
print("Code execution completed within the timeout.")
# Stop the kernel
kc.stop_channels()
km.shutdown_kernel()
# Main function
def main(gpt_prompt):
res = agent().run(input=gpt_prompt)
code = f"""{res.split('```')[1].replace('python', '')}"""
print(code)
# Install dependencies
if install_dependencies(code):
# Run the generated code in the Jupyter kernel
run_python_code_in_kernel(code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Execute Python code from the command line.')
parser.add_argument("--gpt_prompt", help="Python code to be executed", default=None)
args = parser.parse_args()
gpt_prompt = args.gpt_prompt
main(gpt_prompt)
| [
"\n {{#user~}}\n I want to translate the following English text into Python code:\n QUERY: {{input}}\n {{~/user}}\n\n {{#assistant~}}\n Sure, I can assist with that. If I need more information, I'll ask for clarification.\n {{~/assistant}}\n\n {{#user~}}\n Yes, go ahead and write the complete code.\n {{~/user}}\n\n {{#assistant~}}\n {{gen 'response' temperature=0 max_tokens=3900}}\n {{~/assistant}}\n\n {{#assistant~}}\n If the context or the task is not clear, please provide additional information to clarify.\n {{~/assistant}}"
] |
2024-01-10 | hemants1703/test | openagent~finetune~LLMFinetune.py | from abc import ABC, abstractmethod
from logging import Logger
import openai
class LLMFinetune(ABC):
def __init__(self, logger: Logger, openai_key: str):
self.logger = logger
openai.api_key = openai_key
@abstractmethod
def transform_data(self, train_csv_file: str, val_csv_file: str , train_output_file: str, val_output_file: str) -> str:
pass
@abstractmethod
def finetune(self, **kwargs):
pass
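# A minimal sketch of a concrete subclass (illustrative only; the class name, the JSONL
# layout and the exact OpenAI fine-tuning calls are assumptions, not part of this module):
#
#     class CsvToJsonlFinetune(LLMFinetune):
#         def transform_data(self, train_csv_file, val_csv_file, train_output_file, val_output_file):
#             # convert each CSV row into a {"prompt": ..., "completion": ...} JSONL line
#             ...
#         def finetune(self, **kwargs):
#             # upload the JSONL files and launch a fine-tuning job through the openai client
#             ...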
| [] |
2024-01-10 | liuxing9848/ChatPaper-1 | chat_paper.py | import argparse
import base64
import configparser
import datetime
import json
import os
import re
from collections import namedtuple
import arxiv
import numpy as np
import openai
import requests
import tenacity
import tiktoken
from get_paper_from_pdf import Paper
PaperParams = namedtuple(
"PaperParams",
[
"pdf_path",
"query",
"key_word",
"filter_keys",
"max_results",
"sort",
"save_image",
"file_format",
"language",
],
)
# Define the Reader class
class Reader:
# Initialization method, set attributes
def __init__(self, key_word, query, filter_keys,
root_path='./',
gitee_key='',
sort=arxiv.SortCriterion.SubmittedDate, user_name='default', args=None):
self.user_name = user_name  # reader's name
self.key_word = key_word  # key words the reader is interested in
self.query = query  # search query entered by the reader
self.sort = sort  # sort criterion chosen by the reader
if args.language == 'en':
self.language = 'English'
elif args.language == 'zh':
self.language = 'Chinese'
else:
self.language = 'Chinese'
self.filter_keys = filter_keys  # key words used to filter abstracts
self.root_path = root_path
# Create a ConfigParser object
self.config = configparser.ConfigParser()
# Read the configuration file
self.config.read('apikey.ini')
# Get the value corresponding to a given key
self.chat_api_list = self.config.get('OpenAI', 'OPENAI_API_KEYS')[1:-1].replace('\'', '').split(',')
self.chat_api_list = [api.strip() for api in self.chat_api_list if len(api) > 5]
self.cur_api = 0
self.file_format = args.file_format
if args.save_image:
self.gitee_key = self.config.get('Gitee', 'api')
else:
self.gitee_key = ''
self.max_token_num = 4096
self.encoding = tiktoken.get_encoding("gpt2")
def get_arxiv(self, max_results=30):
search = arxiv.Search(query=self.query,
max_results=max_results,
sort_by=self.sort,
sort_order=arxiv.SortOrder.Descending,
)
return search
def filter_arxiv(self, max_results=30):
search = self.get_arxiv(max_results=max_results)
print("all search:")
for index, result in enumerate(search.results()):
print(index, result.title, result.updated)
filter_results = []
filter_keys = self.filter_keys
print("filter_keys:", self.filter_keys)
# A paper is kept as a target only if every filter key word can be found in its abstract
for index, result in enumerate(search.results()):
abs_text = result.summary.replace('-\n', '-').replace('\n', ' ')
meet_num = 0
for f_key in filter_keys.split(" "):
if f_key.lower() in abs_text.lower():
meet_num += 1
if meet_num == len(filter_keys.split(" ")):
filter_results.append(result)
# break
print("筛选后剩下的论文数量:")
print("filter_results:", len(filter_results))
print("filter_papers:")
for index, result in enumerate(filter_results):
print(index, result.title, result.updated)
return filter_results
def validateTitle(self, title):
# Sanitize the messy characters of the paper title so it can be used as a file path
rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
new_title = re.sub(rstr, "_", title)  # replace them with underscores
return new_title
def download_pdf(self, filter_results):
# Create the folder first
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
key_word = str(self.key_word.replace(':', ' '))
path = self.root_path + 'pdf_files/' + self.query.replace('au: ', '').replace('title: ', '').replace('ti: ',
'').replace(
':', ' ')[:25] + '-' + date_str
try:
os.makedirs(path)
except:
pass
print("All_paper:", len(filter_results))
# Start downloading:
paper_list = []
for r_index, result in enumerate(filter_results):
try:
title_str = self.validateTitle(result.title)
pdf_name = title_str + '.pdf'
# result.download_pdf(path, filename=pdf_name)
self.try_download_pdf(result, path, pdf_name)
paper_path = os.path.join(path, pdf_name)
print("paper_path:", paper_path)
paper = Paper(path=paper_path,
url=result.entry_id,
title=result.title,
abs=result.summary.replace('-\n', '-').replace('\n', ' '),
authers=[str(aut) for aut in result.authors],
)
# Download finished, start parsing:
paper.parse_pdf()
paper_list.append(paper)
except Exception as e:
print("download_error:", e)
pass
return paper_list
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def try_download_pdf(self, result, path, pdf_name):
result.download_pdf(path, filename=pdf_name)
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def upload_gitee(self, image_path, image_name='', ext='png'):
"""
Upload the image to Gitee
:return:
"""
with open(image_path, 'rb') as f:
base64_data = base64.b64encode(f.read())
base64_content = base64_data.decode()
date_str = str(datetime.datetime.now())[:19].replace(':', '-').replace(' ', '-') + '.' + ext
path = image_name + '-' + date_str
payload = {
"access_token": self.gitee_key,
"owner": self.config.get('Gitee', 'owner'),
"repo": self.config.get('Gitee', 'repo'),
"path": self.config.get('Gitee', 'path'),
"content": base64_content,
"message": "upload image"
}
# Change this to your own Gitee account, repository name and folder name:
url = f'https://gitee.com/api/v5/repos/' + self.config.get('Gitee', 'owner') + '/' + self.config.get('Gitee',
'repo') + '/contents/' + self.config.get(
'Gitee', 'path') + '/' + path
rep = requests.post(url, json=payload).json()
print("rep:", rep)
if 'content' in rep.keys():
image_url = rep['content']['download_url']
else:
image_url = r"https://gitee.com/api/v5/repos/" + self.config.get('Gitee', 'owner') + '/' + self.config.get(
'Gitee', 'repo') + '/contents/' + self.config.get('Gitee', 'path') + '/' + path
return image_url
def summary_with_chat(self, paper_list):
htmls = []
for paper_index, paper in enumerate(paper_list):
# Step 1: summarize using the title, abstract and introduction.
text = ''
text += 'Title:' + paper.title
text += 'Url:' + paper.url
text += 'Abstract:' + paper.abs
text += 'Paper_info:' + paper.section_text_dict['paper_info']
# intro
text += list(paper.section_text_dict.values())[0]
chat_summary_text = ""
try:
chat_summary_text = self.chat_summary(text=text)
except Exception as e:
print("summary_error:", e)
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len(
"your messages resulted in") + 1
offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
summary_prompt_token = offset + 1000 + 150
chat_summary_text = self.chat_summary(text=text, summary_prompt_token=summary_prompt_token)
htmls.append('## Paper:' + str(paper_index + 1))
htmls.append('\n\n\n')
htmls.append(chat_summary_text)
# Step 2: summarize the methods:
# TODO: some papers name the method section after the algorithm, so simple keyword filtering is unreliable; a better approach is needed later.
method_key = ''
for parse_key in paper.section_text_dict.keys():
if 'method' in parse_key.lower() or 'approach' in parse_key.lower():
method_key = parse_key
break
if method_key != '':
text = ''
method_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text
# methods
method_text += paper.section_text_dict[method_key]
text = summary_text + "\n\n<Methods>:\n\n" + method_text
chat_method_text = ""
try:
chat_method_text = self.chat_method(text=text)
except Exception as e:
print("method_error:", e)
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len(
"your messages resulted in") + 1
offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
method_prompt_token = offset + 800 + 150
chat_method_text = self.chat_method(text=text, method_prompt_token=method_prompt_token)
htmls.append(chat_method_text)
else:
chat_method_text = ''
htmls.append("\n" * 4)
# Step 3: summarize the whole paper and give a score:
conclusion_key = ''
for parse_key in paper.section_text_dict.keys():
if 'conclu' in parse_key.lower():
conclusion_key = parse_key
break
text = ''
conclusion_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text + "\n <Method summary>:\n" + chat_method_text
if conclusion_key != '':
# conclusion
conclusion_text += paper.section_text_dict[conclusion_key]
text = summary_text + "\n\n<Conclusion>:\n\n" + conclusion_text
else:
text = summary_text
chat_conclusion_text = ""
try:
chat_conclusion_text = self.chat_conclusion(text=text)
except Exception as e:
print("conclusion_error:", e)
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len(
"your messages resulted in") + 1
offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
conclusion_prompt_token = offset + 800 + 150
chat_conclusion_text = self.chat_conclusion(text=text,
conclusion_prompt_token=conclusion_prompt_token)
htmls.append(chat_conclusion_text)
htmls.append("\n" * 4)
# # Combine everything into one file and save it.
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
try:
export_path = os.path.join(self.root_path, 'export')
os.makedirs(export_path)
except:
pass
mode = 'w' if paper_index == 0 else 'a'
file_name = os.path.join(export_path,
date_str + '-' + self.validateTitle(paper.title[:80]) + "." + self.file_format)
self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
# file_name = os.path.join(export_path, date_str+'-'+self.validateTitle(paper.title)+".md")
# self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
htmls = []
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_conclusion(self, text, conclusion_prompt_token=800):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - conclusion_prompt_token) / text_token)
clip_text = text[:clip_text_index]
messages = [
{"role": "system",
"content": "You are a reviewer in the field of [" + self.key_word + "] and you need to critically review this article"},
# ChatGPT role
{"role": "assistant",
"content": "This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to summarize the following questions:" + clip_text},
# Background knowledge; the OpenReview reviewing process can be used as a reference
{"role": "user", "content": """
8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).
- (1):What is the significance of this piece of work?
- (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload.
.......
Follow the format of the output later:
8. Conclusion: \n\n
- (1):xxx;\n
- (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
# The prompt should be written in English to use fewer tokens.
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("conclusion_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_method(self, text, method_prompt_token=800):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - method_prompt_token) / text_token)
clip_text = text[:clip_text_index]
messages = [
{"role": "system",
"content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
# chatgpt 角色
{"role": "assistant",
"content": "This is the <summary> and <Method> part of an English document, where <summary> you have summarized, but the <Methods> part, I need your help to read and summarize the following questions." + clip_text},
# Background knowledge
{"role": "user", "content": """
7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are.
- (1):...
- (2):...
- (3):...
- .......
Follow the format of the output that follows:
7. Methods: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
....... \n\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("method_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_summary(self, text, summary_prompt_token=1100):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - summary_prompt_token) / text_token)
clip_text = text[:clip_text_index]
messages = [
{"role": "system",
"content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
{"role": "assistant",
"content": "This is the title, author, link, abstract and introduction of an English document. I need your help to read and summarize the following questions: " + clip_text},
{"role": "user", "content": """
1. Mark the title of the paper (with Chinese translation)
2. list all the authors' names (use English)
3. mark the first author's affiliation (output {} translation only)
4. mark the keywords of this article (use English)
5. link to the paper, Github code link (if available, fill in Github:None if not)
6. summarize according to the following four points.Be sure to use {} answers (proper nouns need to be marked in English)
- (1):What is the research background of this article?
- (2):What are the past methods? What are the problems with them? Is the approach well motivated?
- (3):What is the research methodology proposed in this paper?
- (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals?
Follow the format of the output that follows:
1. Title: xxx\n\n
2. Authors: xxx\n\n
3. Affiliation: xxx\n\n
4. Keywords: xxx\n\n
5. Urls: xxx or xxx , xxx \n\n
6. Summary: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
- (4):xxx.\n\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed.
""".format(self.language, self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("summary_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
def export_to_markdown(self, text, file_name, mode='w'):
# Use the markdown module's convert method to turn the text into HTML
# html = markdown.markdown(text)
# Open a file in write mode
with open(file_name, mode, encoding="utf-8") as f:
# Write the content to the file
f.write(text)
# Define a method that prints the reader's info
def show_info(self):
print(f"Key word: {self.key_word}")
print(f"Query: {self.query}")
print(f"Sort: {self.sort}")
def chat_paper_main(args):
# Create a Reader object and call its show_info method
if args.sort == 'Relevance':
sort = arxiv.SortCriterion.Relevance
elif args.sort == 'LastUpdatedDate':
sort = arxiv.SortCriterion.LastUpdatedDate
else:
sort = arxiv.SortCriterion.Relevance
if args.pdf_path:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=sort,
args=args
)
reader1.show_info()
# Determine whether the input is a directory or a single file:
paper_list = []
if args.pdf_path.endswith(".pdf"):
paper_list.append(Paper(path=args.pdf_path))
else:
for root, dirs, files in os.walk(args.pdf_path):
print("root:", root, "dirs:", dirs, 'files:', files) # 当前目录路径
for filename in files:
# If a PDF file is found, add it to the paper list
if filename.endswith(".pdf"):
paper_list.append(Paper(path=os.path.join(root, filename)))
print("------------------paper_num: {}------------------".format(len(paper_list)))
[print(paper_index, paper_name.path.split('\\')[-1]) for paper_index, paper_name in enumerate(paper_list)]
reader1.summary_with_chat(paper_list=paper_list)
else:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=sort,
args=args
)
reader1.show_info()
filter_results = reader1.filter_arxiv(max_results=args.max_results)
paper_list = reader1.download_pdf(filter_results)
reader1.summary_with_chat(paper_list=paper_list)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parser.add_argument("--pdf_path", type=str, default=r'demo.pdf', help="if none, the bot will download from arxiv with query")
# parser.add_argument("--pdf_path", type=str, default=r'C:\Users\Administrator\Desktop\DHER\RHER_Reset\ChatPaper', help="if none, the bot will download from arxiv with query")
parser.add_argument("--pdf_path", type=str, default='', help="if none, the bot will download from arxiv with query")
parser.add_argument("--query", type=str, default='all: ChatGPT robot',
help="the query string, ti: xx, au: xx, all: xx,")
parser.add_argument("--key_word", type=str, default='reinforcement learning',
help="the key word of user research fields")
parser.add_argument("--filter_keys", type=str, default='ChatGPT robot',
help="the filter key words, 摘要中每个单词都得有,才会被筛选为目标论文")
parser.add_argument("--max_results", type=int, default=1, help="the maximum number of results")
# arxiv.SortCriterion.Relevance
parser.add_argument("--sort", type=str, default="Relevance", help="another is LastUpdatedDate")
parser.add_argument("--save_image", default=False,
help="save image? It takes a minute or two to save a picture! But pretty")
parser.add_argument("--file_format", type=str, default='md', help="导出的文件格式,如果存图片的话,最好是md,如果不是的话,txt的不会乱")
parser.add_argument("--language", type=str, default='zh', help="The other output lauguage is English, is en")
paper_args = PaperParams(**vars(parser.parse_args()))
import time
start_time = time.time()
chat_paper_main(args=paper_args)
print("summary time:", time.time() - start_time)
| [
" \n 7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are.\n - (1):...\n - (2):...\n - (3):...\n - .......\n Follow the format of the output that follows: \n 7. Methods: \n\n\n - (1):xxx;\n \n - (2):xxx;\n \n - (3):xxx;\n \n ....... \n\n \n \n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write. \n ",
"] and you need to critically review this article",
"This is the <summary> and <Method> part of an English document, where <summary> you have summarized, but the <Methods> part, I need your help to read and summarize the following questions.PLACEHOLDER",
" \n 1. Mark the title of the paper (with Chinese translation)\n 2. list all the authors' names (use English)\n 3. mark the first author's affiliation (output {} translation only) \n 4. mark the keywords of this article (use English)\n 5. link to the paper, Github code link (if available, fill in Github:None if not)\n 6. summarize according to the following four points.Be sure to use {} answers (proper nouns need to be marked in English)\n - (1):What is the research background of this article?\n - (2):What are the past methods? What are the problems with them? Is the approach well motivated?\n - (3):What is the research methodology proposed in this paper?\n - (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals?\n Follow the format of the output that follows: \n 1. Title: xxx\n\n\n 2. Authors: xxx\n\n\n 3. Affiliation: xxx\n\n \n 4. Keywords: xxx\n\n \n 5. Urls: xxx or xxx , xxx \n\n \n 6. Summary: \n\n\n - (1):xxx;\n \n - (2):xxx;\n \n - (3):xxx;\n \n - (4):xxx.\n\n \n \n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed. \n ",
" \n 8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).\n - (1):What is the significance of this piece of work?\n - (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload. \n .......\n Follow the format of the output later: \n 8. Conclusion: \n\n\n - (1):xxx;\n \n - (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n \n \n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write. \n ",
"] who is good at summarizing papers using concise statements",
"This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to summarize the following questions:PLACEHOLDER",
"This is the title, author, link, abstract and introduction of an English document. I need your help to read and summarize the following questions: PLACEHOLDER",
"You are a researcher in the field of [",
"You are a reviewer in the field of ["
] |
2024-01-10 | liuxing9848/ChatPaper-1 | chat_arxiv.py | import argparse
import base64
import configparser
import datetime
import io
import json
import os
import re
from collections import namedtuple
import arxiv
import fitz
import numpy as np
import openai
# Import the required libraries
import requests
import tenacity
import tiktoken
from bs4 import BeautifulSoup
from PIL import Image
ArxivParams = namedtuple(
"ArxivParams",
[
"query",
"key_word",
"page_num",
"max_results",
"days",
"sort",
"save_image",
"file_format",
"language",
],
)
class Paper:
def __init__(self, path, title='', url='', abs='', authers=[]):
# Initialization: build a Paper object from a pdf path
self.url = url  # paper link
self.path = path  # pdf path
self.section_names = []  # section titles
self.section_texts = {}  # section contents
self.abs = abs
self.title_page = 0
self.title = title
self.pdf = fitz.open(self.path)  # pdf document
self.parse_pdf()
self.authers = authers
self.roman_num = ["I", "II", 'III', "IV", "V", "VI", "VII", "VIII", "IIX", "IX", "X"]
self.digit_num = [str(d + 1) for d in range(10)]
self.first_image = ''
def parse_pdf(self):
self.pdf = fitz.open(self.path)  # pdf document
self.text_list = [page.get_text() for page in self.pdf]
self.all_text = ' '.join(self.text_list)
self.section_page_dict = self._get_all_page_index()  # mapping from section name to page number
print("section_page_dict", self.section_page_dict)
self.section_text_dict = self._get_all_page()  # mapping from section name to content
self.section_text_dict.update({"title": self.title})
self.section_text_dict.update({"paper_info": self.get_paper_info()})
self.pdf.close()
def get_paper_info(self):
first_page_text = self.pdf[self.title_page].get_text()
if "Abstract" in self.section_text_dict.keys():
abstract_text = self.section_text_dict['Abstract']
else:
abstract_text = self.abs
first_page_text = first_page_text.replace(abstract_text, "")
return first_page_text
def get_image_path(self, image_path=''):
"""
Save the largest image of the PDF as image.<ext> in a local directory and return its path, so it can be read for the Gitee upload
:param image_path: directory where the extracted image is saved
:return:
"""
# open file
max_size = 0
image_list = []
with fitz.Document(self.path) as my_pdf_file:
# iterate over all pages
for page_number in range(1, len(my_pdf_file) + 1):
# get the individual page
page = my_pdf_file[page_number - 1]
# get all images on the current page
images = page.get_images()
# iterate over all images on the current page
for image_number, image in enumerate(page.get_images(), start=1):
# access the image xref
xref_value = image[0]
# extract the image information
base_image = my_pdf_file.extract_image(xref_value)
# access the image bytes
image_bytes = base_image["image"]
# get the image extension
ext = base_image["ext"]
# load the image
image = Image.open(io.BytesIO(image_bytes))
image_size = image.size[0] * image.size[1]
if image_size > max_size:
max_size = image_size
image_list.append(image)
for image in image_list:
image_size = image.size[0] * image.size[1]
if image_size == max_size:
image_name = f"image.{ext}"
im_path = os.path.join(image_path, image_name)
print("im_path:", im_path)
max_pix = 480
origin_min_pix = min(image.size[0], image.size[1])
if image.size[0] > image.size[1]:
min_pix = int(image.size[1] * (max_pix / image.size[0]))
newsize = (max_pix, min_pix)
else:
min_pix = int(image.size[0] * (max_pix / image.size[1]))
newsize = (min_pix, max_pix)
image = image.resize(newsize)
image.save(open(im_path, "wb"))
return im_path, ext
return None, None
    # define a function that identifies the chapter names and returns them as a list
    def get_chapter_names(self, ):
        # open a pdf file
        doc = fitz.open(self.path)  # pdf document
        text_list = [page.get_text() for page in doc]
        all_text = ''
        for text in text_list:
            all_text += text
        # create an empty list to store the chapter names
chapter_names = []
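        # heuristic: a line is treated as a chapter heading if it contains a dot, splits into 2-4 space-separated tokens
        # and 2-4 dot-separated parts, ideally starting with a Roman or Arabic numeral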
for line in all_text.split('\n'):
line_list = line.split(' ')
if '.' in line:
point_split_list = line.split('.')
space_split_list = line.split(' ')
if 1 < len(space_split_list) < 5:
if 1 < len(point_split_list) < 5 and (
point_split_list[0] in self.roman_num or point_split_list[0] in self.digit_num):
print("line:", line)
chapter_names.append(line)
                    # this branch may introduce new bugs; it was added to work around the "Introduction" heading issue!
elif 1 < len(point_split_list) < 5:
print("line:", line)
chapter_names.append(line)
return chapter_names
def get_title(self):
        doc = self.pdf  # the opened pdf document
        max_font_size = 0  # initialize the maximum font size to 0
        max_string = ""  # initialize the string with the maximum font size to empty
        max_font_sizes = [0]
        for page_index, page in enumerate(doc):  # iterate over every page
            text = page.get_text("dict")  # get the text information on the page
            blocks = text["blocks"]  # get the list of text blocks
            for block in blocks:  # iterate over every text block
                if block["type"] == 0 and len(block['lines']):  # if it is a text block
                    if len(block["lines"][0]["spans"]):
                        font_size = block["lines"][0]["spans"][0]["size"]  # font size of the first span of the first line
                        max_font_sizes.append(font_size)
                        if font_size > max_font_size:  # if the font size exceeds the current maximum
                            max_font_size = font_size  # update the maximum
                            max_string = block["lines"][0]["spans"][0]["text"]  # update the string with the maximum font size
max_font_sizes.sort()
print("max_font_sizes", max_font_sizes[-10:])
cur_title = ''
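        # second pass: concatenate the spans whose font size is within 0.3 of one of the two largest sizes
        # (skipping short strings and arXiv banners) to form the title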
        for page_index, page in enumerate(doc):  # iterate over every page
            text = page.get_text("dict")  # get the text information on the page
            blocks = text["blocks"]  # get the list of text blocks
            for block in blocks:  # iterate over every text block
                if block["type"] == 0 and len(block['lines']):  # if it is a text block
                    if len(block["lines"][0]["spans"]):
                        cur_string = block["lines"][0]["spans"][0]["text"]  # text of the first span of the first line
                        font_flags = block["lines"][0]["spans"][0]["flags"]  # font flags of the first span of the first line
                        font_size = block["lines"][0]["spans"][0]["size"]  # font size of the first span of the first line
# print(font_size)
if abs(font_size - max_font_sizes[-1]) < 0.3 or abs(font_size - max_font_sizes[-2]) < 0.3:
# print("The string is bold.", max_string, "font_size:", font_size, "font_flags:", font_flags)
if len(cur_string) > 4 and "arXiv" not in cur_string:
# print("The string is bold.", max_string, "font_size:", font_size, "font_flags:", font_flags)
if cur_title == '':
cur_title += cur_string
else:
cur_title += ' ' + cur_string
self.title_page = page_index
# break
title = cur_title.replace('\n', ' ')
return title
def _get_all_page_index(self):
        # define the list of section names to look for
section_list = ["Abstract",
'Introduction', 'Related Work', 'Background',
"Introduction and Motivation", "Computation Function", " Routing Function",
"Preliminary", "Problem Formulation",
'Methods', 'Methodology', "Method", 'Approach', 'Approaches',
# exp
"Materials and Methods", "Experiment Settings",
'Experiment', "Experimental Results", "Evaluation", "Experiments",
"Results", 'Findings', 'Data Analysis',
"Discussion", "Results and Discussion", "Conclusion",
'References']
        # initialize a dictionary to store the sections found and the page numbers on which they appear
        section_page_dict = {}
        # iterate over every page of the document
        for page_index, page in enumerate(self.pdf):
            # get the text content of the current page
            cur_text = page.get_text()
            # iterate over the list of section names to look for
            for section_name in section_list:
                # convert the section name to upper case
                section_name_upper = section_name.upper()
                # if the current page contains the keyword "Abstract"
                if "Abstract" == section_name and section_name in cur_text:
                    # add "Abstract" and its page number to the dictionary
                    section_page_dict[section_name] = page_index
                # otherwise, if the current page contains the section name, add it and its page number to the dictionary
                else:
                    if section_name + '\n' in cur_text:
                        section_page_dict[section_name] = page_index
                    elif section_name_upper + '\n' in cur_text:
                        section_page_dict[section_name] = page_index
        # return all section names found and the page numbers on which they appear
return section_page_dict
def _get_all_page(self):
"""
获取PDF文件中每个页面的文本信息,并将文本信息按照章节组织成字典返回。
Returns:
section_dict (dict): 每个章节的文本信息字典,key为章节名,value为章节文本。
"""
text = ''
text_list = []
section_dict = {}
        # then process the remaining sections:
text_list = [page.get_text() for page in self.pdf]
for sec_index, sec_name in enumerate(self.section_page_dict):
print(sec_index, sec_name, self.section_page_dict[sec_name])
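            # skip the first section (the abstract) when an abstract was already supplied;
            # otherwise slice the section text out of the page range it spans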
if sec_index <= 0 and self.abs:
continue
else:
                # consider the content that follows directly:
start_page = self.section_page_dict[sec_name]
if sec_index < len(list(self.section_page_dict.keys())) - 1:
end_page = self.section_page_dict[list(self.section_page_dict.keys())[sec_index + 1]]
else:
end_page = len(text_list)
print("start_page, end_page:", start_page, end_page)
cur_sec_text = ''
if end_page - start_page == 0:
if sec_index < len(list(self.section_page_dict.keys())) - 1:
next_sec = list(self.section_page_dict.keys())[sec_index + 1]
if text_list[start_page].find(sec_name) == -1:
start_i = text_list[start_page].find(sec_name.upper())
else:
start_i = text_list[start_page].find(sec_name)
if text_list[start_page].find(next_sec) == -1:
end_i = text_list[start_page].find(next_sec.upper())
else:
end_i = text_list[start_page].find(next_sec)
cur_sec_text += text_list[start_page][start_i:end_i]
else:
for page_i in range(start_page, end_page):
# print("page_i:", page_i)
if page_i == start_page:
if text_list[start_page].find(sec_name) == -1:
start_i = text_list[start_page].find(sec_name.upper())
else:
start_i = text_list[start_page].find(sec_name)
cur_sec_text += text_list[page_i][start_i:]
elif page_i < end_page:
cur_sec_text += text_list[page_i]
elif page_i == end_page:
if sec_index < len(list(self.section_page_dict.keys())) - 1:
next_sec = list(self.section_page_dict.keys())[sec_index + 1]
if text_list[start_page].find(next_sec) == -1:
end_i = text_list[start_page].find(next_sec.upper())
else:
end_i = text_list[start_page].find(next_sec)
cur_sec_text += text_list[page_i][:end_i]
section_dict[sec_name] = cur_sec_text.replace('-\n', '').replace('\n', ' ')
return section_dict
# define the Reader class
class Reader:
    # initialization method, set the attributes
def __init__(self, key_word, query,
root_path='./',
gitee_key='',
                 sort=arxiv.SortCriterion.SubmittedDate, user_name='default', args=None):
        self.user_name = user_name  # reader name
        self.key_word = key_word  # key words the reader is interested in
        self.query = query  # search query entered by the reader
        self.sort = sort  # sort order chosen by the reader
self.args = args
if args.language == 'en':
self.language = 'English'
elif args.language == 'zh':
self.language = 'Chinese'
else:
self.language = 'Chinese'
self.root_path = root_path
        # create a ConfigParser object
        self.config = configparser.ConfigParser()
        # read the configuration file
        self.config.read('apikey.ini')
        # get the value for a given key
self.chat_api_list = self.config.get('OpenAI', 'OPENAI_API_KEYS')[1:-1].replace('\'', '').split(',')
self.chat_api_list = [api.strip() for api in self.chat_api_list if len(api) > 5]
self.cur_api = 0
self.file_format = args.file_format
if args.save_image:
self.gitee_key = self.config.get('Gitee', 'api')
else:
self.gitee_key = ''
self.max_token_num = 4096
self.encoding = tiktoken.get_encoding("gpt2")
    # define a function that builds an arxiv search url from a keyword and a page number
def get_url(self, keyword, page):
base_url = "https://arxiv.org/search/?"
params = {
"query": keyword,
"searchtype": "all", # 搜索所有字段
"abstracts": "show", # 显示摘要
"order": "-announced_date_first", # 按日期降序排序
"size": 50 # 每页显示50条结果
}
if page > 0:
params["start"] = page * 50 # 设置起始位置
return base_url + requests.compat.urlencode(params)
    # define a function that fetches a web page from a url and parses out the paper titles
def get_titles(self, url, days=1):
titles = []
        # create an empty list to store the paper links
links = []
dates = []
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
        articles = soup.find_all("li", class_="arxiv-result")  # find all li tags that contain paper information
today = datetime.date.today()
last_days = datetime.timedelta(days=days)
for article in articles:
            title = article.find("p", class_="title").text  # find the title of each paper
link = article.find("span").find_all("a")[0].get('href')
date_text = article.find("p", class_="is-size-7").text
date_text = date_text.split('\n')[0].split("Submitted ")[-1].split("; ")[0]
date_text = datetime.datetime.strptime(date_text, "%d %B, %Y").date()
if today - date_text <= last_days:
titles.append(title.strip())
links.append(link)
dates.append(date_text)
# print("links:", links)
return titles, links, dates
    # define a function that collects all available paper titles for a keyword and prints them
def get_all_titles_from_web(self, keyword, page_num=1, days=1):
title_list, link_list, date_list = [], [], []
for page in range(page_num):
            url = self.get_url(keyword, page)  # build the url from the keyword and page number
            titles, links, dates = self.get_titles(url, days)  # get the paper titles from the url
            if not titles:  # if no titles were found, we have reached the last page, so exit the loop
                break
            for title_index, title in enumerate(titles):  # iterate over every title and print it
print(page, title_index, title, links[title_index], dates[title_index])
title_list.extend(titles)
link_list.extend(links)
date_list.extend(dates)
print("-" * 40)
return title_list, link_list, date_list
def get_arxiv(self, max_results=30):
search = arxiv.Search(query=self.query,
max_results=max_results,
sort_by=self.sort,
sort_order=arxiv.SortOrder.Descending,
)
return search
def get_arxiv_web(self, args, page_num=1, days=2):
titles, links, dates = self.get_all_titles_from_web(args.query, page_num=page_num, days=days)
paper_list = []
for title_index, title in enumerate(titles):
if title_index + 1 > args.max_results:
break
print(title_index, title, links[title_index], dates[title_index])
url = links[title_index] + ".pdf" # the link of the pdf document
filename = self.try_download_pdf(url, title)
paper = Paper(path=filename,
url=links[title_index],
title=title,
)
paper_list.append(paper)
return paper_list
def validateTitle(self, title):
        # sanitize the messy characters in the paper title so it can be used as a file path
        rstr = r"[\/\\\:\*\?\"\<\>\|]"  # '/ \ : * ? " < > |'
        new_title = re.sub(rstr, "_", title)  # replace them with underscores
return new_title
def download_pdf(self, url, title):
response = requests.get(url) # send a GET request to the url
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
path = self.root_path + 'pdf_files/' + self.validateTitle(self.args.query) + '-' + date_str
try:
os.makedirs(path)
except:
pass
filename = os.path.join(path, self.validateTitle(title)[:80] + '.pdf')
with open(filename, "wb") as f: # open a file with write and binary mode
f.write(response.content) # write the content of the response to the file
return filename
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def try_download_pdf(self, url, title):
return self.download_pdf(url, title)
def summary_with_chat(self, paper_list):
htmls = []
for paper_index, paper in enumerate(paper_list):
            # step 1: summarize using the title, abstract and introduction.
            text = ''
            text += 'Title:' + paper.title
            text += 'Url:' + paper.url
            text += 'Abstract:' + paper.abs
            text += 'Paper_info:' + paper.section_text_dict['paper_info']
            # intro
            text += list(paper.section_text_dict.values())[0]
try:
chat_summary_text = self.chat_summary(text=text)
except Exception as e:
print("summary_error:", e)
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len(
"your messages resulted in") + 1
offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
summary_prompt_token = offset + 1000 + 150
chat_summary_text = self.chat_summary(text=text, summary_prompt_token=summary_prompt_token)
htmls.append('## Paper:' + str(paper_index + 1))
htmls.append('\n\n\n')
htmls.append(chat_summary_text)
            # step 2: summarize the methods:
            # TODO: some papers name the methods section after the algorithm, so simple keyword filtering often misses it; a better approach is needed later.
method_key = ''
for parse_key in paper.section_text_dict.keys():
if 'method' in parse_key.lower() or 'approach' in parse_key.lower():
method_key = parse_key
break
if method_key != '':
text = ''
method_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text
# methods
method_text += paper.section_text_dict[method_key]
text = summary_text + "\n\n<Methods>:\n\n" + method_text
# chat_method_text = self.chat_method(text=text)
try:
chat_method_text = self.chat_method(text=text)
except Exception as e:
print("method_error:", e)
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len(
"your messages resulted in") + 1
offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
method_prompt_token = offset + 800 + 150
chat_method_text = self.chat_method(text=text, method_prompt_token=method_prompt_token)
htmls.append(chat_method_text)
else:
chat_method_text = ''
htmls.append("\n" * 4)
            # step 3: summarize the whole paper and score it:
conclusion_key = ''
for parse_key in paper.section_text_dict.keys():
if 'conclu' in parse_key.lower():
conclusion_key = parse_key
break
text = ''
conclusion_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text + "\n <Method summary>:\n" + chat_method_text
if conclusion_key != '':
# conclusion
conclusion_text += paper.section_text_dict[conclusion_key]
text = summary_text + "\n\n<Conclusion>:\n\n" + conclusion_text
else:
text = summary_text
# chat_conclusion_text = self.chat_conclusion(text=text)
try:
chat_conclusion_text = self.chat_conclusion(text=text)
except Exception as e:
print("conclusion_error:", e)
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len(
"your messages resulted in") + 1
offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
conclusion_prompt_token = offset + 800 + 150
chat_conclusion_text = self.chat_conclusion(text=text,
conclusion_prompt_token=conclusion_prompt_token)
htmls.append(chat_conclusion_text)
htmls.append("\n" * 4)
            # merge everything into one file and save it.
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
try:
export_path = os.path.join(self.root_path, 'export')
os.makedirs(export_path)
except:
pass
mode = 'w' if paper_index == 0 else 'a'
file_name = os.path.join(export_path,
date_str + '-' + self.validateTitle(self.query) + "." + self.file_format)
self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
htmls = []
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_conclusion(self, text, conclusion_prompt_token=800):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
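        # clip the input so the prompt plus the reserved completion tokens fit the model context;
        # the character cut-off is estimated from the ratio of available tokens to the text's token count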
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - conclusion_prompt_token) / text_token)
clip_text = text[:clip_text_index]
messages = [
{"role": "system",
"content": "You are a reviewer in the field of [" + self.key_word + "] and you need to critically review this article"},
            # chatgpt role
{"role": "assistant",
"content": "This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to summarize the following questions:" + clip_text},
            # background knowledge; the OpenReview review process can be used as a reference
{"role": "user", "content": """
8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).
- (1):What is the significance of this piece of work?
- (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload.
.......
Follow the format of the output later:
8. Conclusion: \n\n
- (1):xxx;\n
- (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
            # the prompt should be written in English to use fewer tokens.
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("conclusion_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_method(self, text, method_prompt_token=800):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - method_prompt_token) / text_token)
clip_text = text[:clip_text_index]
messages = [
{"role": "system",
"content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
            # chatgpt role
{"role": "assistant",
"content": "This is the <summary> and <Method> part of an English document, where <summary> you have summarized, but the <Methods> part, I need your help to read and summarize the following questions." + clip_text},
            # background knowledge
{"role": "user", "content": """
7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are.
- (1):...
- (2):...
- (3):...
- .......
Follow the format of the output that follows:
7. Methods: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
....... \n\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("method_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_summary(self, text, summary_prompt_token=1100):
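        # rotate through the configured OpenAI API keys in round-robin fashion so consecutive requests use different keys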
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - summary_prompt_token) / text_token)
clip_text = text[:clip_text_index]
messages = [
{"role": "system",
"content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
{"role": "assistant",
"content": "This is the title, author, link, abstract and introduction of an English document. I need your help to read and summarize the following questions: " + clip_text},
{"role": "user", "content": """
1. Mark the title of the paper (with Chinese translation)
2. list all the authors' names (use English)
3. mark the first author's affiliation (output {} translation only)
4. mark the keywords of this article (use English)
5. link to the paper, Github code link (if available, fill in Github:None if not)
6. summarize according to the following four points.Be sure to use {} answers (proper nouns need to be marked in English)
- (1):What is the research background of this article?
- (2):What are the past methods? What are the problems with them? Is the approach well motivated?
- (3):What is the research methodology proposed in this paper?
- (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals?
Follow the format of the output that follows:
1. Title: xxx\n\n
2. Authors: xxx\n\n
3. Affiliation: xxx\n\n
4. Keywords: xxx\n\n
5. Urls: xxx or xxx , xxx \n\n
6. Summary: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
- (4):xxx.\n\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed.
""".format(self.language, self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("summary_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
def export_to_markdown(self, text, file_name, mode='w'):
        # open a file in write mode
        with open(file_name, mode, encoding="utf-8") as f:
            # write the html-formatted content to the file
            f.write(text)
    # define a method that prints the reader information
def show_info(self):
print(f"Key word: {self.key_word}")
print(f"Query: {self.query}")
print(f"Sort: {self.sort}")
def chat_arxiv_main(args):
reader1 = Reader(key_word=args.key_word,
query=args.query,
args=args
)
reader1.show_info()
paper_list = reader1.get_arxiv_web(args=args, page_num=args.page_num, days=args.days)
reader1.summary_with_chat(paper_list=paper_list)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--query", type=str, default='GPT-4', help="the query string, ti: xx, au: xx, all: xx,")
parser.add_argument("--key_word", type=str, default='GPT robot', help="the key word of user research fields")
parser.add_argument("--page_num", type=int, default=1, help="the maximum number of page")
parser.add_argument("--max_results", type=int, default=1, help="the maximum number of results")
parser.add_argument("--days", type=int, default=1, help="the last days of arxiv papers of this query")
parser.add_argument("--sort", type=str, default="web", help="another is LastUpdatedDate")
parser.add_argument("--save_image", default=False,
help="save image? It takes a minute or two to save a picture! But pretty")
parser.add_argument("--file_format", type=str, default='md', help="导出的文件格式,如果存图片的话,最好是md,如果不是的话,txt的不会乱")
parser.add_argument("--language", type=str, default='zh', help="The other output lauguage is English, is en")
arxiv_args = ArxivParams(**vars(parser.parse_args()))
import time
start_time = time.time()
chat_arxiv_main(args=arxiv_args)
print("summary time:", time.time() - start_time)
| [
" \n 7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are.\n - (1):...\n - (2):...\n - (3):...\n - .......\n Follow the format of the output that follows: \n 7. Methods: \n\n\n - (1):xxx;\n \n - (2):xxx;\n \n - (3):xxx;\n \n ....... \n\n \n \n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write. \n ",
"] and you need to critically review this article",
"This is the <summary> and <Method> part of an English document, where <summary> you have summarized, but the <Methods> part, I need your help to read and summarize the following questions.PLACEHOLDER",
" \n 1. Mark the title of the paper (with Chinese translation)\n 2. list all the authors' names (use English)\n 3. mark the first author's affiliation (output {} translation only) \n 4. mark the keywords of this article (use English)\n 5. link to the paper, Github code link (if available, fill in Github:None if not)\n 6. summarize according to the following four points.Be sure to use {} answers (proper nouns need to be marked in English)\n - (1):What is the research background of this article?\n - (2):What are the past methods? What are the problems with them? Is the approach well motivated?\n - (3):What is the research methodology proposed in this paper?\n - (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals?\n Follow the format of the output that follows: \n 1. Title: xxx\n\n\n 2. Authors: xxx\n\n\n 3. Affiliation: xxx\n\n \n 4. Keywords: xxx\n\n \n 5. Urls: xxx or xxx , xxx \n\n \n 6. Summary: \n\n\n - (1):xxx;\n \n - (2):xxx;\n \n - (3):xxx;\n \n - (4):xxx.\n\n \n \n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed. \n ",
" \n 8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).\n - (1):What is the significance of this piece of work?\n - (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload. \n .......\n Follow the format of the output later: \n 8. Conclusion: \n\n\n - (1):xxx;\n \n - (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n \n \n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write. \n ",
"] who is good at summarizing papers using concise statements",
"This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to summarize the following questions:PLACEHOLDER",
"This is the title, author, link, abstract and introduction of an English document. I need your help to read and summarize the following questions: PLACEHOLDER",
"You are a researcher in the field of [",
"You are a reviewer in the field of ["
] |
2024-01-10 | liuxing9848/ChatPaper-1 | chat_reviewer.py | import argparse
import configparser
import datetime
import json
import os
import re
import time
from collections import namedtuple
import numpy as np
import openai
import tenacity
import tiktoken
from get_paper import Paper
ReviewerParams = namedtuple(
"Params", ["paper_path", "file_format", "research_fields", "language"]
)
# define the Reviewer class
class Reviewer:
    # initialization method, set the attributes
def __init__(self, args=None):
if args.language == 'en':
self.language = 'English'
elif args.language == 'zh':
self.language = 'Chinese'
else:
self.language = 'Chinese'
        # create a ConfigParser object
        self.config = configparser.ConfigParser()
        # read the configuration file
        self.config.read('apikey.ini')
        # get the value for a given key
self.chat_api_list = self.config.get('OpenAI', 'OPENAI_API_KEYS')[1:-1].replace('\'', '').split(',')
self.chat_api_list = [api.strip() for api in self.chat_api_list if len(api) > 5]
self.cur_api = 0
self.file_format = args.file_format
self.max_token_num = 4096
self.encoding = tiktoken.get_encoding("gpt2")
def validateTitle(self, title):
        # sanitize the paper title so it can be used as a file path
        rstr = r"[\/\\\:\*\?\"\<\>\|]"  # '/ \ : * ? " < > |'
        new_title = re.sub(rstr, "_", title)  # replace them with underscores
return new_title
def review_by_chatgpt(self, paper_list):
htmls = []
for paper_index, paper in enumerate(paper_list):
sections_of_interest = self.stage_1(paper)
# extract the essential parts of the paper
text = ''
text += 'Title:' + paper.title + '. '
text += 'Abstract: ' + paper.section_texts['Abstract']
intro_title = next((item for item in paper.section_names if 'ntroduction' in item.lower()), None)
if intro_title is not None:
text += 'Introduction: ' + paper.section_texts[intro_title]
# Similar for conclusion section
conclusion_title = next((item for item in paper.section_names if 'onclusion' in item), None)
if conclusion_title is not None:
text += 'Conclusion: ' + paper.section_texts[conclusion_title]
for heading in sections_of_interest:
if heading in paper.section_names:
text += heading + ': ' + paper.section_texts[heading]
chat_review_text = self.chat_review(text=text)
htmls.append('## Paper:' + str(paper_index + 1))
htmls.append('\n\n\n')
htmls.append(chat_review_text)
            # save the review comments
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
try:
export_path = os.path.join('./', 'output_file')
os.makedirs(export_path)
except:
pass
mode = 'w' if paper_index == 0 else 'a'
file_name = os.path.join(export_path,
date_str + '-' + self.validateTitle(paper.title) + "." + self.file_format)
self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
htmls = []
def stage_1(self, paper):
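        # stage 1: show the LLM the title, abstract and candidate section headings and ask it to pick at most two sections to review in full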
htmls = []
text = ''
text += 'Title: ' + paper.title + '. '
text += 'Abstract: ' + paper.section_texts['Abstract']
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
messages = [
{"role": "system",
"content": f"You are a professional reviewer in the field of {args.research_fields}. "
f"I will give you a paper. You need to review this paper and discuss the novelty and originality of ideas, correctness, clarity, the significance of results, potential impact and quality of the presentation. "
f"Due to the length limitations, I am only allowed to provide you the abstract, introduction, conclusion and at most two sections of this paper."
f"Now I will give you the title and abstract and the headings of potential sections. "
f"You need to reply at most two headings. Then I will further provide you the full information, includes aforementioned sections and at most two sections you called for.\n\n"
f"Title: {paper.title}\n\n"
f"Abstract: {paper.section_texts['Abstract']}\n\n"
f"Potential Sections: {paper.section_names[2:-1]}\n\n"
f"Follow the following format to output your choice of sections:"
f"{{chosen section 1}}, {{chosen section 2}}\n\n"},
{"role": "user", "content": text},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print(result)
return result.split(',')
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_review(self, text):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
review_prompt_token = 1000
text_token = len(self.encoding.encode(text))
input_text_index = int(len(text) * (self.max_token_num - review_prompt_token) / text_token)
input_text = "This is the paper for your review:" + text[:input_text_index]
        with open('ReviewFormat.txt', 'r') as file:  # read the specific review format
review_format = file.read()
messages = [
{"role": "system",
"content": "You are a professional reviewer in the field of " + args.research_fields + ". Now I will give you a paper. You need to give a complete review opinion according to the following requirements and format:" + review_format + " Please answer in {}.".format(
self.language)},
{"role": "user", "content": input_text},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("********" * 10)
print(result)
print("********" * 10)
print("prompt_token_used:", response.usage.prompt_tokens)
print("completion_token_used:", response.usage.completion_tokens)
print("total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
def export_to_markdown(self, text, file_name, mode='w'):
        # use the markdown module's convert method to turn the text into html
        # html = markdown.markdown(text)
        # open a file in write mode
        with open(file_name, mode, encoding="utf-8") as f:
            # write the html-formatted content to the file
f.write(text)
def chat_reviewer_main(args):
reviewer1 = Reviewer(args=args)
    # determine whether the input is a directory or a single file:
paper_list = []
if args.paper_path.endswith(".pdf"):
paper_list.append(Paper(path=args.paper_path))
else:
for root, dirs, files in os.walk(args.paper_path):
print("root:", root, "dirs:", dirs, 'files:', files) # 当前目录路径
for filename in files:
                # if a PDF file is found, add it to the paper list
if filename.endswith(".pdf"):
paper_list.append(Paper(path=os.path.join(root, filename)))
print("------------------paper_num: {}------------------".format(len(paper_list)))
[print(paper_index, paper_name.path.split('\\')[-1]) for paper_index, paper_name in enumerate(paper_list)]
reviewer1.review_by_chatgpt(paper_list=paper_list)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--paper_path", type=str, default='', help="path of papers")
parser.add_argument("--file_format", type=str, default='txt', help="output file format")
parser.add_argument("--research_fields", type=str,
default='computer science, artificial intelligence and reinforcement learning',
help="the research fields of paper")
parser.add_argument("--language", type=str, default='en', help="output lauguage, en or zh")
reviewer_args = ReviewerParams(**vars(parser.parse_args()))
start_time = time.time()
chat_reviewer_main(args=reviewer_args)
print("review time:", time.time() - start_time)
| [
"You are a professional reviewer in the field of ",
" Please answer in {}.",
". Now I will give you a paper. You need to give a complete review opinion according to the following requirements and format:",
"I will give you a paper. You need to review this paper and discuss the novelty and originality of ideas, correctness, clarity, the significance of results, potential impact and quality of the presentation. ",
"{chosen section 1}, {chosen section 2}\n\n",
"Follow the following format to output your choice of sections:",
"Now I will give you the title and abstract and the headings of potential sections. ",
"You need to reply at most two headings. Then I will further provide you the full information, includes aforementioned sections and at most two sections you called for.\n\n",
"Due to the length limitations, I am only allowed to provide you the abstract, introduction, conclusion and at most two sections of this paper.",
"1000"
] |
2024-01-10 | liuxing9848/ChatPaper-1 | chat_response.py | import argparse
import configparser
import datetime
import json
import os
import re
import time
from collections import namedtuple
import numpy as np
import openai
import tenacity
import tiktoken
from get_paper import Paper
# ChatResponse
ResponseParams = namedtuple(
"Params", ["comment_path", "file_format", "language"]
)
# define the Response class
class Response:
    # initialization method, set the attributes
def __init__(self, args=None):
if args.language == 'en':
self.language = 'English'
elif args.language == 'zh':
self.language = 'Chinese'
else:
self.language = 'Chinese'
        # create a ConfigParser object
        self.config = configparser.ConfigParser()
        # read the configuration file
        self.config.read('apikey.ini')
        # get the value for a given key
self.chat_api_list = self.config.get('OpenAI', 'OPENAI_API_KEYS')[1:-1].replace('\'', '').split(',')
self.chat_api_list = [api.strip() for api in self.chat_api_list if len(api) > 5]
self.cur_api = 0
self.file_format = args.file_format
self.max_token_num = 4096
self.encoding = tiktoken.get_encoding("gpt2")
def response_by_chatgpt(self, comment_path):
htmls = []
        # read the review comments to respond to
with open(comment_path, 'r') as file:
comments = file.read()
chat_response_text = self.chat_response(text=comments)
htmls.append(chat_response_text)
        # save the generated response
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
try:
export_path = os.path.join('./', 'response_file')
os.makedirs(export_path)
except:
pass
file_name = os.path.join(export_path, date_str + '-Response.' + self.file_format)
self.export_to_markdown("\n".join(htmls), file_name=file_name)
htmls = []
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_response(self, text):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
response_prompt_token = 1000
text_token = len(self.encoding.encode(text))
input_text_index = int(len(text) * (self.max_token_num - response_prompt_token) / text_token)
input_text = "This is the review comments:" + text[:input_text_index]
messages = [
{"role": "system", "content": """You are the author, you submitted a paper, and the reviewers gave the review comments.
Please reply with what we have done, not what we will do.
You need to extract questions from the review comments one by one, and then respond point-to-point to the reviewers’ concerns.
Please answer in {}. Follow the format of the output later:
- Response to reviewers
#1 reviewer
Concern #1: xxxx
Author response: xxxxx
Concern #2: xxxx
Author response: xxxxx
...
#2 reviewer
Concern #1: xxxx
Author response: xxxxx
Concern #2: xxxx
Author response: xxxxx
...
#3 reviewer
Concern #1: xxxx
Author response: xxxxx
Concern #2: xxxx
Author response: xxxxx
...
""".format(self.language)
},
{"role": "user", "content": input_text},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("********" * 10)
print(result)
print("********" * 10)
print("prompt_token_used:", response.usage.prompt_tokens)
print("completion_token_used:", response.usage.completion_tokens)
print("total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
def export_to_markdown(self, text, file_name, mode='w'):
        # use the markdown module's convert method to turn the text into html
        # html = markdown.markdown(text)
        # open a file in write mode
        with open(file_name, mode, encoding="utf-8") as f:
            # write the html-formatted content to the file
f.write(text)
def chat_response_main(args):
Response1 = Response(args=args)
Response1.response_by_chatgpt(comment_path=args.comment_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--comment_path", type=str, default='review_comments.txt', help="path of comment")
parser.add_argument("--file_format", type=str, default='txt', help="output file format")
parser.add_argument("--language", type=str, default='en', help="output lauguage, en or zh")
response_args = ResponseParams(**vars(parser.parse_args()))
# args = parser.parse_args()
start_time = time.time()
chat_response_main(args=response_args)
print("response time:", time.time() - start_time)
| [
"You are the author, you submitted a paper, and the reviewers gave the review comments. \n Please reply with what we have done, not what we will do.\n You need to extract questions from the review comments one by one, and then respond point-to-point to the reviewers’ concerns. \n Please answer in {}. Follow the format of the output later: \n - Response to reviewers\n #1 reviewer\n Concern #1: xxxx\n Author response: xxxxx\n\n Concern #2: xxxx\n Author response: xxxxx\n ...\n\n #2 reviewer\n Concern #1: xxxx\n Author response: xxxxx\n\n Concern #2: xxxx\n Author response: xxxxx\n ...\n\n #3 reviewer\n Concern #1: xxxx\n Author response: xxxxx\n\n Concern #2: xxxx\n Author response: xxxxx\n ...\n \n ",
"This is the review comments:PLACEHOLDER",
"1000"
] |
2024-01-10 | mckinsey/vizro | vizro-ai~tests~unit~vizro-ai~components~test_explanation.py | import pytest
from langchain.llms.fake import FakeListLLM
from vizro_ai.components import GetCodeExplanation
@pytest.fixture
def fake_llm():
# This is to simulate the response of LLM
responses = [
'{"business_insights": "The chart shows '
"the composition of GDP in different continents. The horizontal line represents "
'the average GDP across all continents.", "code_explanation": "This code groups the DataFrame by '
"the 'continent' column and calculates the sum of the 'gdpPercap' column for each continent. It then creates "
"a bar chart using Plotly Express. "
'It also adds a horizontal line at the average GDP value. Finally, it returns the chart."}'
]
return FakeListLLM(responses=responses)
@pytest.fixture
def code_snippet():
code_snippet = """
from vizro.models.types import capture
import vizro.plotly.express as px
import pandas as pd
@capture('graph')
def custom_chart(data_frame: pd.DataFrame = None):
if data_frame is None:
data_frame = pd.DataFrame()
df = data_frame.groupby('continent')['gdpPercap'].sum().reset_index().rename(columns={'gdpPercap': 'total_gdp'})
fig = px.bar(df, x='continent', y='total_gdp', color='continent', title='Composition of GDP in Continents')
fig.add_hline(y=df['total_gdp'].mean(), line_dash='dash', line_color='red', annotation_text='Average GDP')
return fig
fig = custom_chart(data_frame=df)
"""
return code_snippet
@pytest.fixture
def expected_business_insights():
business_insights = (
"The chart shows the composition of GDP in different continents. "
"The horizontal line represents the average GDP across all continents."
)
return business_insights
@pytest.fixture
def expected_code_explanation():
code_explanation = (
"This code groups the DataFrame by the 'continent' column and calculates the sum of "
"the 'gdpPercap' column for each continent. It then creates a bar chart using "
"Plotly Express and Vizro. "
"It also adds a horizontal line at the average GDP value. Finally, it returns the chart."
"\n<br>**This customized chart can be directly used in a Vizro dashboard.** "
"\nClick [custom chart docs]"
"(https://vizro.readthedocs.io/en/stable/pages/user_guides/custom_charts/) "
"for more information."
)
return code_explanation
@pytest.fixture
def loaded_response():
loaded_response = {
"business_insights": "The chart shows the composition of GDP in different continents. "
"The horizontal line represents the average GDP across all continents.",
"code_explanation": "This code groups the DataFrame by the 'continent' column and calculates the sum of "
"the 'gdpPercap' column for each continent. It then creates a bar chart using "
"Plotly Express. "
"It also adds a horizontal line at the average GDP value. Finally, it returns the chart.",
}
return loaded_response
class TestCodeExplanationInstantiation:
def test_instantiation(self):
explanation = GetCodeExplanation(llm=fake_llm)
assert explanation.llm == fake_llm
def setup_method(self, fake_llm):
self.get_code_explanation = GetCodeExplanation(llm=fake_llm)
def test_pre_process(self, code_snippet):
llm_kwargs, partial_vars = self.get_code_explanation._pre_process(code_snippet)
expected_partial_vars = {"code_snippet": code_snippet}
assert partial_vars == expected_partial_vars
def test_post_process(self, loaded_response, expected_business_insights, expected_code_explanation):
business_insights, code_explanation = self.get_code_explanation._post_process(loaded_response)
assert business_insights == expected_business_insights
assert code_explanation == expected_code_explanation
class TestChartSelection:
def test_fake_response(self, code_snippet, fake_llm, expected_business_insights, expected_code_explanation):
get_code_explanation = GetCodeExplanation(fake_llm)
business_insights, code_explanation = get_code_explanation.run(
chain_input="choose a best chart for describe the composition of gdp in continent, "
"and horizontal line for avg gdp",
code_snippet=code_snippet,
)
assert business_insights == expected_business_insights
assert code_explanation == expected_code_explanation
| [] |
2024-01-10 | mckinsey/vizro | vizro-ai~tests~unit~vizro-ai~components~test_chart_selection.py | import pandas as pd
import pytest
from langchain.llms.fake import FakeListLLM
from vizro_ai.components import GetChartSelection
@pytest.fixture
def fake_llm():
# This is to simulate the response of LLM
response = ['{"chart_type": "bar"}']
return FakeListLLM(responses=response)
class TestChartSelectionInstantiation:
def test_instantiation(self):
chart_selection = GetChartSelection(llm=fake_llm)
assert chart_selection.llm == fake_llm
def setup_method(self, fake_llm):
self.get_chart_selection = GetChartSelection(llm=fake_llm)
def test_pre_process(self):
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
llm_kwargs, partial_vars = self.get_chart_selection._pre_process(df)
expected_partial_vars = {"df_schema": "A: int64\nB: int64", "df_head": df.head().to_markdown()}
assert partial_vars == expected_partial_vars
@pytest.mark.parametrize(
"load_args, expected_chart_name",
[
({"chart_type": "line"}, "line"),
({"chart_type": "bar"}, "bar"),
({"chart_type": ["line", "bar"]}, "line,bar"),
],
)
def test_post_process(self, load_args, expected_chart_name):
chart_names = self.get_chart_selection._post_process(load_args)
assert chart_names == expected_chart_name
class TestChartSelection:
def test_fake_response(self, gapminder, fake_llm):
get_chart_selection = GetChartSelection(fake_llm)
target_chart = get_chart_selection.run(
df=gapminder, chain_input="choose a best chart for describe the composition"
)
assert target_chart == "bar"
| [] |
2024-01-10 | mckinsey/vizro | vizro-ai~src~vizro_ai~chains~_llm_chain.py | import json
import logging
import re
from abc import ABC, abstractmethod
from typing import Any, Dict, Optional
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema import ChatGeneration, Generation
from langchain.schema.messages import AIMessage
from vizro_ai.chains._llm_models import LLM_MODELS
logger = logging.getLogger(__name__)
class VizroBaseChain(ABC):
"""Abstract method for base chain."""
@abstractmethod
def _construct_prompt(self):
"""Construct the prompt from partial variables and input."""
pass
@abstractmethod
def _custom_parse(self, result):
"""Parse the output."""
pass
@abstractmethod
def execute_chain(self, input_str: str):
"""Execute the LLMChain and get the response."""
pass
class FunctionCallChain(VizroBaseChain, ABC):
"""LLM Chain with Function Calling."""
def __init__(
self,
llm: LLM_MODELS,
raw_prompt: str,
partial_vars_map: Optional[Dict[Any, Any]] = None,
llm_kwargs: Optional[Dict[str, Any]] = None,
verbose: bool = True,
):
self.llm = llm
self.raw_prompt = raw_prompt
self.partial_vars_map = partial_vars_map
self.llm_kwargs = llm_kwargs
self.verbose = verbose
self._chain = None
def _construct_prompt(self, raw_prompt, partial_vars_map) -> str:
"""Construct the prompt from partial variables input."""
prompt = PromptTemplate(
input_variables=["input"],
template=raw_prompt,
partial_variables=partial_vars_map,
)
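        # collect every {placeholder} referenced in the raw prompt (other than "input") and warn
        # if the supplied partial variables do not cover exactly that set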
vars_set = set(re.findall(r"\{([^}]*)\}", raw_prompt))
vars_set -= {"input"}
partial_vars_input = partial_vars_map.keys()
if partial_vars_input != vars_set:
different_vars = partial_vars_input ^ vars_set
logging.warning(
f"Partial variables input is different from required partial variables in prompt, "
f"{different_vars} missing"
)
prompt.partial_variables = partial_vars_map
return prompt
@staticmethod
def _custom_parse(result) -> Dict[str, Any]:
"""Extract arguments from a list of chat generations.
It retrieves the arguments embedded within the message and returns them as a dictionary.
"""
element = result[0]
if isinstance(element, ChatGeneration):
if not isinstance(element.message, AIMessage):
raise TypeError(
f"Expected element.message to be of type AIMessage, " f"but got {type(element.message)}"
)
args_str = element.message.additional_kwargs.get("function_call", {}).get("arguments", "{}")
# Remove trailing commas before a closing brace
args_str = re.sub(r",\s*}", "}", args_str)
return json.loads(args_str, strict=False)
# TODO delete when ChatGeneration can be supported for fake llm in langchain
# fake_llm does not support ChatGeneration, only support Generation
# For testing, we need to adapt for testing
elif isinstance(element, Generation):
return json.loads(element.text, strict=False)
@property
def chain(self) -> LLMChain:
"""LLMChain instance."""
prompt = self._construct_prompt(self.raw_prompt, self.partial_vars_map)
self.llm_kwargs = self.llm_kwargs or {}
self._chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose, llm_kwargs=self.llm_kwargs)
return self._chain
def execute_chain(self, input_str: str) -> Dict[str, Any]:
"""Execute chain.
Args:
input_str: user question as input string.
Returns:
args as a dictionary
"""
raw_ans = self.chain.generate([{"input": input_str}])
args = self._custom_parse(raw_ans.generations[0])
return args
# class NonFunctionCallChain(VizroBaseChain, ABC):
# TODO implement non function call chain with different execution, prompt, and parser
# class VizroChainRunner:
# # TODO add cache here in runner class if we need cache
| [
"input"
] |
2024-01-10 | mckinsey/vizro | vizro-ai~tests~unit~vizro-ai~components~test_visual_code.py | import pytest
from langchain.llms.fake import FakeListLLM
from vizro_ai.components import GetVisualCode
@pytest.fixture
def chart_types():
return "bar"
@pytest.fixture
def df_code_1():
return """import pandas as pd
df = df.groupby('continent')['gdpPercap'].sum().reset_index()"""
@pytest.fixture
def output_visual_code_LLM_1():
return """import plotly.express as px
fig = px.bar(df, x='continent', y='total_gdpPercap', title='Composition of GDP by Continent')
fig.add_hline(y=df['total_gdpPercap'].mean(), line_dash='dash', line_color='red', annotation_text='Average GDP')
fig.show()"""
@pytest.fixture
def expected_final_output_1():
return """import vizro.plotly.express as px
import pandas as pd
df = df.groupby('continent')['gdpPercap'].sum().reset_index()
fig = px.bar(df, x='continent', y='total_gdpPercap', title='Composition of GDP by Continent')
fig.add_hline(y=df['total_gdpPercap'].mean(), line_dash='dash', line_color='red', annotation_text='Average GDP')
fig.show()"""
@pytest.fixture
def df_code_2():
return """import pandas as pd
df = df.query('year == 2007').groupby('continent')['pop'].sum().reset_index(name='total_pop')"""
@pytest.fixture
def output_visual_code_LLM_2():
return """import plotly.graph_objects as go
# Create a bar chart
fig = go.Figure(data=[go.Bar(x=df['continent'], y=df['total_pop'])])
# Update the layout
fig.update_layout(title='Increase in Population by Continent', xaxis_title='Continent', yaxis_title='Total Population')
# Show the chart
fig.show()"""
@pytest.fixture
def expected_final_output_2():
return """import plotly.graph_objects as go
import pandas as pd
df = df.query('year == 2007').groupby('continent')['pop'].sum().reset_index(name='total_pop')
# Create a bar chart
fig = go.Figure(data=[go.Bar(x=df['continent'], y=df['total_pop'])])
# Update the layout
fig.update_layout(title='Increase in Population by Continent', xaxis_title='Continent', yaxis_title='Total Population')
# Show the chart
fig.show()"""
@pytest.fixture
def fake_llm(output_visual_code_LLM_1):
"""This is to simulate the response of LLM."""
response = ['{{"visual_code": "{}"}}'.format(output_visual_code_LLM_1)]
return FakeListLLM(responses=response)
class TestGetVisualCodeInstantiation:
def test_instantiation(self):
chart_selection = GetVisualCode(llm=fake_llm)
assert chart_selection.llm == fake_llm
def setup_method(self, fake_llm):
self.get_visual_code = GetVisualCode(llm=fake_llm)
def test_pre_process(self, chart_types, df_code_1):
_, partial_vars = self.get_visual_code._pre_process(chart_types=chart_types, df_code=df_code_1)
assert partial_vars == {"chart_types": chart_types, "df_code": df_code_1}
@pytest.mark.parametrize(
"input,output,df_code",
[
("output_visual_code_LLM_1", "expected_final_output_1", "df_code_1"),
("output_visual_code_LLM_2", "expected_final_output_2", "df_code_2"),
],
)
def test_post_process(self, input, output, df_code, request):
input = request.getfixturevalue(input)
output = request.getfixturevalue(output)
df_code = request.getfixturevalue(df_code)
loaded_args = {"visual_code": input}
processed_code = self.get_visual_code._post_process(loaded_args, df_code=df_code)
assert processed_code == output
class TestGetVisualCodeRun:
def test_fake_run(self, fake_llm, output_visual_code_LLM_1, expected_final_output_1, df_code_1, chart_types):
get_visual_code = GetVisualCode(fake_llm)
processed_code = get_visual_code.run(
chain_input=output_visual_code_LLM_1, df_code=df_code_1, chart_types=chart_types
)
assert processed_code == expected_final_output_1
| [] |
2024-01-10 | mckinsey/vizro | vizro-ai~tests~unit~vizro-ai~components~test_dataframe_craft.py | import re
import pandas as pd
import pytest
from langchain.llms.fake import FakeListLLM
from vizro_ai.components import GetDataFrameCraft
def dataframe_code():
return """
data_frame = data_frame.groupby('continent')['gdpPercap'].sum().reset_index()
data_frame = data_frame.rename(columns={'gdpPercap': 'total_gdp'})
data_frame.plot(kind='bar', x='continent', y='total_gdp', color='skyblue', legend=False)"""
@pytest.fixture
def fake_llm():
dataframe_code_before_postprocess = re.sub(
r"[\x00-\x1f]", lambda m: "\\u{:04x}".format(ord(m.group(0))), dataframe_code()
)
response = ['{{"dataframe_code": "{}"}}'.format(dataframe_code_before_postprocess)]
return FakeListLLM(responses=response)
@pytest.fixture
def input_df():
input_df = pd.DataFrame(
{
"contintent": ["Asia", "Asia", "America", "Europe"],
"country": ["China", "India", "US", "UK"],
"gdpPercap": [102, 110, 300, 200],
}
)
return input_df
class TestDataFrameCraftMethods:
def test_instantiation(self):
dataframe_craft = GetDataFrameCraft(llm=fake_llm)
assert dataframe_craft.llm == fake_llm
def setup_method(self, fake_llm):
self.get_dataframe_craft = GetDataFrameCraft(llm=fake_llm)
def test_pre_process(self, input_df):
llm_kwargs_to_use, partial_vars = self.get_dataframe_craft._pre_process(df=input_df)
expected_partial_vars = {
"df_schema": "contintent: object\ncountry: object\ngdpPercap: int64",
"df_head": input_df.head().to_markdown(),
}
assert partial_vars == expected_partial_vars
@pytest.mark.parametrize(
"code_string, expected_code_string",
[
(
"df = pd.DataFrame({'test1': [1, 2], 'test2': [3, 4]})",
"import pandas as pd\ndf = pd.DataFrame({'test1': [1, 2], 'test2': [3, 4]}).reset_index()",
),
(
"df = pd.DataFrame({'test1': [1, 2], 'test2': [3, 4]}).reset_index()",
"import pandas as pd\ndf = pd.DataFrame({'test1': [1, 2], 'test2': [3, 4]}).reset_index()",
),
(
"data_frame = pd.DataFrame({'test1': [1, 1, 2], 'test2': [3, 4, 5]})\n"
"data_frame = data_frame.groupby('test1')['test2'].sum()",
"import pandas as pd\ndata_frame = pd.DataFrame({'test1': [1, 1, 2], 'test2': [3, 4, 5]})\n"
"df = data_frame.groupby('test1')['test2'].sum().reset_index()",
),
(
"import pandas as pd\n"
"df = pd.DataFrame({'test1': [1, 2], 'test2': [3, 4]}).plot(kind='bar', x='test1', y='test2')",
"import pandas as pd\ndf = pd.DataFrame({'test1': [1, 2], 'test2': [3, 4]}).reset_index()",
),
],
)
def test_post_process(self, code_string, expected_code_string, input_df):
load_args = {"dataframe_code": code_string}
df_code = self.get_dataframe_craft._post_process(load_args, input_df)
assert df_code == expected_code_string
class TestDataFrameCraftResponse:
def test_fake_response(self, input_df, fake_llm):
get_dataframe_craft = GetDataFrameCraft(fake_llm)
df_code = get_dataframe_craft.run(
chain_input="choose a best chart for describe the composition of gdp in continent, "
"and horizontal line for avg gdp",
df=input_df,
)
assert (
df_code == "import pandas as pd\n "
"data_frame = data_frame.groupby('continent')['gdpPercap'].sum().reset_index()\n "
"data_frame = data_frame.rename(columns={'gdpPercap': 'total_gdp'})\n"
"df = data_frame.reset_index()"
)
| [] |
2024-01-10 | mckinsey/vizro | vizro-ai~src~vizro_ai~chains~_llm_models.py | from typing import Callable, Dict, List, Union
from langchain.chat_models import ChatOpenAI
try:
from pydantic.v1 import BaseModel, Field
except ImportError: # pragma: no cov
from pydantic import BaseModel, Field
# TODO add new wrappers in if new model support is added
LLM_MODELS = Union[ChatOpenAI]
# TODO constant of model inventory, can be converted to yaml and link to docs
PREDEFINED_MODELS: List[Dict[str, any]] = [
{
"name": "gpt-3.5-turbo-0613",
"max_tokens": 4096,
"wrapper": ChatOpenAI,
},
{
"name": "gpt-4-0613",
"max_tokens": 8192,
"wrapper": ChatOpenAI,
},
{
"name": "gpt-3.5-turbo-1106",
"max_tokens": 16385,
"wrapper": ChatOpenAI,
},
{
"name": "gpt-4-1106-preview",
"max_tokens": 128000,
"wrapper": ChatOpenAI,
},
]
class LLM(BaseModel):
"""Represents a Language Learning Model (LLM).
Attributes:
name (str): The name of the LLM.
max_tokens (int): The maximum number of tokens that the LLM can handle.
wrapper (callable): The langchain function used to instantiate the model.
"""
name: str
max_tokens: int
wrapper: Callable = Field(..., description="The langchain function used to instantiate the model.")
class ModelConstructor:
"""Manages available Language Learning Models (LLMs).
Provides methods to fetch LLM details and instantiate appropriate wrappers.
"""
models: Dict[str, LLM]
def __init__(self):
"""Initializes the model manager with a set of predefined LLMs."""
self.models = {model["name"]: LLM(**model) for model in PREDEFINED_MODELS}
def get_llm_model(self, model_name: str, temperature: float = 0) -> LLM_MODELS:
"""Fetches and initializes an instance of the LLM.
Args:
model_name (str): The name of the LLM.
            temperature (float, optional): Sampling temperature passed to the model wrapper. Defaults to 0.
Returns:
The initialized instance of the LLM.
Raises:
ValueError: If the model name is not found.
"""
model = self.models.get(model_name.lower())
if model:
return model.wrapper(model_name=model.name, temperature=temperature)
else:
raise ValueError(f"Model {model_name} not found!")
if __name__ == "__main__":
model_manager = ModelConstructor()
llm_chat_openai = model_manager.get_llm_model("gpt-3.5-turbo-0613", temperature=0)
| [] |
2024-01-10 | yhcc/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
  howpublished = {\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | developerrahulofficial/AI-Girlfriend | waifu.py | import openai
import speech_recognition as sr
from gtts import gTTS
from elevenlabs import generate, save, set_api_key, voices
import sounddevice as sd
import soundfile as sf
from dotenv import load_dotenv
from os import getenv, path
from json import load, dump, JSONDecodeError
class Waifu:
def __init__(self) -> None:
self.mic = None
self.recogniser = None
self.user_input_service = None
self.stt_duration = None
self.chatbot_service = None
self.chatbot_model = None
self.chatbot_temperature = None
self.chatbot_personality_file = None
self.message_history = []
self.context = []
self.tts_service = None
self.tts_voice = None
self.tts_model = None
def initialise(self, user_input_service:str | None = None, stt_duration:float | None = None, mic_index:int | None = None,
chatbot_service:str | None = None, chatbot_model:str | None = None, chatbot_temperature:float | None = None, personality_file:str | None = None,
tts_service:str | None = None, output_device = None, tts_voice:str | None = None, tts_model:str | None = None) -> None:
load_dotenv()
self.update_user_input(user_input_service=user_input_service, stt_duration=stt_duration)
self.mic = sr.Microphone(device_index=mic_index)
self.recogniser = sr.Recognizer()
openai.api_key = getenv("OPENAI_API_KEY")
self.update_chatbot(service = chatbot_service, model = chatbot_model, temperature = chatbot_temperature, personality_file = personality_file)
self.__load_chatbot_data()
print("This is the output device :", output_device)
output_device = 4
self.update_tts(service=tts_service, output_device=output_device)
def update_user_input(self, user_input_service:str | None = 'whisper', stt_duration:float | None = 0.5) -> None:
if user_input_service:
self.user_input_service = user_input_service
elif self.user_input_service is None:
self.user_input_service = 'whisper'
if stt_duration:
self.stt_duration = stt_duration
elif self.stt_duration is None:
self.stt_duration = 0.5
def update_chatbot(self, service:str | None = 'openai', model:str | None = 'gpt-3.5-turbo', temperature:float | None = 0.5, personality_file:str | None = 'personality.txt') -> None:
if service:
self.chatbot_service = service
elif self.chatbot_service is None:
self.chatbot_service = 'openai'
if model:
self.chatbot_model = model
elif self.chatbot_model is None:
self.chatbot_model = 'gpt-3.5-turbo'
if temperature:
self.chatbot_temperature = temperature
elif self.chatbot_temperature is None:
self.chatbot_temperature = 0.5
if personality_file:
self.chatbot_personality_file = personality_file
elif self.chatbot_personality_file is None:
self.chatbot_personality_file = 'personality.txt'
def update_tts(self, service:str | None = 'google', output_device = None, voice:str | None = None, model:str | None = None) -> None:
if service:
self.tts_service = service
elif self.tts_service is None:
self.tts_service = 'google'
set_api_key(getenv('ELEVENLABS_API_KEY'))
if voice:
self.tts_voice = voice
elif self.tts_voice is None:
self.tts_voice = 'Elli'
if model:
self.tts_model = model
elif self.tts_model is None:
self.tts_model = 'eleven_monolingual_v1'
if output_device is not None:
sd.check_output_settings(output_device)
sd.default.samplerate = 44100
sd.default.device = output_device
def get_audio_devices(self):
return sd.query_devices()
def get_user_input(self, service:str | None = None, stt_duration:float | None = None) -> str:
service = self.user_input_service if service is None else service
stt_duration = self.stt_duration if stt_duration is None else stt_duration
supported_stt_services = ['whisper', 'google']
supported_text_services = ['console']
result = ""
if service in supported_stt_services:
result = self.__recognise_speech(service, duration=stt_duration)
elif service in supported_text_services:
result = self.__get_text_input(service)
else:
raise ValueError(f"{service} servise doesn't supported. Please, use one of the following services: {supported_stt_services + supported_text_services}")
return result
def get_chatbot_response(self, prompt:str, service:str | None = None, model:str | None = None, temperature:float | None = None) -> str:
service = self.chatbot_service if service is None else service
model = self.chatbot_model if model is None else model
temperature = self.chatbot_temperature if temperature is None else temperature
supported_chatbot_services = ['openai', 'test']
result = ""
if service == 'openai':
result = self.__get_openai_response(prompt, model=model, temperature=temperature)
elif service == 'test':
result = "This is test answer from Waifu. Nya kawaii, senpai!"
else:
raise ValueError(f"{service} servise doesn't supported. Please, use one of the following services: {supported_chatbot_services}")
return result
def tts_say(self, text:str, service:str | None = None, voice:str | None = None, model:str | None = None) -> None:
service = self.tts_service if service is None else service
voice = self.tts_voice if voice is None else voice
model = self.tts_model if model is None else model
supported_tts_services = ['google', 'elevenlabs', 'console']
if service not in supported_tts_services:
raise ValueError(f"{service} servise doesn't supported. Please, use one of the following services: {supported_tts_services}")
if service == 'google':
gTTS(text=text, lang='en', slow=False, lang_check=False).save('output.mp3')
elif service == 'elevenlabs':
self.__elevenlabs_generate(text=text, voice=voice, model=model)
elif service == 'console':
print('\n\33[7m' + "Waifu:" + '\33[0m' + f' {text}')
return
data, fs = sf.read('output.mp3')
sd.play(data, fs)
sd.wait()
def conversation_cycle(self) -> dict:
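        # one full turn: capture user input, query the chatbot, then speak the reply aloud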
input = self.get_user_input()
response = self.get_chatbot_response(input)
self.tts_say(response)
return dict(user = input, assistant = response)
def __get_openai_response(self, prompt:str, model:str, temperature:float) -> str:
self.__add_message('user', prompt)
messages = self.context + self.message_history
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
)
response = response.choices[0].message["content"]
self.__add_message('assistant', response)
self.__update_message_history()
return response
def __add_message(self, role:str, content:str) -> None:
self.message_history.append({'role': role, 'content': content})
def __load_chatbot_data(self, file_name:str | None = None) -> None:
file_name = self.chatbot_personality_file if file_name is None else file_name
with open(file_name, 'r') as f:
personality = f.read()
self.context = [{'role': 'system', 'content': personality}]
if path.isfile('./message_history.txt'):
with open('message_history.txt', 'r') as f:
try:
self.message_history = load(f)
except JSONDecodeError:
pass
def __update_message_history(self) -> None:
with open('message_history.txt', 'w') as f:
dump(self.message_history, f)
def __get_text_input(self, service:str) -> str:
user_input = ""
if service == 'console':
user_input = input('\n\33[42m' + "User:" + '\33[0m' + " ")
return user_input
def __elevenlabs_generate(self, text:str, voice:str, model:str, filename:str='output.mp3'):
audio = generate(
text=text,
voice=voice,
model=model
)
save(audio, filename)
def __recognise_speech(self, service:str, duration:float) -> str:
with self.mic as source:
print('(Start listening)')
self.recogniser.adjust_for_ambient_noise(source, duration=duration)
audio = self.recogniser.listen(source)
print('(Stop listening)')
result = ""
try:
if service == 'whisper':
result = self.__whisper_sr(audio)
elif service == 'google':
result = self.recogniser.recognize_google(audio)
except Exception as e:
print(f"Exeption: {e}")
return result
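    # transcribes the captured audio with OpenAI's hosted Whisper API; the audio is first
    # written to a wav file because the API expects a file object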
def __whisper_sr(self, audio) -> str:
with open('speech.wav', 'wb') as f:
f.write(audio.get_wav_data())
audio_file = open('speech.wav', 'rb')
transcript = openai.Audio.transcribe(model="whisper-1", file=audio_file)
return transcript['text']
def main():
w = Waifu()
w.initialise(user_input_service='console',
chatbot_service='openai',
tts_service='google', output_device=8)
w.conversation_cycle()
#while True:
# w.conversation_cycle()
if __name__ == "__main__":
main() | [] |
2024-01-10 | lucidrains/DALLE-pytorch | dalle_pytorch~dalle_pytorch.py | from math import log2, sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
import numpy as np
from axial_positional_embedding import AxialPositionalEmbedding
from einops import rearrange
from dalle_pytorch import distributed_utils
from dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
from dalle_pytorch.transformer import Transformer, DivideMax
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
class always():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return self.val
def is_empty(t):
return t.nelement() == 0
def masked_mean(t, mask, dim = 1):
t = t.masked_fill(~mask[:, :, None], 0.)
return t.sum(dim = 1) / mask.sum(dim = 1)[..., None]
def prob_mask_like(shape, prob, device):
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
def set_requires_grad(model, value):
for param in model.parameters():
param.requires_grad = value
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)
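# top-k filtering: keep roughly the top (1 - thres) fraction of logits and set the rest
# to -inf so they can never be sampled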
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
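# an embedding layer that reads its weights from a slice of a shared linear layer,
# used to tie the input embeddings to the output projection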
class SharedEmbedding(nn.Embedding):
def __init__(self, linear, start_index, end_index, **kwargs):
super().__init__(end_index - start_index, linear.weight.shape[1], **kwargs)
del self.weight
self.linear = linear
self.start_index = start_index
self.end_index = end_index
def forward(self, input):
return F.embedding(
input, self.linear.weight[self.start_index:self.end_index], self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse)
# discrete vae class
class ResBlock(nn.Module):
def __init__(self, chan):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan, chan, 3, padding = 1),
nn.ReLU(),
nn.Conv2d(chan, chan, 3, padding = 1),
nn.ReLU(),
nn.Conv2d(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
class DiscreteVAE(nn.Module):
def __init__(
self,
image_size = 256,
num_tokens = 512,
codebook_dim = 512,
num_layers = 3,
num_resnet_blocks = 0,
hidden_dim = 64,
channels = 3,
smooth_l1_loss = False,
temperature = 0.9,
straight_through = False,
reinmax = False,
kl_div_loss_weight = 0.,
normalization = ((*((0.5,) * 3), 0), (*((0.5,) * 3), 1))
):
super().__init__()
assert log2(image_size).is_integer(), 'image size must be a power of 2'
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
has_resblocks = num_resnet_blocks > 0
self.channels = channels
self.image_size = image_size
self.num_tokens = num_tokens
self.num_layers = num_layers
self.temperature = temperature
self.straight_through = straight_through
self.reinmax = reinmax
self.codebook = nn.Embedding(num_tokens, codebook_dim)
hdim = hidden_dim
enc_chans = [hidden_dim] * num_layers
dec_chans = list(reversed(enc_chans))
enc_chans = [channels, *enc_chans]
dec_init_chan = codebook_dim if not has_resblocks else dec_chans[0]
dec_chans = [dec_init_chan, *dec_chans]
enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
enc_layers = []
dec_layers = []
for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, enc_out, 4, stride = 2, padding = 1), nn.ReLU()))
dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, dec_out, 4, stride = 2, padding = 1), nn.ReLU()))
for _ in range(num_resnet_blocks):
dec_layers.insert(0, ResBlock(dec_chans[1]))
enc_layers.append(ResBlock(enc_chans[-1]))
if num_resnet_blocks > 0:
dec_layers.insert(0, nn.Conv2d(codebook_dim, dec_chans[1], 1))
enc_layers.append(nn.Conv2d(enc_chans[-1], num_tokens, 1))
dec_layers.append(nn.Conv2d(dec_chans[-1], channels, 1))
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
self.kl_div_loss_weight = kl_div_loss_weight
# take care of normalization within class
self.normalization = tuple(map(lambda t: t[:channels], normalization))
self._register_external_parameters()
def _register_external_parameters(self):
"""Register external parameters for DeepSpeed partitioning."""
if (
not distributed_utils.is_distributed
or not distributed_utils.using_backend(
distributed_utils.DeepSpeedBackend)
):
return
deepspeed = distributed_utils.backend.backend_module
deepspeed.zero.register_external_parameter(self, self.codebook.weight)
def norm(self, images):
if not exists(self.normalization):
return images
means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
means, stds = map(lambda t: rearrange(t, 'c -> () c () ()'), (means, stds))
images = images.clone()
images.sub_(means).div_(stds)
return images
@torch.no_grad()
@eval_decorator
def get_codebook_indices(self, images):
logits = self(images, return_logits = True)
codebook_indices = logits.argmax(dim = 1).flatten(1)
return codebook_indices
def decode(
self,
img_seq
):
image_embeds = self.codebook(img_seq)
b, n, d = image_embeds.shape
h = w = int(sqrt(n))
image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)
images = self.decoder(image_embeds)
return images
def forward(
self,
img,
return_loss = False,
return_recons = False,
return_logits = False,
temp = None
):
device, num_tokens, image_size, kl_div_loss_weight = img.device, self.num_tokens, self.image_size, self.kl_div_loss_weight
assert img.shape[-1] == image_size and img.shape[-2] == image_size, f'input must have the correct image size {image_size}'
img = self.norm(img)
logits = self.encoder(img)
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
temp = default(temp, self.temperature)
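        # relax the discrete codebook choice with (optionally straight-through) Gumbel-softmax
        # so the quantisation step stays differentiable during training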
one_hot = F.gumbel_softmax(logits, tau = temp, dim = 1, hard = self.straight_through)
if self.straight_through and self.reinmax:
# use reinmax for better second-order accuracy - https://arxiv.org/abs/2304.08612
# algorithm 2
one_hot = one_hot.detach()
π0 = logits.softmax(dim = 1)
π1 = (one_hot + (logits / temp).softmax(dim = 1)) / 2
π1 = ((log(π1) - logits).detach() + logits).softmax(dim = 1)
π2 = 2 * π1 - 0.5 * π0
one_hot = π2 - π2.detach() + one_hot
sampled = einsum('b n h w, n d -> b d h w', one_hot, self.codebook.weight)
out = self.decoder(sampled)
if not return_loss:
return out
# reconstruction loss
recon_loss = self.loss_fn(img, out)
# kl divergence
logits = rearrange(logits, 'b n h w -> b (h w) n')
log_qy = F.log_softmax(logits, dim = -1)
log_uniform = torch.log(torch.tensor([1. / num_tokens], device = device))
kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target = True)
loss = recon_loss + (kl_div * kl_div_loss_weight)
if not return_recons:
return loss
return loss, out
# main classes
class CLIP(nn.Module):
def __init__(
self,
*,
dim_text = 512,
dim_image = 512,
dim_latent = 512,
num_text_tokens = 10000,
text_enc_depth = 6,
text_seq_len = 256,
text_heads = 8,
num_visual_tokens = 512,
visual_enc_depth = 6,
visual_heads = 8,
visual_image_size = 256,
visual_patch_size = 32,
channels = 3
):
super().__init__()
self.text_emb = nn.Embedding(num_text_tokens, dim_text)
self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)
self.text_transformer = Transformer(causal = False, seq_len = text_seq_len, dim = dim_text, depth = text_enc_depth, heads = text_heads, rotary_emb = False)
self.to_text_latent = nn.Linear(dim_text, dim_latent, bias = False)
assert visual_image_size % visual_patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (visual_image_size // visual_patch_size) ** 2
patch_dim = channels * visual_patch_size ** 2
self.visual_patch_size = visual_patch_size
self.to_visual_embedding = nn.Linear(patch_dim, dim_image)
self.visual_pos_emb = nn.Embedding(num_patches, dim_image)
self.visual_transformer = Transformer(causal = False, seq_len = num_patches, dim = dim_image, depth = visual_enc_depth, heads = visual_heads, rotary_emb = False)
self.to_visual_latent = nn.Linear(dim_image, dim_latent, bias = False)
self.temperature = nn.Parameter(torch.tensor(1.))
def forward(
self,
text,
image,
text_mask = None,
return_loss = False
):
b, device, p = text.shape[0], text.device, self.visual_patch_size
text_emb = self.text_emb(text)
text_emb += self.text_pos_emb(torch.arange(text.shape[1], device = device))
image_patches = rearrange(image, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
image_emb = self.to_visual_embedding(image_patches)
image_emb += self.visual_pos_emb(torch.arange(image_emb.shape[1], device = device))
enc_text = self.text_transformer(text_emb, mask = text_mask)
enc_image = self.visual_transformer(image_emb)
if exists(text_mask):
text_latents = masked_mean(enc_text, text_mask, dim = 1)
else:
text_latents = enc_text.mean(dim = 1)
image_latents = enc_image.mean(dim = 1)
text_latents = self.to_text_latent(text_latents)
image_latents = self.to_visual_latent(image_latents)
text_latents, image_latents = map(lambda t: F.normalize(t, p = 2, dim = -1), (text_latents, image_latents))
temp = self.temperature.exp()
if not return_loss:
sim = einsum('n d, n d -> n', text_latents, image_latents) * temp
return sim
sim = einsum('i d, j d -> i j', text_latents, image_latents) * temp
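        # symmetric contrastive (InfoNCE-style) loss over the in-batch text-image similarity matrix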
labels = torch.arange(b, device = device)
loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
return loss
# main DALL-E class
class DALLE(nn.Module):
def __init__(
self,
*,
dim,
vae,
num_text_tokens = 10000,
text_seq_len = 256,
depth,
heads = 8,
dim_head = 64,
reversible = False,
attn_dropout = 0.,
ff_dropout = 0,
sparse_attn = False,
attn_types = None,
loss_img_weight = 7,
stable = False,
sandwich_norm = False,
shift_tokens = True,
rotary_emb = True,
shared_attn_ids = None,
shared_ff_ids = None,
share_input_output_emb = False,
optimize_for_inference = False,
):
super().__init__()
assert isinstance(vae, (DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE)), 'vae must be an instance of DiscreteVAE'
image_size = vae.image_size
num_image_tokens = vae.num_tokens
image_fmap_size = (vae.image_size // (2 ** vae.num_layers))
image_seq_len = image_fmap_size ** 2
num_text_tokens = num_text_tokens + text_seq_len # reserve unique padding tokens for each position (text seq len)
self.text_pos_emb = nn.Embedding(text_seq_len + 1, dim) if not rotary_emb else always(0) # +1 for <bos>
self.image_pos_emb = AxialPositionalEmbedding(dim, axial_shape = (image_fmap_size, image_fmap_size)) if not rotary_emb else always(0)
self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss
self.num_image_tokens = num_image_tokens
self.text_seq_len = text_seq_len
self.image_seq_len = image_seq_len
seq_len = text_seq_len + image_seq_len
total_tokens = num_text_tokens + num_image_tokens
self.total_tokens = total_tokens
self.total_seq_len = seq_len
self.vae = vae
set_requires_grad(self.vae, False) # freeze VAE from being trained
self.transformer = Transformer(
dim = dim,
causal = True,
seq_len = seq_len,
depth = depth,
heads = heads,
dim_head = dim_head,
reversible = reversible,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
attn_types = attn_types,
image_fmap_size = image_fmap_size,
sparse_attn = sparse_attn,
stable = stable,
sandwich_norm = sandwich_norm,
shift_tokens = shift_tokens,
rotary_emb = rotary_emb,
shared_attn_ids = shared_attn_ids,
shared_ff_ids = shared_ff_ids,
optimize_for_inference = optimize_for_inference,
)
self.stable = stable
if stable:
self.norm_by_max = DivideMax(dim = -1)
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, self.total_tokens),
)
if share_input_output_emb:
self.text_emb = SharedEmbedding(self.to_logits[1], 0, num_text_tokens)
self.image_emb = SharedEmbedding(self.to_logits[1], num_text_tokens, total_tokens)
else:
self.text_emb = nn.Embedding(num_text_tokens, dim)
self.image_emb = nn.Embedding(num_image_tokens, dim)
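        # precompute a mask so that text positions can only predict text tokens
        # and image positions can only predict image tokens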
seq_range = torch.arange(seq_len)
logits_range = torch.arange(total_tokens)
seq_range = rearrange(seq_range, 'n -> () n ()')
logits_range = rearrange(logits_range, 'd -> () () d')
logits_mask = (
((seq_range >= text_seq_len) & (logits_range < num_text_tokens)) |
((seq_range < text_seq_len) & (logits_range >= num_text_tokens))
)
self.register_buffer('logits_mask', logits_mask, persistent=False)
self.loss_img_weight = loss_img_weight
@torch.no_grad()
@eval_decorator
def generate_texts(
self,
tokenizer,
text = None,
*,
filter_thres = 0.5,
temperature = 1.
):
text_seq_len = self.text_seq_len
if text is None or text == "":
text_tokens = torch.tensor([[0]]).cuda()
else:
text_tokens = torch.tensor(tokenizer.tokenizer.encode(text)).cuda().unsqueeze(0)
for _ in range(text_tokens.shape[1], text_seq_len):
device = text_tokens.device
tokens = self.text_emb(text_tokens)
tokens += self.text_pos_emb(torch.arange(text_tokens.shape[1], device = device))
seq_len = tokens.shape[1]
output_transf = self.transformer(tokens)
if self.stable:
output_transf = self.norm_by_max(output_transf)
logits = self.to_logits(output_transf)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
logits = logits[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
sample = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
text_tokens = torch.cat((text_tokens, sample[:, None]), dim=-1)
padding_tokens = set(np.arange(self.text_seq_len) + (self.num_text_tokens - self.text_seq_len))
texts = [tokenizer.tokenizer.decode(text_token, pad_tokens=padding_tokens) for text_token in text_tokens]
return text_tokens, texts
@torch.no_grad()
@eval_decorator
def generate_images(
self,
text,
*,
clip = None,
filter_thres = 0.5,
temperature = 1.,
img = None,
num_init_img_tokens = None,
cond_scale = 1.,
use_cache = False,
):
vae, text_seq_len, image_seq_len, num_text_tokens = self.vae, self.text_seq_len, self.image_seq_len, self.num_text_tokens
total_len = text_seq_len + image_seq_len
text = text[:, :text_seq_len] # make sure text is within bounds
out = text
if exists(img):
image_size = vae.image_size
assert img.shape[1] == 3 and img.shape[2] == image_size and img.shape[3] == image_size, f'input image must have the correct image size {image_size}'
indices = vae.get_codebook_indices(img)
num_img_tokens = default(num_init_img_tokens, int(0.4375 * image_seq_len)) # OpenAI used 14 * 32 initial tokens to prime
assert num_img_tokens < image_seq_len, 'number of initial image tokens for priming must be less than the total image token sequence length'
indices = indices[:, :num_img_tokens]
out = torch.cat((out, indices), dim = -1)
prev_cache = None
cache = {} if use_cache else None
for cur_len in range(out.shape[1], total_len):
is_image = cur_len >= text_seq_len
text, image = out[:, :text_seq_len], out[:, text_seq_len:]
logits = self.forward_with_cond_scale(text, image, cond_scale = cond_scale, cache = cache)
logits = logits[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
sample = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
sample -= (num_text_tokens if is_image else 0) # offset sampled token if it is an image token, since logit space is composed of text and then image tokens
out = torch.cat((out, sample[:, None]), dim=-1)
text_seq = out[:, :text_seq_len]
img_seq = out[:, -image_seq_len:]
images = vae.decode(img_seq)
if exists(clip):
scores = clip(text_seq, images, return_loss = False)
return images, scores
return images
def forward_with_cond_scale(self, *args, cond_scale = 1, cache = None, **kwargs):
if cond_scale == 1:
return self(*args, **kwargs)
prev_cache = cache.copy() if exists(cache) else None
logits = self(*args, cache = cache, **kwargs)
# discovery by Katherine Crowson
# https://twitter.com/RiversHaveWings/status/1478093658716966912
null_cond_logits = self(*args, null_cond_prob = 1., cache = prev_cache, **kwargs)
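        # classifier-free guidance: push the conditional logits further from the
        # unconditional ones by a factor of cond_scale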
return null_cond_logits + (logits - null_cond_logits) * cond_scale
def forward(
self,
text,
image = None,
return_loss = False,
null_cond_prob = 0.,
cache = None,
):
assert text.shape[-1] == self.text_seq_len, f'the length {text.shape[-1]} of the text tokens you passed in does not have the correct length ({self.text_seq_len})'
batch, device, total_seq_len = text.shape[0], text.device, self.total_seq_len
# randomly remove text condition with <null_cond_prob> probability
if null_cond_prob > 0:
null_mask = prob_mask_like((batch,), null_cond_prob, device = device)
text *= rearrange(~null_mask, 'b -> b 1')
# make sure padding in text tokens get unique padding token id
text_range = torch.arange(self.text_seq_len, device = device) + (self.num_text_tokens - self.text_seq_len)
text = torch.where(text == 0, text_range, text)
# add <bos>
text = F.pad(text, (1, 0), value = 0)
tokens = self.text_emb(text)
tokens += self.text_pos_emb(torch.arange(text.shape[1], device = device))
seq_len = tokens.shape[1]
if exists(image) and not is_empty(image):
is_raw_image = len(image.shape) == 4
if is_raw_image:
image_size = self.vae.image_size
channels = self.vae.channels
assert tuple(image.shape[1:]) == (channels, image_size, image_size), f'invalid image of dimensions {image.shape} passed in during training'
image = self.vae.get_codebook_indices(image)
image_len = image.shape[1]
image_emb = self.image_emb(image)
image_emb += self.image_pos_emb(image_emb)
tokens = torch.cat((tokens, image_emb), dim = 1)
seq_len += image_len
# when training, if the length exceeds the total text + image length
        # remove the last token, since it does not need to be trained
if tokens.shape[1] > total_seq_len:
seq_len -= 1
tokens = tokens[:, :-1]
if self.stable:
alpha = 0.1
tokens = tokens * alpha + tokens.detach() * (1 - alpha)
if exists(cache) and cache.get('offset'):
tokens = tokens[:, -1:]
out = self.transformer(tokens, cache=cache)
if self.stable:
out = self.norm_by_max(out)
logits = self.to_logits(out)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
if exists(cache) and cache.get('offset'):
logits_mask = logits_mask[:, -1:]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
if exists(cache):
cache['offset'] = cache.get('offset', 0) + logits.shape[1]
if not return_loss:
return logits
assert exists(image), 'when training, image must be supplied'
offsetted_image = image + self.num_text_tokens
labels = torch.cat((text[:, 1:], offsetted_image), dim = 1)
logits = rearrange(logits, 'b n c -> b c n')
loss_text = F.cross_entropy(logits[:, :, :self.text_seq_len], labels[:, :self.text_seq_len])
loss_img = F.cross_entropy(logits[:, :, self.text_seq_len:], labels[:, self.text_seq_len:])
loss = (loss_text + self.loss_img_weight * loss_img) / (self.loss_img_weight + 1)
return loss
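# --- hedged usage sketch (editor's addition, not part of the original library) ---
# A minimal end-to-end example of the classes above: build a small DiscreteVAE, wrap it
# in DALLE, take one training step on random tensors, then sample an image. Every
# hyperparameter and tensor shape below is an illustrative assumption, not a recommended
# setting, and the sampling loop is slow on CPU.
if __name__ == '__main__':
    toy_vae = DiscreteVAE(
        image_size = 64,      # must be a power of 2
        num_tokens = 512,
        codebook_dim = 256,
        num_layers = 2,       # image_seq_len becomes (64 / 2**2) ** 2 = 256
        hidden_dim = 32
    )
    toy_dalle = DALLE(
        dim = 256,
        vae = toy_vae,
        num_text_tokens = 10000,
        text_seq_len = 128,
        depth = 2,
        heads = 4
    )
    text = torch.randint(0, 10000, (2, 128))
    images = torch.randn(2, 3, 64, 64)
    loss = toy_dalle(text, images, return_loss = True)   # joint text/image cross entropy
    loss.backward()
    sampled = toy_dalle.generate_images(text[:1])        # (1, 3, 64, 64)
    print(sampled.shape)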
| [] |