repo (string, 2-99) | file (string, 13-225) | code (string, 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (stringclasses, 1 value)
---|---|---|---|---|---|---
BVQI | BVQI-master/pyiqa/models/nima_model.py | from collections import OrderedDict
import torch
from pyiqa.metrics import calculate_metric
from pyiqa.utils.registry import MODEL_REGISTRY
from .general_iqa_model import GeneralIQAModel
@MODEL_REGISTRY.register()
class NIMAModel(GeneralIQAModel):
"""General module to train an IQA network."""
def feed_data(self, data):
self.img_input = data["img"].to(self.device)
self.gt_mos = data["mos_label"].to(self.device)
self.gt_mos_dist = data["mos_dist"].to(self.device)
self.use_ref = False
def setup_optimizers(self):
train_opt = self.opt["train"]
optim_opt = train_opt["optim"]
optim_params = [
{
"params": self.get_bare_model(self.net).base_model.parameters(),
"lr": optim_opt.pop("lr_basemodel"),
},
{
"params": self.get_bare_model(self.net).classifier.parameters(),
"lr": optim_opt.pop("lr_classifier"),
},
]
optim_type = optim_opt.pop("type")
self.optimizer = self.get_optimizer(optim_type, optim_params, **optim_opt)
self.optimizers.append(self.optimizer)
def test(self):
self.net.eval()
with torch.no_grad():
self.output_score = self.net(
self.img_input, return_mos=True, return_dist=False
)
self.net.train()
def optimize_parameters(self, current_iter):
self.optimizer.zero_grad()
self.output_mos, self.output_dist = self.net(
self.img_input, return_mos=True, return_dist=True
)
l_total = 0
loss_dict = OrderedDict()
if self.cri_mos:
l_mos = self.cri_mos(self.output_dist, self.gt_mos_dist)
l_total += l_mos
loss_dict["l_mos"] = l_mos
l_total.backward()
self.optimizer.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
# log metrics in training batch
pred_score = self.output_mos.squeeze(1).cpu().detach().numpy()
gt_mos = self.gt_mos.squeeze(1).cpu().detach().numpy()
for name, opt_ in self.opt["val"]["metrics"].items():
self.log_dict[f"train_metrics/{name}"] = calculate_metric(
[pred_score, gt_mos], opt_
)
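# Hedged note on configuration: `setup_optimizers` above assumes the training config
# provides `train.optim` with separate learning rates for the backbone and classifier
# head plus the optimizer type; any remaining keys are forwarded to the optimizer
# constructor. A minimal sketch of that dict (values are illustrative assumptions,
# not defaults shipped with this repo):
#
#     optim_opt = {
#         "type": "Adam",
#         "lr_basemodel": 1e-5,    # popped for base_model.parameters()
#         "lr_classifier": 1e-4,   # popped for classifier.parameters()
#         "weight_decay": 0.0,     # leftover keys go to **optim_opt
#     }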
| 2,311 | 31.111111 | 82 | py |
BVQI | BVQI-master/pyiqa/metrics/correlation_coefficient.py | import numpy as np
from scipy import stats
from scipy.optimize import curve_fit
from pyiqa.utils.registry import METRIC_REGISTRY
def fit_curve(x, y, curve_type="logistic_4params"):
r"""Fit the scale of predict scores to MOS scores using logistic regression suggested by VQEG.
The function with 4 params is more commonly used.
The 5 params function takes from DBCNN:
- https://github.com/zwx8981/DBCNN/blob/master/dbcnn/tools/verify_performance.m
"""
assert curve_type in [
"logistic_4params",
"logistic_5params",
], f"curve type should be in [logistic_4params, logistic_5params], but got {curve_type}."
betas_init_4params = [np.max(y), np.min(y), np.mean(x), np.std(x) / 4.0]
def logistic_4params(x, beta1, beta2, beta3, beta4):
yhat = (beta1 - beta2) / (1 + np.exp(-(x - beta3) / beta4)) + beta2
return yhat
betas_init_5params = [10, 0, np.mean(y), 0.1, 0.1]
def logistic_5params(x, beta1, beta2, beta3, beta4, beta5):
logistic_part = 0.5 - 1.0 / (1 + np.exp(beta2 * (x - beta3)))
yhat = beta1 * logistic_part + beta4 * x + beta5
return yhat
if curve_type == "logistic_4params":
logistic = logistic_4params
betas_init = betas_init_4params
elif curve_type == "logistic_5params":
logistic = logistic_5params
betas_init = betas_init_5params
betas, _ = curve_fit(logistic, x, y, p0=betas_init)
yhat = logistic(x, *betas)
return yhat
@METRIC_REGISTRY.register()
def calculate_rmse(x, y, fit_scale=None, eps=1e-8):
if fit_scale is not None:
x = fit_curve(x, y, fit_scale)
return np.sqrt(np.mean((x - y) ** 2) + eps)
@METRIC_REGISTRY.register()
def calculate_plcc(x, y, fit_scale=None):
if fit_scale is not None:
x = fit_curve(x, y, fit_scale)
return stats.pearsonr(x, y)[0]
@METRIC_REGISTRY.register()
def calculate_srcc(x, y):
return stats.spearmanr(x, y)[0]
@METRIC_REGISTRY.register()
def calculate_krcc(x, y):
return stats.kendalltau(x, y)[0]
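if __name__ == "__main__":
    # Minimal usage sketch with made-up scores (not from any dataset): the rank
    # metrics need no fitting, while PLCC/RMSE are usually computed after the
    # 4-parameter logistic mapping defined above.
    pred = np.array([0.21, 0.35, 0.42, 0.55, 0.68, 0.81, 0.90])
    mos = np.array([1.4, 2.0, 2.6, 3.1, 3.8, 4.3, 4.7])
    print("SRCC :", calculate_srcc(pred, mos))
    print("KRCC :", calculate_krcc(pred, mos))
    print("PLCC :", calculate_plcc(pred, mos, fit_scale="logistic_4params"))
    print("RMSE :", calculate_rmse(pred, mos, fit_scale="logistic_4params"))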
| 2,057 | 29.264706 | 98 | py |
BVQI | BVQI-master/pyiqa/metrics/other_metrics.py | import numpy as np
from scipy import stats
from pyiqa.utils.registry import METRIC_REGISTRY
@METRIC_REGISTRY.register()
def calculate_2afc_score(d0, d1, gts, **kwargs):
scores = (d0 < d1) * (1 - gts) + (d0 > d1) * gts + (d0 == d1) * 0.5
return np.mean(scores)
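if __name__ == "__main__":
    # Minimal sketch with toy values: d0/d1 are distances of two candidates from a
    # reference and gts is the fraction of judges preferring candidate 1; agreeing
    # with the majority scores 1 and a tie scores 0.5, so the mean below is ~0.833.
    d0 = np.array([0.10, 0.40, 0.30])
    d1 = np.array([0.25, 0.20, 0.30])
    gts = np.array([0.0, 1.0, 0.5])
    print(calculate_2afc_score(d0, d1, gts))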
| 271 | 23.727273 | 71 | py |
BVQI | BVQI-master/pyiqa/metrics/__init__.py | from copy import deepcopy
from pyiqa.utils.registry import METRIC_REGISTRY
from .correlation_coefficient import calculate_krcc, calculate_plcc, calculate_srcc
__all__ = [
"calculate_srcc",
"calculate_plcc",
"calculate_krcc",
]
def calculate_metric(data, opt):
"""Calculate metric from data and options.
Args:
opt (dict): Configuration. It must contain:
type (str): Model type.
"""
opt = deepcopy(opt)
metric_type = opt.pop("type")
metric = METRIC_REGISTRY.get(metric_type)(*data, **opt)
return metric
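# Hedged usage sketch: `opt` mirrors one entry of `val.metrics` in the training
# config, where "type" names a registered metric and the remaining keys become
# keyword arguments. Values below are illustrative only:
#
#     srcc = calculate_metric([pred_scores, gt_mos], {"type": "calculate_srcc"})
#     plcc = calculate_metric(
#         [pred_scores, gt_mos],
#         {"type": "calculate_plcc", "fit_scale": "logistic_4params"},
#     )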
| 567 | 21.72 | 83 | py |
BVQI | BVQI-master/pyiqa/.ipynb_checkpoints/version-checkpoint.py | # GENERATED VERSION FILE
# TIME: Wed Dec 7 13:57:45 2022
__version__ = "0.1.5"
__gitsha__ = "3619109"
version_info = (0, 1, 5)
| 128 | 20.5 | 32 | py |
BVQI | BVQI-master/pyiqa/utils/lmdb_util.py | import sys
from multiprocessing import Pool
from os import path as osp
import cv2
import lmdb
from tqdm import tqdm
def make_lmdb_from_imgs(
data_path,
lmdb_path,
img_path_list,
keys,
batch=5000,
compress_level=1,
multiprocessing_read=False,
n_thread=40,
map_size=None,
):
"""Make lmdb from images.
Contents of lmdb. The file structure is:
example.lmdb
├── data.mdb
├── lock.mdb
├── meta_info.txt
The data.mdb and lock.mdb are standard lmdb files and you can refer to
https://lmdb.readthedocs.io/en/release/ for more details.
The meta_info.txt is a specified txt file to record the meta information
of our datasets. It will be automatically created when preparing
datasets by our provided dataset tools.
Each line in the txt file records 1)image name (with extension),
2)image shape, and 3)compression level, separated by a white space.
For example, the meta information could be:
`000_00000000.png (720,1280,3) 1`, which means:
1) image name (with extension): 000_00000000.png;
2) image shape: (720,1280,3);
3) compression level: 1
We use the image name without extension as the lmdb key.
If `multiprocessing_read` is True, it will read all the images to memory
using multiprocessing. Thus, your server needs to have enough memory.
Args:
data_path (str): Data path for reading images.
lmdb_path (str): Lmdb save path.
img_path_list (str): Image path list.
keys (str): Used for lmdb keys.
batch (int): After processing batch images, lmdb commits.
Default: 5000.
compress_level (int): Compress level when encoding images. Default: 1.
multiprocessing_read (bool): Whether use multiprocessing to read all
the images to memory. Default: False.
n_thread (int): For multiprocessing.
map_size (int | None): Map size for lmdb env. If None, use the
estimated size from images. Default: None
"""
assert len(img_path_list) == len(keys), (
"img_path_list and keys should have the same length, "
f"but got {len(img_path_list)} and {len(keys)}"
)
print(f"Create lmdb for {data_path}, save to {lmdb_path}...")
print(f"Totoal images: {len(img_path_list)}")
if not lmdb_path.endswith(".lmdb"):
raise ValueError("lmdb_path must end with '.lmdb'.")
if osp.exists(lmdb_path):
print(f"Folder {lmdb_path} already exists. Exit.")
sys.exit(1)
if multiprocessing_read:
# read all the images to memory (multiprocessing)
dataset = {} # use dict to keep the order for multiprocessing
shapes = {}
print(f"Read images with multiprocessing, #thread: {n_thread} ...")
pbar = tqdm(total=len(img_path_list), unit="image")
def callback(arg):
"""get the image data and update pbar."""
key, dataset[key], shapes[key] = arg
pbar.update(1)
pbar.set_description(f"Read {key}")
pool = Pool(n_thread)
for path, key in zip(img_path_list, keys):
pool.apply_async(
read_img_worker,
args=(osp.join(data_path, path), key, compress_level),
callback=callback,
)
pool.close()
pool.join()
pbar.close()
print(f"Finish reading {len(img_path_list)} images.")
# create lmdb environment
if map_size is None:
# obtain data size for one image
img = cv2.imread(osp.join(data_path, img_path_list[0]), cv2.IMREAD_UNCHANGED)
_, img_byte = cv2.imencode(
".png", img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level]
)
data_size_per_img = img_byte.nbytes
print("Data size per image is: ", data_size_per_img)
data_size = data_size_per_img * len(img_path_list)
map_size = data_size * 10
env = lmdb.open(lmdb_path, map_size=map_size)
# write data to lmdb
pbar = tqdm(total=len(img_path_list), unit="chunk")
txn = env.begin(write=True)
txt_file = open(osp.join(lmdb_path, "meta_info.txt"), "w")
for idx, (path, key) in enumerate(zip(img_path_list, keys)):
pbar.update(1)
pbar.set_description(f"Write {key}")
key_byte = key.encode("ascii")
if multiprocessing_read:
img_byte = dataset[key]
h, w, c = shapes[key]
else:
_, img_byte, img_shape = read_img_worker(
osp.join(data_path, path), key, compress_level
)
h, w, c = img_shape
txn.put(key_byte, img_byte)
# write meta information
txt_file.write(f"{key}.png ({h},{w},{c}) {compress_level}\n")
if idx % batch == 0:
txn.commit()
txn = env.begin(write=True)
pbar.close()
txn.commit()
env.close()
txt_file.close()
print("\nFinish writing lmdb.")
def read_img_worker(path, key, compress_level):
"""Read image worker.
Args:
path (str): Image path.
key (str): Image key.
compress_level (int): Compress level when encoding images.
Returns:
str: Image key.
byte: Image byte.
tuple[int]: Image shape.
"""
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
if img.ndim == 2:
h, w = img.shape
c = 1
else:
h, w, c = img.shape
_, img_byte = cv2.imencode(
".png", img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level]
)
return (key, img_byte, (h, w, c))
class LmdbMaker:
"""LMDB Maker.
Args:
lmdb_path (str): Lmdb save path.
map_size (int): Map size for lmdb env. Default: 1024 ** 4, 1TB.
batch (int): After processing batch images, lmdb commits.
Default: 5000.
compress_level (int): Compress level when encoding images. Default: 1.
"""
def __init__(self, lmdb_path, map_size=1024 ** 4, batch=5000, compress_level=1):
if not lmdb_path.endswith(".lmdb"):
raise ValueError("lmdb_path must end with '.lmdb'.")
if osp.exists(lmdb_path):
print(f"Folder {lmdb_path} already exists. Exit.")
sys.exit(1)
self.lmdb_path = lmdb_path
self.batch = batch
self.compress_level = compress_level
self.env = lmdb.open(lmdb_path, map_size=map_size)
self.txn = self.env.begin(write=True)
self.txt_file = open(osp.join(lmdb_path, "meta_info.txt"), "w")
self.counter = 0
def put(self, img_byte, key, img_shape):
self.counter += 1
key_byte = key.encode("ascii")
self.txn.put(key_byte, img_byte)
# write meta information
h, w, c = img_shape
self.txt_file.write(f"{key}.png ({h},{w},{c}) {self.compress_level}\n")
if self.counter % self.batch == 0:
self.txn.commit()
self.txn = self.env.begin(write=True)
def close(self):
self.txn.commit()
self.env.close()
self.txt_file.close()
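if __name__ == "__main__":
    # Hedged sketch: write a single in-memory toy image with LmdbMaker. The temp
    # location and key below are placeholders, not paths used anywhere in this repo.
    import tempfile

    import numpy as np

    toy_img = np.zeros((8, 8, 3), dtype=np.uint8)
    _, toy_byte = cv2.imencode(".png", toy_img, [cv2.IMWRITE_PNG_COMPRESSION, 1])
    toy_lmdb = osp.join(tempfile.mkdtemp(), "toy.lmdb")
    maker = LmdbMaker(toy_lmdb, map_size=1024 ** 2)
    maker.put(toy_byte.tobytes(), "toy_00000000", (8, 8, 3))
    maker.close()
    print(f"Wrote toy lmdb to {toy_lmdb}")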
| 7,059 | 32.301887 | 85 | py |
BVQI | BVQI-master/pyiqa/utils/download_util.py | import math
import os
from urllib.parse import urlparse
import requests
from torch.hub import download_url_to_file, get_dir
from tqdm import tqdm
from .misc import sizeof_fmt
def download_file_from_google_drive(file_id, save_path):
"""Download files from google drive.
Ref:
https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive # noqa E501
Args:
file_id (str): File id.
save_path (str): Save path.
"""
session = requests.Session()
URL = "https://docs.google.com/uc?export=download"
params = {"id": file_id}
response = session.get(URL, params=params, stream=True)
token = get_confirm_token(response)
if token:
params["confirm"] = token
response = session.get(URL, params=params, stream=True)
# get file size
response_file_size = session.get(
URL, params=params, stream=True, headers={"Range": "bytes=0-2"}
)
if "Content-Range" in response_file_size.headers:
file_size = int(response_file_size.headers["Content-Range"].split("/")[1])
else:
file_size = None
save_response_content(response, save_path, file_size)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def save_response_content(response, destination, file_size=None, chunk_size=32768):
if file_size is not None:
pbar = tqdm(total=math.ceil(file_size / chunk_size), unit="chunk")
readable_file_size = sizeof_fmt(file_size)
else:
pbar = None
with open(destination, "wb") as f:
downloaded_size = 0
for chunk in response.iter_content(chunk_size):
downloaded_size += chunk_size
if pbar is not None:
pbar.update(1)
pbar.set_description(
f"Download {sizeof_fmt(downloaded_size)} / {readable_file_size}"
)
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if pbar is not None:
pbar.close()
def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
"""Load file form http url, will download models if necessary.
Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py
Args:
url (str): URL to be downloaded.
model_dir (str): The path to save the downloaded model. Should be a full path. If None, use pytorch hub_dir.
Default: None.
progress (bool): Whether to show the download progress. Default: True.
file_name (str): The downloaded file name. If None, use the file name in the url. Default: None.
Returns:
str: The path to the downloaded file.
"""
if model_dir is None: # use the pytorch hub_dir
hub_dir = get_dir()
model_dir = os.path.join(hub_dir, "checkpoints")
os.makedirs(model_dir, exist_ok=True)
parts = urlparse(url)
filename = os.path.basename(parts.path)
if file_name is not None:
filename = file_name
cached_file = os.path.abspath(os.path.join(model_dir, filename))
if not os.path.exists(cached_file):
print(f'Downloading: "{url}" to {cached_file}\n')
download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
return cached_file
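# Hedged usage sketch (the URL is a placeholder, not a checkpoint shipped with this
# repo): the file is downloaded into the default torch hub cache unless `model_dir`
# is given, and the cached path is returned on subsequent calls without re-downloading.
#
#     ckpt_path = load_file_from_url(
#         "https://example.com/pretrained/nima_vgg16.pth",
#         file_name="nima_vgg16.pth",
#     )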
| 3,398 | 31.371429 | 116 | py |
BVQI | BVQI-master/pyiqa/utils/registry.py | # Modified from: https://github.com/facebookresearch/fvcore/blob/master/fvcore/common/registry.py # noqa: E501
class Registry:
"""
The registry that provides name -> object mapping, to support third-party
users' custom modules.
To create a registry (e.g. a backbone registry):
.. code-block:: python
BACKBONE_REGISTRY = Registry('BACKBONE')
To register an object:
.. code-block:: python
@BACKBONE_REGISTRY.register()
class MyBackbone():
...
Or:
.. code-block:: python
BACKBONE_REGISTRY.register(MyBackbone)
"""
def __init__(self, name):
"""
Args:
name (str): the name of this registry
"""
self._name = name
self._obj_map = {}
def _do_register(self, name, obj):
assert name not in self._obj_map, (
f"An object named '{name}' was already registered "
f"in '{self._name}' registry!"
)
self._obj_map[name] = obj
def register(self, obj=None):
"""
        Register the given object under the name `obj.__name__`.
Can be used as either a decorator or not.
See docstring of this class for usage.
"""
if obj is None:
# used as a decorator
def deco(func_or_class):
name = func_or_class.__name__
self._do_register(name, func_or_class)
return func_or_class
return deco
# used as a function call
name = obj.__name__
self._do_register(name, obj)
def get(self, name):
ret = self._obj_map.get(name)
if ret is None:
raise KeyError(
f"No object named '{name}' found in '{self._name}' registry!"
)
return ret
def __contains__(self, name):
return name in self._obj_map
def __iter__(self):
return iter(self._obj_map.items())
def keys(self):
return self._obj_map.keys()
DATASET_REGISTRY = Registry("dataset")
ARCH_REGISTRY = Registry("arch")
MODEL_REGISTRY = Registry("model")
LOSS_REGISTRY = Registry("loss")
METRIC_REGISTRY = Registry("metric")
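if __name__ == "__main__":
    # Minimal sketch of the register/lookup round trip on a throwaway registry.
    TOY_REGISTRY = Registry("toy")

    @TOY_REGISTRY.register()
    def identity(x):
        return x

    assert TOY_REGISTRY.get("identity")(3) == 3
    assert "identity" in TOY_REGISTRY
    print(list(TOY_REGISTRY.keys()))  # ['identity']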
| 2,200 | 24.298851 | 111 | py |
BVQI | BVQI-master/pyiqa/utils/misc.py | import os
import random
import shutil
import time
from os import path as osp
import numpy as np
import torch
from .dist_util import master_only
def set_random_seed(seed):
"""Set random seeds."""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def get_time_str():
return time.strftime("%Y%m%d_%H%M%S", time.localtime())
def mkdir_and_rename(path):
"""mkdirs. If path exists, rename it with timestamp, create a new one, and move it to archive folder.
Args:
path (str): Folder path.
"""
if osp.exists(path):
new_name = path + "_archived_" + get_time_str()
new_name = new_name.replace("tb_logger", "tb_logger_archived")
print(f"Path already exists. Rename it to {new_name}", flush=True)
os.rename(path, new_name)
os.makedirs(path, exist_ok=True)
@master_only
def make_exp_dirs(opt):
"""Make dirs for experiments."""
path_opt = opt["path"].copy()
if opt["is_train"]:
mkdir_and_rename(path_opt.pop("experiments_root"))
else:
mkdir_and_rename(path_opt.pop("results_root"))
for key, path in path_opt.items():
if (
("strict_load" in key)
or ("pretrain_network" in key)
or ("resume" in key)
or ("param_key" in key)
):
continue
else:
os.makedirs(path, exist_ok=True)
def scandir(dir_path, suffix=None, recursive=False, full_path=False):
"""Scan a directory to find the interested files.
Args:
dir_path (str): Path of the directory.
suffix (str | tuple(str), optional): File suffix that we are
interested in. Default: None.
recursive (bool, optional): If set to True, recursively scan the
directory. Default: False.
full_path (bool, optional): If set to True, include the dir_path.
Default: False.
Returns:
A generator for all the interested files with relative paths.
"""
if (suffix is not None) and not isinstance(suffix, (str, tuple)):
raise TypeError('"suffix" must be a string or tuple of strings')
root = dir_path
def _scandir(dir_path, suffix, recursive):
for entry in os.scandir(dir_path):
if not entry.name.startswith(".") and entry.is_file():
if full_path:
return_path = entry.path
else:
return_path = osp.relpath(entry.path, root)
if suffix is None:
yield return_path
elif return_path.endswith(suffix):
yield return_path
else:
if recursive:
yield from _scandir(entry.path, suffix=suffix, recursive=recursive)
else:
continue
return _scandir(dir_path, suffix=suffix, recursive=recursive)
def check_resume(opt, resume_iter):
"""Check resume states and pretrain_network paths.
Args:
opt (dict): Options.
resume_iter (int): Resume iteration.
"""
if opt["path"]["resume_state"]:
# get all the networks
networks = [key for key in opt.keys() if key.startswith("network_")]
flag_pretrain = False
for network in networks:
if opt["path"].get(f"pretrain_{network}") is not None:
flag_pretrain = True
if flag_pretrain:
print("pretrain_network path will be ignored during resuming.")
# set pretrained model paths
for network in networks:
name = f"pretrain_{network}"
basename = network.replace("network_", "")
if opt["path"].get("ignore_resume_networks") is None or (
network not in opt["path"]["ignore_resume_networks"]
):
opt["path"][name] = osp.join(
opt["path"]["models"], f"net_{basename}_{resume_iter}.pth"
)
print(f"Set {name} to {opt['path'][name]}")
# change param_key to params in resume
param_keys = [key for key in opt["path"].keys() if key.startswith("param_key")]
for param_key in param_keys:
if opt["path"][param_key] == "params_ema":
opt["path"][param_key] = "params"
print(f"Set {param_key} to params")
def sizeof_fmt(size, suffix="B"):
"""Get human readable file size.
Args:
size (int): File size.
suffix (str): Suffix. Default: 'B'.
Return:
        str: Formatted file size.
"""
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(size) < 1024.0:
return f"{size:3.1f} {unit}{suffix}"
size /= 1024.0
return f"{size:3.1f} Y{suffix}"
| 4,827 | 30.555556 | 105 | py |
BVQI | BVQI-master/pyiqa/utils/logger.py | import datetime
import logging
import time
from .dist_util import get_dist_info, master_only
initialized_logger = {}
class AvgTimer:
def __init__(self, window=200):
self.window = window # average window
self.current_time = 0
self.total_time = 0
self.count = 0
self.avg_time = 0
self.start()
def start(self):
self.start_time = self.tic = time.time()
def record(self):
self.count += 1
self.toc = time.time()
self.current_time = self.toc - self.tic
self.total_time += self.current_time
# calculate average time
self.avg_time = self.total_time / self.count
# reset
if self.count > self.window:
self.count = 0
self.total_time = 0
self.tic = time.time()
def get_current_time(self):
return self.current_time
def get_avg_time(self):
return self.avg_time
class MessageLogger:
"""Message logger for printing.
Args:
opt (dict): Config. It contains the following keys:
name (str): Exp name.
logger (dict): Contains 'print_freq' (str) for logger interval.
train (dict): Contains 'total_iter' (int) for total iters.
use_tb_logger (bool): Use tensorboard logger.
start_iter (int): Start iter. Default: 1.
tb_logger (obj:`tb_logger`): Tensorboard logger. Default: None.
"""
def __init__(self, opt, start_iter=1, tb_logger=None):
self.exp_name = opt["name"]
self.interval = opt["logger"]["print_freq"]
self.start_iter = start_iter
self.max_iters = opt["train"]["total_iter"]
self.use_tb_logger = opt["logger"]["use_tb_logger"]
self.tb_logger = tb_logger
self.start_time = time.time()
self.logger = get_root_logger()
def reset_start_time(self):
self.start_time = time.time()
@master_only
def __call__(self, log_vars):
"""Format logging message.
Args:
log_vars (dict): It contains the following keys:
epoch (int): Epoch number.
iter (int): Current iter.
lrs (list): List for learning rates.
time (float): Iter time.
data_time (float): Data time for each iter.
"""
# epoch, iter, learning rates
epoch = log_vars.pop("epoch")
current_iter = log_vars.pop("iter")
lrs = log_vars.pop("lrs")
message = (
f"[{self.exp_name[:5]}..][epoch:{epoch:3d}, iter:{current_iter:8,d}, lr:("
)
for v in lrs:
message += f"{v:.3e},"
message += ")] "
# time and estimated time
if "time" in log_vars.keys():
iter_time = log_vars.pop("time")
data_time = log_vars.pop("data_time")
total_time = time.time() - self.start_time
time_sec_avg = total_time / (current_iter - self.start_iter + 1)
eta_sec = time_sec_avg * (self.max_iters - current_iter - 1)
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
message += f"[eta: {eta_str}, "
message += f"time (data): {iter_time:.3f} ({data_time:.3f})] "
# other items, especially losses
for k, v in log_vars.items():
message += f"{k}: {v:.4e} "
# tensorboard logger
if self.use_tb_logger and "debug" not in self.exp_name:
if k.startswith("l_"):
self.tb_logger.add_scalar(f"losses/{k}", v, current_iter)
else:
self.tb_logger.add_scalar(k, v, current_iter)
self.logger.info(message)
@master_only
def init_tb_logger(log_dir):
from torch.utils.tensorboard import SummaryWriter
tb_logger = SummaryWriter(log_dir=log_dir)
return tb_logger
@master_only
def init_wandb_logger(opt):
"""We now only use wandb to sync tensorboard log."""
import wandb
logger = get_root_logger()
project = opt["logger"]["wandb"]["project"]
resume_id = opt["logger"]["wandb"].get("resume_id")
if resume_id:
wandb_id = resume_id
resume = "allow"
logger.warning(f"Resume wandb logger with id={wandb_id}.")
else:
wandb_id = wandb.util.generate_id()
resume = "never"
wandb.init(
id=wandb_id,
resume=resume,
name=opt["name"],
config=opt,
project=project,
sync_tensorboard=True,
)
logger.info(f"Use wandb logger with id={wandb_id}; project={project}.")
def get_root_logger(logger_name="pyiqa", log_level=logging.INFO, log_file=None):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added.
Args:
        logger_name (str): root logger name. Default: 'pyiqa'.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = logging.getLogger(logger_name)
# if the logger has been initialized, just return it
if logger_name in initialized_logger:
return logger
format_str = "%(asctime)s %(levelname)s: %(message)s"
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(format_str))
logger.addHandler(stream_handler)
logger.propagate = False
rank, _ = get_dist_info()
if rank != 0:
logger.setLevel("ERROR")
elif log_file is not None:
logger.setLevel(log_level)
# add file handler
file_handler = logging.FileHandler(log_file, "w")
file_handler.setFormatter(logging.Formatter(format_str))
file_handler.setLevel(log_level)
logger.addHandler(file_handler)
initialized_logger[logger_name] = True
return logger
def get_env_info():
"""Get environment information.
Currently, only log the software version.
"""
import torch
import torchvision
# from basicsr.version import __version__
# msg = r"""
# ____ _ _____ ____
# / __ ) ____ _ _____ (_)_____/ ___/ / __ \
# / __ |/ __ `// ___// // ___/\__ \ / /_/ /
# / /_/ // /_/ /(__ )/ // /__ ___/ // _, _/
# /_____/ \__,_//____//_/ \___//____//_/ |_|
# ______ __ __ __ __
# / ____/____ ____ ____/ / / / __ __ _____ / /__ / /
# / / __ / __ \ / __ \ / __ / / / / / / // ___// //_/ / /
# / /_/ // /_/ // /_/ // /_/ / / /___/ /_/ // /__ / /< /_/
# \____/ \____/ \____/ \____/ /_____/\____/ \___//_/|_| (_)
# """
msg = (
"\nVersion Information: "
# f'\n\tBasicSR: {__version__}'
f"\n\tPyTorch: {torch.__version__}"
f"\n\tTorchVision: {torchvision.__version__}"
)
return msg
| 7,216 | 30.933628 | 86 | py |
BVQI | BVQI-master/pyiqa/utils/file_client.py | # Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py # noqa: E501
from abc import ABCMeta, abstractmethod
class BaseStorageBackend(metaclass=ABCMeta):
"""Abstract class of storage backends.
All backends need to implement two apis: ``get()`` and ``get_text()``.
``get()`` reads the file as a byte stream and ``get_text()`` reads the file
as texts.
"""
@abstractmethod
def get(self, filepath):
pass
@abstractmethod
def get_text(self, filepath):
pass
class MemcachedBackend(BaseStorageBackend):
"""Memcached storage backend.
Attributes:
server_list_cfg (str): Config file for memcached server list.
client_cfg (str): Config file for memcached client.
sys_path (str | None): Additional path to be appended to `sys.path`.
Default: None.
"""
def __init__(self, server_list_cfg, client_cfg, sys_path=None):
if sys_path is not None:
import sys
sys.path.append(sys_path)
try:
import mc
except ImportError:
raise ImportError("Please install memcached to enable MemcachedBackend.")
self.server_list_cfg = server_list_cfg
self.client_cfg = client_cfg
self._client = mc.MemcachedClient.GetInstance(
self.server_list_cfg, self.client_cfg
)
        # mc.pyvector serves as a pointer to a memory cache
self._mc_buffer = mc.pyvector()
def get(self, filepath):
filepath = str(filepath)
import mc
self._client.Get(filepath, self._mc_buffer)
value_buf = mc.ConvertBuffer(self._mc_buffer)
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class HardDiskBackend(BaseStorageBackend):
"""Raw hard disks storage backend."""
def get(self, filepath):
filepath = str(filepath)
with open(filepath, "rb") as f:
value_buf = f.read()
return value_buf
def get_text(self, filepath):
filepath = str(filepath)
with open(filepath, "r") as f:
value_buf = f.read()
return value_buf
class LmdbBackend(BaseStorageBackend):
"""Lmdb storage backend.
Args:
db_paths (str | list[str]): Lmdb database paths.
client_keys (str | list[str]): Lmdb client keys. Default: 'default'.
readonly (bool, optional): Lmdb environment parameter. If True,
disallow any write operations. Default: True.
lock (bool, optional): Lmdb environment parameter. If False, when
concurrent access occurs, do not lock the database. Default: False.
readahead (bool, optional): Lmdb environment parameter. If False,
disable the OS filesystem readahead mechanism, which may improve
random read performance when a database is larger than RAM.
Default: False.
Attributes:
db_paths (list): Lmdb database path.
_client (list): A list of several lmdb envs.
"""
def __init__(
self,
db_paths,
client_keys="default",
readonly=True,
lock=False,
readahead=False,
**kwargs,
):
try:
import lmdb
except ImportError:
raise ImportError("Please install lmdb to enable LmdbBackend.")
if isinstance(client_keys, str):
client_keys = [client_keys]
if isinstance(db_paths, list):
self.db_paths = [str(v) for v in db_paths]
elif isinstance(db_paths, str):
self.db_paths = [str(db_paths)]
assert len(client_keys) == len(self.db_paths), (
"client_keys and db_paths should have the same length, "
f"but received {len(client_keys)} and {len(self.db_paths)}."
)
self._client = {}
for client, path in zip(client_keys, self.db_paths):
self._client[client] = lmdb.open(
path, readonly=readonly, lock=lock, readahead=readahead, **kwargs
)
def get(self, filepath, client_key):
"""Get values according to the filepath from one lmdb named client_key.
Args:
filepath (str | obj:`Path`): Here, filepath is the lmdb key.
client_key (str): Used for distinguishing different lmdb envs.
"""
filepath = str(filepath)
assert client_key in self._client, (
f"client_key {client_key} is not " "in lmdb clients."
)
client = self._client[client_key]
with client.begin(write=False) as txn:
value_buf = txn.get(filepath.encode("ascii"))
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class FileClient(object):
"""A general file client to access files in different backend.
The client loads a file or text in a specified backend from its path
and return it as a binary file. it can also register other backend
accessor with a given name and backend class.
Attributes:
backend (str): The storage backend type. Options are "disk",
"memcached" and "lmdb".
client (:obj:`BaseStorageBackend`): The backend object.
"""
_backends = {
"disk": HardDiskBackend,
"memcached": MemcachedBackend,
"lmdb": LmdbBackend,
}
def __init__(self, backend="disk", **kwargs):
if backend not in self._backends:
raise ValueError(
f"Backend {backend} is not supported. Currently supported ones"
f" are {list(self._backends.keys())}"
)
self.backend = backend
self.client = self._backends[backend](**kwargs)
def get(self, filepath, client_key="default"):
# client_key is used only for lmdb, where different fileclients have
# different lmdb environments.
if self.backend == "lmdb":
return self.client.get(filepath, client_key)
else:
return self.client.get(filepath)
def get_text(self, filepath):
return self.client.get_text(filepath)
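if __name__ == "__main__":
    # Minimal sketch: the default disk backend simply returns raw bytes or text for a path.
    client = FileClient(backend="disk")
    raw = client.get(__file__)
    text = client.get_text(__file__)
    print(len(raw), text.splitlines()[0])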
| 6,151 | 31.723404 | 103 | py |
BVQI | BVQI-master/pyiqa/utils/img_util.py | import io
import math
import os
import cv2
import numpy as np
import torch
import torchvision.transforms.functional as TF
from PIL import Image
from torchvision.utils import make_grid
IMG_EXTENSIONS = [
".jpg",
".JPG",
".jpeg",
".JPEG",
".png",
".PNG",
".ppm",
".PPM",
".bmp",
".BMP",
".tif",
".TIF",
".tiff",
".TIFF",
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def imread2tensor(img_source, rgb=False):
"""Read image to tensor.
Args:
img_source (str, bytes, or PIL.Image): image filepath string, image contents as a bytearray or a PIL Image instance
rgb: convert input to RGB if true
"""
print("This is also the one")
if type(img_source) == bytes:
img = Image.open(io.BytesIO(img_source))
elif type(img_source) == str:
assert is_image_file(img_source), f"{img_source} is not a valid image file."
img = Image.open(img_source)
elif type(img_source) == Image.Image:
img = img_source
else:
raise Exception("Unsupported source type")
if rgb:
img = img.convert("RGB")
img_tensor = TF.to_tensor(img)
return img_tensor
def img2tensor(imgs, bgr2rgb=True, float32=True):
"""Numpy array to tensor.
Args:
imgs (list[ndarray] | ndarray): Input images.
bgr2rgb (bool): Whether to change bgr to rgb.
float32 (bool): Whether to change to float32.
Returns:
list[tensor] | tensor: Tensor images. If returned results only have
one element, just return tensor.
"""
def _totensor(img, bgr2rgb, float32):
if img.shape[2] == 3 and bgr2rgb:
if img.dtype == "float64":
img = img.astype("float32")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = torch.from_numpy(img.transpose(2, 0, 1))
if float32:
img = img.float()
return img
if isinstance(imgs, list):
return [_totensor(img, bgr2rgb, float32) for img in imgs]
else:
return _totensor(imgs, bgr2rgb, float32)
def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
"""Convert torch Tensors into image numpy arrays.
After clamping to [min, max], values will be normalized to [0, 1].
Args:
tensor (Tensor or list[Tensor]): Accept shapes:
1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
2) 3D Tensor of shape (3/1 x H x W);
3) 2D Tensor of shape (H x W).
Tensor channel should be in RGB order.
rgb2bgr (bool): Whether to change rgb to bgr.
out_type (numpy type): output types. If ``np.uint8``, transform outputs
to uint8 type with range [0, 255]; otherwise, float type with
range [0, 1]. Default: ``np.uint8``.
min_max (tuple[int]): min and max values for clamp.
Returns:
(Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of
shape (H x W). The channel order is BGR.
"""
if not (
torch.is_tensor(tensor)
or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))
):
raise TypeError(f"tensor or list of tensors expected, got {type(tensor)}")
if torch.is_tensor(tensor):
tensor = [tensor]
result = []
for _tensor in tensor:
_tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
_tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
n_dim = _tensor.dim()
if n_dim == 4:
img_np = make_grid(
_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False
).numpy()
img_np = img_np.transpose(1, 2, 0)
if rgb2bgr:
img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
elif n_dim == 3:
img_np = _tensor.numpy()
img_np = img_np.transpose(1, 2, 0)
if img_np.shape[2] == 1: # gray image
img_np = np.squeeze(img_np, axis=2)
else:
if rgb2bgr:
img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
elif n_dim == 2:
img_np = _tensor.numpy()
else:
raise TypeError(
f"Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}"
)
if out_type == np.uint8:
            # Unlike MATLAB, numpy.uint8() WILL NOT round by default.
img_np = (img_np * 255.0).round()
img_np = img_np.astype(out_type)
result.append(img_np)
if len(result) == 1:
result = result[0]
return result
def tensor2img_fast(tensor, rgb2bgr=True, min_max=(0, 1)):
"""This implementation is slightly faster than tensor2img.
It now only supports torch tensor with shape (1, c, h, w).
Args:
tensor (Tensor): Now only support torch tensor with (1, c, h, w).
rgb2bgr (bool): Whether to change rgb to bgr. Default: True.
min_max (tuple[int]): min and max values for clamp.
"""
output = tensor.squeeze(0).detach().clamp_(*min_max).permute(1, 2, 0)
output = (output - min_max[0]) / (min_max[1] - min_max[0]) * 255
output = output.type(torch.uint8).cpu().numpy()
if rgb2bgr:
output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
return output
def imfrombytes(content, flag="color", float32=False):
"""Read an image from bytes.
Args:
content (bytes): Image bytes got from files or other streams.
flag (str): Flags specifying the color type of a loaded image,
candidates are `color`, `grayscale` and `unchanged`.
float32 (bool): Whether to change to float32., If True, will also norm
to [0, 1]. Default: False.
Returns:
ndarray: Loaded image array.
"""
img_np = np.frombuffer(content, np.uint8)
imread_flags = {
"color": cv2.IMREAD_COLOR,
"grayscale": cv2.IMREAD_GRAYSCALE,
"unchanged": cv2.IMREAD_UNCHANGED,
}
img = cv2.imdecode(img_np, imread_flags[flag])
if float32:
img = img.astype(np.float32) / 255.0
return img
def imwrite(img, file_path, params=None, auto_mkdir=True):
"""Write image to file.
Args:
img (ndarray): Image array to be written.
file_path (str): Image file path.
params (None or list): Same as opencv's :func:`imwrite` interface.
auto_mkdir (bool): If the parent folder of `file_path` does not exist,
whether to create it automatically.
Returns:
bool: Successful or not.
"""
if auto_mkdir:
dir_name = os.path.abspath(os.path.dirname(file_path))
os.makedirs(dir_name, exist_ok=True)
ok = cv2.imwrite(file_path, img, params)
if not ok:
raise IOError("Failed in writing images.")
def crop_border(imgs, crop_border):
"""Crop borders of images.
Args:
imgs (list[ndarray] | ndarray): Images with shape (h, w, c).
crop_border (int): Crop border for each end of height and weight.
Returns:
list[ndarray]: Cropped images.
"""
if crop_border == 0:
return imgs
else:
if isinstance(imgs, list):
return [
v[crop_border:-crop_border, crop_border:-crop_border, ...] for v in imgs
]
else:
return imgs[crop_border:-crop_border, crop_border:-crop_border, ...]
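if __name__ == "__main__":
    # Minimal sketch: tensor -> uint8 BGR image -> tensor round trip on random data;
    # the reconstruction error is bounded by uint8 quantization.
    t = torch.rand(1, 3, 32, 32)
    bgr = tensor2img(t)                                   # (32, 32, 3) uint8, BGR
    back = img2tensor(bgr, bgr2rgb=True, float32=True) / 255.0
    print(bgr.shape, bgr.dtype, float((back - t[0]).abs().max()))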
| 7,559 | 31.033898 | 123 | py |
BVQI | BVQI-master/pyiqa/utils/options.py | import argparse
import random
from collections import OrderedDict
from os import path as osp
import torch
import yaml
from pyiqa.utils import set_random_seed
from pyiqa.utils.dist_util import get_dist_info, init_dist, master_only
def ordered_yaml():
"""Support OrderedDict for yaml.
Returns:
yaml Loader and Dumper.
"""
try:
from yaml import CDumper as Dumper
from yaml import CLoader as Loader
except ImportError:
from yaml import Dumper, Loader
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
Dumper.add_representer(OrderedDict, dict_representer)
Loader.add_constructor(_mapping_tag, dict_constructor)
return Loader, Dumper
def dict2str(opt, indent_level=1):
"""dict to string for printing options.
Args:
opt (dict): Option dict.
indent_level (int): Indent level. Default: 1.
Return:
(str): Option string for printing.
"""
msg = "\n"
for k, v in opt.items():
if isinstance(v, dict):
msg += " " * (indent_level * 2) + k + ":["
msg += dict2str(v, indent_level + 1)
msg += " " * (indent_level * 2) + "]\n"
else:
msg += " " * (indent_level * 2) + k + ": " + str(v) + "\n"
return msg
def _postprocess_yml_value(value):
# None
if value == "~" or value.lower() == "none":
return None
# bool
if value.lower() == "true":
return True
elif value.lower() == "false":
return False
# !!float number
if value.startswith("!!float"):
return float(value.replace("!!float", ""))
# number
if value.isdigit():
return int(value)
elif value.replace(".", "", 1).isdigit() and value.count(".") < 2:
return float(value)
# list
if value.startswith("["):
return eval(value)
# str
return value
def make_paths(opt, root_path):
if opt["is_train"]:
experiments_root = osp.join(root_path, "experiments", opt["name"])
opt["path"]["experiments_root"] = experiments_root
opt["path"]["models"] = osp.join(experiments_root, "models")
opt["path"]["training_states"] = osp.join(experiments_root, "training_states")
opt["path"]["log"] = experiments_root
opt["path"]["visualization"] = osp.join(experiments_root, "visualization")
# change some options for debug mode
if "debug" in opt["name"]:
if "val" in opt:
opt["val"]["val_freq"] = 7
opt["logger"]["print_freq"] = 1
opt["logger"]["save_checkpoint_freq"] = 7
else: # test
results_root = osp.join(root_path, "results", opt["name"])
opt["path"]["results_root"] = results_root
opt["path"]["log"] = results_root
opt["path"]["visualization"] = osp.join(results_root, "visualization")
def parse_options(root_path, is_train=True):
parser = argparse.ArgumentParser()
parser.add_argument(
"-opt", type=str, required=True, help="Path to option YAML file."
)
parser.add_argument(
"--launcher",
choices=["none", "pytorch", "slurm"],
default="none",
help="job launcher",
)
parser.add_argument("--auto_resume", action="store_true")
parser.add_argument("--debug", action="store_true")
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"--force_yml",
nargs="+",
default=None,
help="Force to update yml files. Examples: train:ema_decay=0.999",
)
args = parser.parse_args()
# parse yml to dict
with open(args.opt, mode="r") as f:
opt = yaml.load(f, Loader=ordered_yaml()[0])
# distributed settings
if args.launcher == "none":
opt["dist"] = False
print("Disable distributed.", flush=True)
else:
opt["dist"] = True
if args.launcher == "slurm" and "dist_params" in opt:
init_dist(args.launcher, **opt["dist_params"])
else:
init_dist(args.launcher)
opt["rank"], opt["world_size"] = get_dist_info()
# random seed
seed = opt.get("manual_seed")
if seed is None:
seed = random.randint(1, 10000)
opt["manual_seed"] = seed
set_random_seed(seed + opt["rank"])
# force to update yml options
if args.force_yml is not None:
for entry in args.force_yml:
# now do not support creating new keys
keys, value = entry.split("=")
keys, value = keys.strip(), value.strip()
value = _postprocess_yml_value(value)
eval_str = "opt"
for key in keys.split(":"):
eval_str += f'["{key}"]'
eval_str += "=value"
# using exec function
exec(eval_str)
opt["auto_resume"] = args.auto_resume
opt["is_train"] = is_train
# debug setting
if args.debug and not opt["name"].startswith("debug"):
opt["name"] = "debug_" + opt["name"]
if opt["num_gpu"] == "auto":
opt["num_gpu"] = torch.cuda.device_count()
# datasets
for phase, dataset in opt["datasets"].items():
# for multiple datasets, e.g., val_1, val_2; test_1, test_2
phase = phase.split("_")[0]
dataset["phase"] = phase
if "scale" in opt:
dataset["scale"] = opt["scale"]
if dataset.get("dataroot_gt") is not None:
dataset["dataroot_gt"] = osp.expanduser(dataset["dataroot_gt"])
if dataset.get("dataroot_lq") is not None:
dataset["dataroot_lq"] = osp.expanduser(dataset["dataroot_lq"])
# paths
for key, val in opt["path"].items():
if (val is not None) and ("resume_state" in key or "pretrain_network" in key):
opt["path"][key] = osp.expanduser(val)
make_paths(opt, root_path)
return opt, args
@master_only
def copy_opt_file(opt_file, experiments_root):
# copy the yml file to the experiment root
import sys
import time
from shutil import copyfile
cmd = " ".join(sys.argv)
filename = osp.join(experiments_root, osp.basename(opt_file))
copyfile(opt_file, filename)
with open(filename, "r+") as f:
lines = f.readlines()
lines.insert(0, f"# GENERATE TIME: {time.asctime()}\n# CMD:\n# {cmd}\n\n")
f.seek(0)
f.writelines(lines)
| 6,559 | 29.943396 | 86 | py |
BVQI | BVQI-master/pyiqa/utils/__init__.py | from .color_util import rgb2ycbcr, ycbcr2rgb
from .file_client import FileClient
from .img_util import (
crop_border,
imfrombytes,
img2tensor,
imread2tensor,
imwrite,
tensor2img,
)
from .logger import (
AvgTimer,
MessageLogger,
get_env_info,
get_root_logger,
init_tb_logger,
init_wandb_logger,
)
from .misc import (
check_resume,
get_time_str,
make_exp_dirs,
mkdir_and_rename,
scandir,
set_random_seed,
sizeof_fmt,
)
__all__ = [
# file_client.py
"FileClient",
# img_util.py
"imread2tensor",
"img2tensor",
"tensor2img",
"imfrombytes",
"imwrite",
"crop_border",
# logger.py
"MessageLogger",
"AvgTimer",
"init_tb_logger",
"init_wandb_logger",
"get_root_logger",
"get_env_info",
# misc.py
"set_random_seed",
"get_time_str",
"mkdir_and_rename",
"make_exp_dirs",
"scandir",
"check_resume",
"sizeof_fmt",
# color util
"rgb2ycbcr",
"ycbcr2rgb",
]
| 1,024 | 16.672414 | 44 | py |
BVQI | BVQI-master/pyiqa/utils/color_util.py | r"""Color space conversion functions
Created by: https://github.com/photosynthesis-team/piq/blob/master/piq/functional/colour_conversion.py
Modified by: Chaofeng Chen (https://github.com/chaofengc)
"""
from typing import Dict, Union
import torch
def safe_frac_pow(x: torch.Tensor, p) -> torch.Tensor:
EPS = torch.finfo(x.dtype).eps
return torch.sign(x) * torch.abs(x + EPS).pow(p)
def to_y_channel(
img: torch.Tensor, out_data_range: float = 1.0, color_space: str = "yiq"
) -> torch.Tensor:
r"""Change to Y channel
Args:
image tensor: tensor with shape (N, 3, H, W) in range [0, 1].
Returns:
image tensor: Y channel of the input tensor
"""
assert (
img.ndim == 4 and img.shape[1] == 3
), "input image tensor should be RGB image batches with shape (N, 3, H, W)"
color_space = color_space.lower()
if color_space == "yiq":
img = rgb2yiq(img)
elif color_space == "ycbcr":
img = rgb2ycbcr(img)
elif color_space == "lhm":
img = rgb2lhm(img)
out_img = img[:, [0], :, :] * out_data_range
if out_data_range >= 255:
# differentiable round with pytorch
out_img = out_img - out_img.detach() + out_img.round()
return out_img
def rgb2ycbcr(x: torch.Tensor) -> torch.Tensor:
r"""Convert a batch of RGB images to a batch of YCbCr images
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
Args:
x: Batch of images with shape (N, 3, H, W). RGB color space, range [0, 1].
Returns:
Batch of images with shape (N, 3, H, W). YCbCr color space.
"""
weights_rgb_to_ycbcr = torch.tensor(
[
[65.481, -37.797, 112.0],
[128.553, -74.203, -93.786],
[24.966, 112.0, -18.214],
]
).to(x)
bias_rgb_to_ycbcr = torch.tensor([16, 128, 128]).view(1, 3, 1, 1).to(x)
x_ycbcr = (
torch.matmul(x.permute(0, 2, 3, 1), weights_rgb_to_ycbcr).permute(0, 3, 1, 2)
+ bias_rgb_to_ycbcr
)
x_ycbcr = x_ycbcr / 255.0
return x_ycbcr
def ycbcr2rgb(x: torch.Tensor) -> torch.Tensor:
r"""Convert a batch of YCbCr images to a batch of RGB images
It implements the inversion of the above rgb2ycbcr function.
Args:
x: Batch of images with shape (N, 3, H, W). YCbCr color space, range [0, 1].
Returns:
Batch of images with shape (N, 3, H, W). RGB color space.
"""
x = x * 255.0
weights_ycbcr_to_rgb = (
255.0
* torch.tensor(
[
[0.00456621, 0.00456621, 0.00456621],
[0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0],
]
).to(x)
)
bias_ycbcr_to_rgb = (
torch.tensor([-222.921, 135.576, -276.836]).view(1, 3, 1, 1).to(x)
)
x_rgb = (
torch.matmul(x.permute(0, 2, 3, 1), weights_ycbcr_to_rgb).permute(0, 3, 1, 2)
+ bias_ycbcr_to_rgb
)
x_rgb = x_rgb / 255.0
return x_rgb
def rgb2lmn(x: torch.Tensor) -> torch.Tensor:
r"""Convert a batch of RGB images to a batch of LMN images
Args:
x: Batch of images with shape (N, 3, H, W). RGB colour space.
Returns:
Batch of images with shape (N, 3, H, W). LMN colour space.
"""
weights_rgb_to_lmn = (
torch.tensor([[0.06, 0.63, 0.27], [0.30, 0.04, -0.35], [0.34, -0.6, 0.17]])
.t()
.to(x)
)
x_lmn = torch.matmul(x.permute(0, 2, 3, 1), weights_rgb_to_lmn).permute(0, 3, 1, 2)
return x_lmn
def rgb2xyz(x: torch.Tensor) -> torch.Tensor:
r"""Convert a batch of RGB images to a batch of XYZ images
Args:
x: Batch of images with shape (N, 3, H, W). RGB colour space.
Returns:
Batch of images with shape (N, 3, H, W). XYZ colour space.
"""
mask_below = (x <= 0.04045).to(x)
mask_above = (x > 0.04045).to(x)
tmp = x / 12.92 * mask_below + torch.pow((x + 0.055) / 1.055, 2.4) * mask_above
weights_rgb_to_xyz = torch.tensor(
[
[0.4124564, 0.3575761, 0.1804375],
[0.2126729, 0.7151522, 0.0721750],
[0.0193339, 0.1191920, 0.9503041],
]
).to(x)
x_xyz = torch.matmul(tmp.permute(0, 2, 3, 1), weights_rgb_to_xyz.t()).permute(
0, 3, 1, 2
)
return x_xyz
def xyz2lab(
x: torch.Tensor, illuminant: str = "D50", observer: str = "2"
) -> torch.Tensor:
r"""Convert a batch of XYZ images to a batch of LAB images
Args:
x: Batch of images with shape (N, 3, H, W). XYZ colour space.
illuminant: {“A”, “D50”, “D55”, “D65”, “D75”, “E”}, optional. The name of the illuminant.
observer: {“2”, “10”}, optional. The aperture angle of the observer.
Returns:
Batch of images with shape (N, 3, H, W). LAB colour space.
"""
epsilon = 0.008856
kappa = 903.3
illuminants: Dict[str, Dict] = {
"A": {
"2": (1.098466069456375, 1, 0.3558228003436005),
"10": (1.111420406956693, 1, 0.3519978321919493),
},
"D50": {
"2": (0.9642119944211994, 1, 0.8251882845188288),
"10": (0.9672062750333777, 1, 0.8142801513128616),
},
"D55": {
"2": (0.956797052643698, 1, 0.9214805860173273),
"10": (0.9579665682254781, 1, 0.9092525159847462),
},
"D65": {
"2": (0.95047, 1.0, 1.08883), # This was: `lab_ref_white`
"10": (0.94809667673716, 1, 1.0730513595166162),
},
"D75": {
"2": (0.9497220898840717, 1, 1.226393520724154),
"10": (0.9441713925645873, 1, 1.2064272211720228),
},
"E": {"2": (1.0, 1.0, 1.0), "10": (1.0, 1.0, 1.0)},
}
illuminants_to_use = (
torch.tensor(illuminants[illuminant][observer]).to(x).view(1, 3, 1, 1)
)
tmp = x / illuminants_to_use
mask_below = tmp <= epsilon
mask_above = tmp > epsilon
tmp = (
safe_frac_pow(tmp, 1.0 / 3.0) * mask_above
+ (kappa * tmp + 16.0) / 116.0 * mask_below
)
weights_xyz_to_lab = torch.tensor(
[[0, 116.0, 0], [500.0, -500.0, 0], [0, 200.0, -200.0]]
).to(x)
bias_xyz_to_lab = torch.tensor([-16.0, 0.0, 0.0]).to(x).view(1, 3, 1, 1)
x_lab = (
torch.matmul(tmp.permute(0, 2, 3, 1), weights_xyz_to_lab.t()).permute(
0, 3, 1, 2
)
+ bias_xyz_to_lab
)
return x_lab
def rgb2lab(x: torch.Tensor, data_range: Union[int, float] = 255) -> torch.Tensor:
r"""Convert a batch of RGB images to a batch of LAB images
Args:
x: Batch of images with shape (N, 3, H, W). RGB colour space.
data_range: dynamic range of the input image.
Returns:
Batch of images with shape (N, 3, H, W). LAB colour space.
"""
return xyz2lab(rgb2xyz(x / float(data_range)))
def rgb2yiq(x: torch.Tensor) -> torch.Tensor:
r"""Convert a batch of RGB images to a batch of YIQ images
Args:
x: Batch of images with shape (N, 3, H, W). RGB colour space.
Returns:
Batch of images with shape (N, 3, H, W). YIQ colour space.
"""
yiq_weights = (
torch.tensor(
[
[0.299, 0.587, 0.114],
[0.5959, -0.2746, -0.3213],
[0.2115, -0.5227, 0.3112],
]
)
.t()
.to(x)
)
x_yiq = torch.matmul(x.permute(0, 2, 3, 1), yiq_weights).permute(0, 3, 1, 2)
return x_yiq
def rgb2lhm(x: torch.Tensor) -> torch.Tensor:
r"""Convert a batch of RGB images to a batch of LHM images
Args:
x: Batch of images with shape (N, 3, H, W). RGB colour space.
Returns:
Batch of images with shape (N, 3, H, W). LHM colour space.
Reference:
https://arxiv.org/pdf/1608.07433.pdf
"""
lhm_weights = (
torch.tensor([[0.2989, 0.587, 0.114], [0.3, 0.04, -0.35], [0.34, -0.6, 0.17]])
.t()
.to(x)
)
x_lhm = torch.matmul(x.permute(0, 2, 3, 1), lhm_weights).permute(0, 3, 1, 2)
return x_lhm
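if __name__ == "__main__":
    # Minimal sketch: BT.601 RGB <-> YCbCr round trip and Y-channel extraction on a
    # random batch; the reconstruction error should be tiny.
    x = torch.rand(2, 3, 16, 16)
    x_rec = ycbcr2rgb(rgb2ycbcr(x))
    print(float((x - x_rec).abs().max()))
    print(to_y_channel(x, color_space="ycbcr").shape)  # torch.Size([2, 1, 16, 16])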
| 8,153 | 28.759124 | 102 | py |
BVQI | BVQI-master/pyiqa/utils/dist_util.py | # Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/dist_utils.py # noqa: E501
import functools
import os
import subprocess
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
def init_dist(launcher, backend="nccl", **kwargs):
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method("spawn")
if launcher == "pytorch":
_init_dist_pytorch(backend, **kwargs)
elif launcher == "slurm":
_init_dist_slurm(backend, **kwargs)
else:
raise ValueError(f"Invalid launcher type: {launcher}")
def _init_dist_pytorch(backend, **kwargs):
rank = int(os.environ["RANK"])
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(rank % num_gpus)
dist.init_process_group(backend=backend, **kwargs)
def _init_dist_slurm(backend, port=None):
"""Initialize slurm distributed training environment.
If argument ``port`` is not specified, then the master port will be system
environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system
environment variable, then a default port ``29500`` will be used.
Args:
backend (str): Backend of torch.distributed.
port (int, optional): Master port. Defaults to None.
"""
proc_id = int(os.environ["SLURM_PROCID"])
ntasks = int(os.environ["SLURM_NTASKS"])
node_list = os.environ["SLURM_NODELIST"]
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(proc_id % num_gpus)
addr = subprocess.getoutput(f"scontrol show hostname {node_list} | head -n1")
# specify master port
if port is not None:
os.environ["MASTER_PORT"] = str(port)
elif "MASTER_PORT" in os.environ:
pass # use MASTER_PORT in the environment variable
else:
# 29500 is torch.distributed default port
os.environ["MASTER_PORT"] = "29500"
os.environ["MASTER_ADDR"] = addr
os.environ["WORLD_SIZE"] = str(ntasks)
os.environ["LOCAL_RANK"] = str(proc_id % num_gpus)
os.environ["RANK"] = str(proc_id)
dist.init_process_group(backend=backend)
def get_dist_info():
if dist.is_available():
initialized = dist.is_initialized()
else:
initialized = False
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
return rank, world_size
def master_only(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rank, _ = get_dist_info()
if rank == 0:
return func(*args, **kwargs)
return wrapper
| 2,608 | 30.433735 | 102 | py |
BVQI | BVQI-master/pyiqa/data/livechallenge_dataset.py | import os
import pickle
import numpy as np
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import augment, transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class LIVEChallengeDataset(data.Dataset):
"""The LIVE Challenge Dataset introduced by
D. Ghadiyaram and A.C. Bovik,
"Massive Online Crowdsourced Study of Subjective and Objective Picture Quality,"
IEEE Transactions on Image Processing, 2016
url: https://live.ece.utexas.edu/research/ChallengeDB/index.html
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(LIVEChallengeDataset, self).__init__()
self.opt = opt
target_img_folder = os.path.join(opt["dataroot_target"], "Images")
self.paths_mos = read_meta_info_file(target_img_folder, opt["meta_info_file"])
# remove first 7 training images as previous works
self.paths_mos = self.paths_mos[7:]
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
splits = split_dict[split_index][opt["phase"]]
self.paths_mos = [self.paths_mos[i] for i in splits]
transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
transform_list += transform_mapping(k, v)
img_range = opt.get("img_range", 1.0)
transform_list += [
tf.ToTensor(),
tf.Lambda(lambda x: x * img_range),
]
self.trans = tf.Compose(transform_list)
def __getitem__(self, index):
img_path = self.paths_mos[index][0]
mos_label = self.paths_mos[index][1]
img_pil = Image.open(img_path)
img_tensor = self.trans(img_pil)
mos_label_tensor = torch.Tensor([mos_label])
return {"img": img_tensor, "mos_label": mos_label_tensor, "img_path": img_path}
def __len__(self):
return len(self.paths_mos)
| 2,524 | 33.589041 | 87 | py |
BVQI | BVQI-master/pyiqa/data/general_nr_dataset.py | import pickle
import cv2
import numpy as np
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import PairedToTensor, augment, transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class GeneralNRDataset(data.Dataset):
"""General No Reference dataset with meta info file.
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(GeneralNRDataset, self).__init__()
self.opt = opt
if opt.get("override_phase", None) is None:
self.phase = opt["phase"]
else:
self.phase = opt["override_phase"]
target_img_folder = opt["dataroot_target"]
self.paths_mos = read_meta_info_file(target_img_folder, opt["meta_info_file"])
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
splits = split_dict[split_index][self.phase]
self.paths_mos = [self.paths_mos[i] for i in splits]
dmos_max = opt.get("dmos_max", 0.0)
if dmos_max:
self.use_dmos = True
self.dmos_max = opt.get("dmos_max")
else:
self.use_dmos = False
self.mos_max = opt.get("mos_max", 1.0)
transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
transform_list += transform_mapping(k, v)
self.img_range = opt.get("img_range", 1.0)
transform_list += [
PairedToTensor(),
]
self.trans = tf.Compose(transform_list)
def __getitem__(self, index):
img_path = self.paths_mos[index][0]
mos_label = self.paths_mos[index][1]
img_pil = Image.open(img_path).convert("RGB")
img_tensor = self.trans(img_pil) * self.img_range
if self.use_dmos:
mos_label = self.dmos_max - mos_label
else:
mos_label /= self.mos_max
mos_label_tensor = torch.Tensor([mos_label])
return {"img": img_tensor, "mos_label": mos_label_tensor, "img_path": img_path}
def __len__(self):
return len(self.paths_mos)
| 2,691 | 31.433735 | 87 | py |
BVQI | BVQI-master/pyiqa/data/multiscale_trans_util.py | r"""Preprocessing utils for Multiscale Transformer
Reference: https://github.com/google-research/google-research/blob/5c622d523c/musiq/model/preprocessing.py
Modified: Chaofeng Chen (https://github.com/chaofengc)
"""
import math
from os import path as osp
import numpy as np
import torch
from torch.nn import functional as F
def extract_image_patches(x, kernel, stride=1, dilation=1):
"""
Ref: https://stackoverflow.com/a/65886666
"""
# Do TF 'SAME' Padding
b, c, h, w = x.shape
h2 = math.ceil(h / stride)
w2 = math.ceil(w / stride)
pad_row = (h2 - 1) * stride + (kernel - 1) * dilation + 1 - h
pad_col = (w2 - 1) * stride + (kernel - 1) * dilation + 1 - w
x = F.pad(
x, (pad_col // 2, pad_col - pad_col // 2, pad_row // 2, pad_row - pad_row // 2)
)
# Extract patches
patches = F.unfold(x, kernel, dilation, stride=stride)
return patches
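# Usage sketch (illustrative): with the TF-style 'SAME' padding above, a 224x224
# RGB batch and kernel=stride=32 gives a 7x7 grid of flattened patches:
#   x = torch.rand(1, 3, 224, 224)
#   extract_image_patches(x, kernel=32, stride=32).shape  # torch.Size([1, 3072, 49])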
def _ceil_divide_int(x, y):
"""Returns ceil(x / y) as int"""
return int(math.ceil(x / y))
def resize_preserve_aspect_ratio(image, h, w, longer_side_length):
    """Aspect-ratio-preserving resize (the TF reference uses ResizeMethod.GAUSSIAN; this PyTorch port uses bicubic interpolation).
Args:
image: The image tensor (n_crops, c, h, w).
h: Height of the input image.
w: Width of the input image.
longer_side_length: The length of the longer side after resizing.
Returns:
A tuple of [Image after resizing, Resized height, Resized width].
"""
# Computes the height and width after aspect-ratio-preserving resizing.
ratio = longer_side_length / max(h, w)
rh = round(h * ratio)
rw = round(w * ratio)
resized = F.interpolate(image, (rh, rw), mode="bicubic", align_corners=False)
return resized, rh, rw
def _pad_or_cut_to_max_seq_len(x, max_seq_len):
    """Pads (or cuts) the patch tensor to `max_seq_len` along the last dimension.
Args:
x: input tensor of shape (n_crops, c, num_patches).
max_seq_len: max sequence length.
Returns:
The padded or cropped tensor of shape (n_crops, c, max_seq_len).
"""
# Shape of x (n_crops, c, num_patches)
# Padding makes sure that # patches > max_seq_length. Note that it also
# makes the input mask zero for shorter input.
n_crops, c, num_patches = x.shape
paddings = torch.zeros((n_crops, c, max_seq_len)).to(x)
x = torch.cat([x, paddings], dim=-1)
x = x[:, :, :max_seq_len]
return x
def get_hashed_spatial_pos_emb_index(grid_size, count_h, count_w):
    """Get hashed spatial positional embedding index for each patch.
    The count_h x count_w patch grid is hashed to grid_size x grid_size.
    Args:
        grid_size: grid size G for the hash-based spatial positional embedding.
        count_h: number of patch rows (patches along the image height).
        count_w: number of patch columns (patches along the image width).
    Returns:
        Hashed positions of shape (1, count_h * count_w). Each value corresponds
        to the hashed position index in [0, grid_size x grid_size).
"""
pos_emb_grid = torch.arange(grid_size).float()
pos_emb_hash_w = pos_emb_grid.reshape(1, 1, grid_size)
pos_emb_hash_w = F.interpolate(pos_emb_hash_w, (count_w), mode="nearest")
pos_emb_hash_w = pos_emb_hash_w.repeat(1, count_h, 1)
pos_emb_hash_h = pos_emb_grid.reshape(1, 1, grid_size)
pos_emb_hash_h = F.interpolate(pos_emb_hash_h, (count_h), mode="nearest")
pos_emb_hash_h = pos_emb_hash_h.transpose(1, 2)
pos_emb_hash_h = pos_emb_hash_h.repeat(1, 1, count_w)
pos_emb_hash = pos_emb_hash_h * grid_size + pos_emb_hash_w
pos_emb_hash = pos_emb_hash.reshape(1, -1)
return pos_emb_hash
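# Worked example (illustrative): hashing a 7x7 patch grid onto a 10x10 grid,
#   get_hashed_spatial_pos_emb_index(10, 7, 7).shape  # torch.Size([1, 49])
# where each value lies in [0, 100) and encodes its bucket as row * grid_size + col.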
def _extract_patches_and_positions_from_image(
image,
patch_size,
patch_stride,
hse_grid_size,
n_crops,
h,
w,
c,
scale_id,
max_seq_len,
):
"""Extracts patches and positional embedding lookup indexes for a given image.
Args:
image: the input image of shape [n_crops, c, h, w]
patch_size: the extracted patch size.
patch_stride: stride for extracting patches.
hse_grid_size: grid size for hash-based spatial positional embedding.
n_crops: number of crops from the input image.
h: height of the image.
w: width of the image.
c: number of channels for the image.
scale_id: the scale id for the image in the multi-scale representation.
max_seq_len: maximum sequence length for the number of patches. If
max_seq_len = 0, no patch is returned. If max_seq_len < 0 then we return
all the patches.
Returns:
        A concatenated tensor of (patches, HSE, SCE, input mask) with shape
        (n_crops, patch_size * patch_size * c + 3, num_patches).
"""
n_crops, c, h, w = image.shape
p = extract_image_patches(image, patch_size, patch_stride)
assert p.shape[1] == c * patch_size ** 2
count_h = _ceil_divide_int(h, patch_stride)
count_w = _ceil_divide_int(w, patch_stride)
# Shape (1, num_patches)
spatial_p = get_hashed_spatial_pos_emb_index(hse_grid_size, count_h, count_w)
# Shape (n_crops, 1, num_patches)
spatial_p = spatial_p.unsqueeze(1).repeat(n_crops, 1, 1)
scale_p = torch.ones_like(spatial_p) * scale_id
mask_p = torch.ones_like(spatial_p)
# Concatenating is a hacky way to pass both patches, positions and input
# mask to the model.
# Shape (n_crops, c * patch_size * patch_size + 3, num_patches)
out = torch.cat([p, spatial_p.to(p), scale_p.to(p), mask_p.to(p)], dim=1)
if max_seq_len >= 0:
out = _pad_or_cut_to_max_seq_len(out, max_seq_len)
return out
def get_multiscale_patches(
image,
patch_size=32,
patch_stride=32,
hse_grid_size=10,
longer_side_lengths=[224, 384],
max_seq_len_from_original_res=None,
):
"""Extracts image patches from multi-scale representation.
Args:
image: input image tensor with shape [n_crops, 3, h, w]
patch_size: patch size.
patch_stride: patch stride.
hse_grid_size: Hash-based positional embedding grid size.
longer_side_lengths: List of longer-side lengths for each scale in the
multi-scale representation.
max_seq_len_from_original_res: Maximum number of patches extracted from
original resolution. <0 means use all the patches from the original
resolution. None means we don't use original resolution input.
Returns:
        A concatenated tensor of (patches, HSE, SCE, input mask). The tensor shape
is (n_crops, num_patches, patch_size * patch_size * c + 3).
"""
# Sorting the list to ensure a deterministic encoding of the scale position.
longer_side_lengths = sorted(longer_side_lengths)
if len(image.shape) == 3:
image = image.unsqueeze(0)
n_crops, c, h, w = image.shape
outputs = []
for scale_id, longer_size in enumerate(longer_side_lengths):
resized_image, rh, rw = resize_preserve_aspect_ratio(image, h, w, longer_size)
max_seq_len = int(np.ceil(longer_size / patch_stride) ** 2)
out = _extract_patches_and_positions_from_image(
resized_image,
patch_size,
patch_stride,
hse_grid_size,
n_crops,
rh,
rw,
c,
scale_id,
max_seq_len,
)
outputs.append(out)
if max_seq_len_from_original_res is not None:
out = _extract_patches_and_positions_from_image(
image,
patch_size,
patch_stride,
hse_grid_size,
n_crops,
h,
w,
c,
len(longer_side_lengths),
max_seq_len_from_original_res,
)
outputs.append(out)
outputs = torch.cat(outputs, dim=-1)
return outputs.transpose(1, 2)
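# Usage sketch (illustrative): with the defaults above (patch_size=32,
# longer_side_lengths=[224, 384]) the per-scale sequence lengths are
# ceil(224/32)**2 = 49 and ceil(384/32)**2 = 144, so
#   get_multiscale_patches(torch.rand(1, 3, 500, 400)).shape
# is (1, 193, 3075), i.e. (n_crops, 49 + 144, 3 * 32 * 32 + 3).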
| 7,785 | 33.451327 | 106 | py |
BVQI | BVQI-master/pyiqa/data/general_fr_dataset.py | import pickle
import numpy as np
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import PairedToTensor, augment, transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class GeneralFRDataset(data.Dataset):
"""General Full Reference dataset with meta info file.
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(GeneralFRDataset, self).__init__()
self.opt = opt
if opt.get("override_phase", None) is None:
self.phase = opt["phase"]
else:
self.phase = opt["override_phase"]
target_img_folder = opt["dataroot_target"]
ref_img_folder = opt.get("dataroot_ref", None)
self.paths_mos = read_meta_info_file(
target_img_folder, opt["meta_info_file"], mode="fr", ref_dir=ref_img_folder
)
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
splits = split_dict[split_index][self.phase]
self.paths_mos = [self.paths_mos[i] for i in splits]
dmos_max = opt.get("dmos_max", 0.0)
if dmos_max:
self.use_dmos = True
self.dmos_max = opt.get("dmos_max")
else:
self.use_dmos = False
self.mos_max = opt.get("mos_max", 1.0)
# do paired transform first and then do common transform
paired_transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
paired_transform_list += transform_mapping(k, v)
self.paired_trans = tf.Compose(paired_transform_list)
common_transform_list = []
self.img_range = opt.get("img_range", 1.0)
common_transform_list += [
PairedToTensor(),
]
self.common_trans = tf.Compose(common_transform_list)
def __getitem__(self, index):
ref_path = self.paths_mos[index][0]
img_path = self.paths_mos[index][1]
mos_label = self.paths_mos[index][2]
img_pil = Image.open(img_path).convert("RGB")
ref_pil = Image.open(ref_path).convert("RGB")
img_pil, ref_pil = self.paired_trans([img_pil, ref_pil])
img_tensor = self.common_trans(img_pil) * self.img_range
ref_tensor = self.common_trans(ref_pil) * self.img_range
if self.use_dmos:
mos_label = (self.dmos_max - mos_label) / self.dmos_max
else:
mos_label /= self.mos_max
mos_label_tensor = torch.Tensor([mos_label])
return {
"img": img_tensor,
"ref_img": ref_tensor,
"mos_label": mos_label_tensor,
"img_path": img_path,
"ref_img_path": ref_path,
}
def __len__(self):
return len(self.paths_mos)
| 3,366 | 32.67 | 87 | py |
BVQI | BVQI-master/pyiqa/data/prefetch_dataloader.py | import queue as Queue
import threading
import torch
from torch.utils.data import DataLoader
class PrefetchGenerator(threading.Thread):
"""A general prefetch generator.
Ref:
https://stackoverflow.com/questions/7323664/python-generator-pre-fetch
Args:
generator: Python generator.
num_prefetch_queue (int): Number of prefetch queue.
"""
def __init__(self, generator, num_prefetch_queue):
threading.Thread.__init__(self)
self.queue = Queue.Queue(num_prefetch_queue)
self.generator = generator
self.daemon = True
self.start()
def run(self):
for item in self.generator:
self.queue.put(item)
self.queue.put(None)
def __next__(self):
next_item = self.queue.get()
if next_item is None:
raise StopIteration
return next_item
def __iter__(self):
return self
class PrefetchDataLoader(DataLoader):
"""Prefetch version of dataloader.
Ref:
https://github.com/IgorSusmelj/pytorch-styleguide/issues/5#
TODO:
Need to test on single gpu and ddp (multi-gpu). There is a known issue in
ddp.
Args:
num_prefetch_queue (int): Number of prefetch queue.
kwargs (dict): Other arguments for dataloader.
"""
def __init__(self, num_prefetch_queue, **kwargs):
self.num_prefetch_queue = num_prefetch_queue
super(PrefetchDataLoader, self).__init__(**kwargs)
def __iter__(self):
return PrefetchGenerator(super().__iter__(), self.num_prefetch_queue)
class CPUPrefetcher:
"""CPU prefetcher.
Args:
loader: Dataloader.
"""
def __init__(self, loader):
self.ori_loader = loader
self.loader = iter(loader)
def next(self):
try:
return next(self.loader)
except StopIteration:
return None
def reset(self):
self.loader = iter(self.ori_loader)
class CUDAPrefetcher:
"""CUDA prefetcher.
Ref:
https://github.com/NVIDIA/apex/issues/304#
    It may consume more GPU memory.
Args:
loader: Dataloader.
opt (dict): Options.
"""
def __init__(self, loader, opt):
self.ori_loader = loader
self.loader = iter(loader)
self.opt = opt
self.stream = torch.cuda.Stream()
self.device = torch.device("cuda" if opt["num_gpu"] != 0 else "cpu")
self.preload()
def preload(self):
try:
self.batch = next(self.loader) # self.batch is a dict
except StopIteration:
self.batch = None
return None
# put tensors to gpu
with torch.cuda.stream(self.stream):
for k, v in self.batch.items():
if torch.is_tensor(v):
self.batch[k] = self.batch[k].to(
device=self.device, non_blocking=True
)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
self.preload()
return batch
def reset(self):
self.loader = iter(self.ori_loader)
self.preload()
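# Usage sketch (illustrative): `opt` must provide 'num_gpu' for device selection.
#   prefetcher = CUDAPrefetcher(train_loader, opt)
#   batch = prefetcher.next()
#   while batch is not None:
#       ...  # training step; tensors in `batch` are already on the target device
#       batch = prefetcher.next()
#   prefetcher.reset()  # before starting the next epoch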
| 3,174 | 23.612403 | 77 | py |
BVQI | BVQI-master/pyiqa/data/data_sampler.py | import math
import torch
from torch.utils.data.sampler import Sampler
class EnlargedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
Modified from torch.utils.data.distributed.DistributedSampler
    Support enlarging the dataset for iteration-based training, saving time
    when restarting the dataloader after each epoch.
Args:
dataset (torch.utils.data.Dataset): Dataset used for sampling.
num_replicas (int | None): Number of processes participating in
the training. It is usually the world_size.
rank (int | None): Rank of the current process within num_replicas.
ratio (int): Enlarging ratio. Default: 1.
"""
def __init__(self, dataset, num_replicas, rank, ratio=1, use_shuffle=True):
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = math.ceil(len(self.dataset) * ratio / self.num_replicas)
self.total_size = self.num_samples * self.num_replicas
self.use_shuffle = use_shuffle
def __iter__(self):
# deterministically shuffle based on epoch
if self.use_shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(self.total_size, generator=g).tolist()
else:
indices = torch.arange(self.total_size).tolist()
dataset_size = len(self.dataset)
indices = [v % dataset_size for v in indices]
# subsample
indices = indices[self.rank : self.total_size : self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
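# Usage sketch (illustrative): `ratio` virtually enlarges the dataset so that one
# dataloader "epoch" can cover many passes without restarting the workers.
#   sampler = EnlargedSampler(train_set, num_replicas=world_size, rank=rank, ratio=100)
#   loader = torch.utils.data.DataLoader(train_set, batch_size=8, sampler=sampler)
#   sampler.set_epoch(epoch)  # call once per epoch for a deterministic shuffle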
| 1,817 | 32.666667 | 83 | py |
BVQI | BVQI-master/pyiqa/data/ava_dataset.py | import itertools
import os
import pickle
import random
import cv2
import numpy as np
import pandas as pd
import torch
import torchvision.transforms as tf
# avoid possible image read error in AVA dataset
from PIL import Image, ImageFile
from torch.utils import data as data
from pyiqa.data.transforms import transform_mapping
from pyiqa.utils.registry import DATASET_REGISTRY
ImageFile.LOAD_TRUNCATED_IMAGES = True
@DATASET_REGISTRY.register()
class AVADataset(data.Dataset):
"""AVA dataset, proposed by
Murray, Naila, Luca Marchesotti, and Florent Perronnin.
"AVA: A large-scale database for aesthetic visual analysis."
In 2012 IEEE conference on computer vision and pattern recognition (CVPR), pp. 2408-2415. IEEE, 2012.
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(AVADataset, self).__init__()
self.opt = opt
target_img_folder = opt["dataroot_target"]
self.dataroot = target_img_folder
self.paths_mos = pd.read_csv(opt["meta_info_file"]).values.tolist()
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
# use val_num for validation
val_num = 2000
train_split = split_dict[split_index]["train"]
val_split = split_dict[split_index]["val"]
train_split = train_split + val_split[:-val_num]
val_split = val_split[-val_num:]
split_dict[split_index]["train"] = train_split
split_dict[split_index]["val"] = val_split
if opt.get("override_phase", None) is None:
splits = split_dict[split_index][opt["phase"]]
else:
splits = split_dict[split_index][opt["override_phase"]]
self.paths_mos = [self.paths_mos[i] for i in splits]
self.mean_mos = np.array([item[1] for item in self.paths_mos]).mean()
# self.paths_mos.sort(key=lambda x: x[1])
# n = 32
# n = 4
# tmp_list = [self.paths_mos[i: i + n] for i in range(0, len(self.paths_mos), n)]
# random.shuffle(tmp_list)
# self.paths_mos = list(itertools.chain.from_iterable(tmp_list))
transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
transform_list += transform_mapping(k, v)
img_range = opt.get("img_range", 1.0)
transform_list += [
tf.ToTensor(),
tf.Lambda(lambda x: x * img_range),
]
self.trans = tf.Compose(transform_list)
def __getitem__(self, index):
img_path = os.path.join(self.dataroot, self.paths_mos[index][0])
mos_label = self.paths_mos[index][1]
mos_dist = self.paths_mos[index][2:12]
img_pil = Image.open(img_path).convert("RGB")
width, height = img_pil.size
img_tensor = self.trans(img_pil)
img_tensor2 = self.trans(img_pil)
mos_label_tensor = torch.Tensor([mos_label])
mos_dist_tensor = torch.Tensor(mos_dist) / sum(mos_dist)
if self.opt.get("list_imgs", False):
tmp_tensor = torch.zeros((img_tensor.shape[0], 800, 800))
h, w = img_tensor.shape[1:]
tmp_tensor[..., :h, :w] = img_tensor
return {
"img": tmp_tensor,
"mos_label": mos_label_tensor,
"mos_dist": mos_dist_tensor,
"org_size": torch.tensor([height, width]),
"img_path": img_path,
"mean_mos": torch.tensor(self.mean_mos),
}
else:
return {
"img": img_tensor,
"img2": img_tensor2,
"mos_label": mos_label_tensor,
"mos_dist": mos_dist_tensor,
"org_size": torch.tensor([height, width]),
"img_path": img_path,
"mean_mos": torch.tensor(self.mean_mos),
}
def __len__(self):
return len(self.paths_mos)
| 4,319 | 33.56 | 105 | py |
BVQI | BVQI-master/pyiqa/data/data_util.py | import csv
import os
from os import path as osp
import cv2
import numpy as np
import torch
from torch.nn import functional as F
from pyiqa.data.transforms import mod_crop
from pyiqa.utils import img2tensor, scandir
def read_meta_info_file(img_dir, meta_info_file, mode="nr", ref_dir=None):
    """Generate paths and mos labels from a meta information file.
Each line in the meta information file contains the image names and
mos label, separated by a white space.
    Example of a meta information file:
- For NR datasets: name, mos(mean), std
```
100.bmp 32.56107532210109 19.12472638223644
```
- For FR datasets: ref_name, dist_name, mos(mean), std
```
I01.bmp I01_01_1.bmp 5.51429 0.13013
```
Args:
img_dir (str): directory path containing images
meta_info_file (str): Path to the meta information file.
Returns:
list[str, float]: image paths, mos label
"""
with open(meta_info_file, "r") as fin:
csvreader = csv.reader(fin)
name_mos = list(csvreader)[1:]
paths_mos = []
for item in name_mos:
if mode == "fr":
if ref_dir is None:
ref_dir = img_dir
ref_name, img_name, mos = item[:3]
ref_path = osp.join(ref_dir, ref_name)
img_path = osp.join(img_dir, img_name)
paths_mos.append([ref_path, img_path, float(mos)])
elif mode == "nr":
img_name, mos = item[:2]
img_path = osp.join(img_dir, img_name)
paths_mos.append([img_path, float(mos)])
return paths_mos
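# Usage sketch (the paths below are placeholders, not shipped files):
#   read_meta_info_file('koniq10k/512x384', 'meta_info.csv')
#     -> [['koniq10k/512x384/100.bmp', 32.56], ...]                  # mode='nr'
#   read_meta_info_file(dist_dir, 'meta_info.csv', mode='fr', ref_dir=ref_dir)
#     -> [[ref_path, dist_path, 5.51], ...]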
def read_img_seq(path, require_mod_crop=False, scale=1, return_imgname=False):
"""Read a sequence of images from a given folder path.
Args:
path (list[str] | str): List of image paths or image folder path.
require_mod_crop (bool): Require mod crop for each image.
Default: False.
scale (int): Scale factor for mod_crop. Default: 1.
return_imgname(bool): Whether return image names. Default False.
Returns:
Tensor: size (t, c, h, w), RGB, [0, 1].
list[str]: Returned image name list.
"""
if isinstance(path, list):
img_paths = path
else:
img_paths = sorted(list(scandir(path, full_path=True)))
imgs = [cv2.imread(v).astype(np.float32) / 255.0 for v in img_paths]
if require_mod_crop:
imgs = [mod_crop(img, scale) for img in imgs]
imgs = img2tensor(imgs, bgr2rgb=True, float32=True)
imgs = torch.stack(imgs, dim=0)
if return_imgname:
imgnames = [osp.splitext(osp.basename(path))[0] for path in img_paths]
return imgs, imgnames
else:
return imgs
def generate_frame_indices(crt_idx, max_frame_num, num_frames, padding="reflection"):
"""Generate an index list for reading `num_frames` frames from a sequence
of images.
Args:
crt_idx (int): Current center index.
max_frame_num (int): Max number of the sequence of images (from 1).
num_frames (int): Reading num_frames frames.
padding (str): Padding mode, one of
'replicate' | 'reflection' | 'reflection_circle' | 'circle'
Examples: current_idx = 0, num_frames = 5
The generated frame indices under different padding mode:
replicate: [0, 0, 0, 1, 2]
reflection: [2, 1, 0, 1, 2]
reflection_circle: [4, 3, 0, 1, 2]
circle: [3, 4, 0, 1, 2]
Returns:
list[int]: A list of indices.
"""
assert num_frames % 2 == 1, "num_frames should be an odd number."
assert padding in (
"replicate",
"reflection",
"reflection_circle",
"circle",
), f"Wrong padding mode: {padding}."
max_frame_num = max_frame_num - 1 # start from 0
num_pad = num_frames // 2
indices = []
for i in range(crt_idx - num_pad, crt_idx + num_pad + 1):
if i < 0:
if padding == "replicate":
pad_idx = 0
elif padding == "reflection":
pad_idx = -i
elif padding == "reflection_circle":
pad_idx = crt_idx + num_pad - i
else:
pad_idx = num_frames + i
elif i > max_frame_num:
if padding == "replicate":
pad_idx = max_frame_num
elif padding == "reflection":
pad_idx = max_frame_num * 2 - i
elif padding == "reflection_circle":
pad_idx = (crt_idx - num_pad) - (i - max_frame_num)
else:
pad_idx = i - num_frames
else:
pad_idx = i
indices.append(pad_idx)
return indices
def paired_paths_from_lmdb(folders, keys):
"""Generate paired paths from lmdb files.
Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is:
lq.lmdb
├── data.mdb
├── lock.mdb
├── meta_info.txt
The data.mdb and lock.mdb are standard lmdb files and you can refer to
https://lmdb.readthedocs.io/en/release/ for more details.
The meta_info.txt is a specified txt file to record the meta information
of our datasets. It will be automatically created when preparing
datasets by our provided dataset tools.
Each line in the txt file records
1)image name (with extension),
2)image shape,
3)compression level, separated by a white space.
Example: `baboon.png (120,125,3) 1`
We use the image name without extension as the lmdb key.
Note that we use the same key for the corresponding lq and gt images.
Args:
folders (list[str]): A list of folder path. The order of list should
be [input_folder, gt_folder].
keys (list[str]): A list of keys identifying folders. The order should
            be consistent with folders, e.g., ['lq', 'gt'].
Note that this key is different from lmdb keys.
Returns:
list[str]: Returned path list.
"""
assert len(folders) == 2, (
"The len of folders should be 2 with [input_folder, gt_folder]. "
f"But got {len(folders)}"
)
assert (
len(keys) == 2
), f"The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}"
input_folder, gt_folder = folders
input_key, gt_key = keys
if not (input_folder.endswith(".lmdb") and gt_folder.endswith(".lmdb")):
raise ValueError(
f"{input_key} folder and {gt_key} folder should both in lmdb "
f"formats. But received {input_key}: {input_folder}; "
f"{gt_key}: {gt_folder}"
)
# ensure that the two meta_info files are the same
with open(osp.join(input_folder, "meta_info.txt")) as fin:
input_lmdb_keys = [line.split(".")[0] for line in fin]
with open(osp.join(gt_folder, "meta_info.txt")) as fin:
gt_lmdb_keys = [line.split(".")[0] for line in fin]
if set(input_lmdb_keys) != set(gt_lmdb_keys):
raise ValueError(
f"Keys in {input_key}_folder and {gt_key}_folder are different."
)
else:
paths = []
for lmdb_key in sorted(input_lmdb_keys):
paths.append(
dict([(f"{input_key}_path", lmdb_key), (f"{gt_key}_path", lmdb_key)])
)
return paths
def paired_paths_from_meta_info_file(folders, keys, meta_info_file, filename_tmpl):
    """Generate paired paths from a meta information file.
Each line in the meta information file contains the image names and
image shape (usually for gt), separated by a white space.
    Example of a meta information file:
```
0001_s001.png (480,480,3)
0001_s002.png (480,480,3)
```
Args:
folders (list[str]): A list of folder path. The order of list should
be [input_folder, gt_folder].
keys (list[str]): A list of keys identifying folders. The order should
            be consistent with folders, e.g., ['lq', 'gt'].
meta_info_file (str): Path to the meta information file.
filename_tmpl (str): Template for each filename. Note that the
template excludes the file extension. Usually the filename_tmpl is
for files in the input folder.
Returns:
list[str]: Returned path list.
"""
assert len(folders) == 2, (
"The len of folders should be 2 with [input_folder, gt_folder]. "
f"But got {len(folders)}"
)
assert (
len(keys) == 2
), f"The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}"
input_folder, gt_folder = folders
input_key, gt_key = keys
with open(meta_info_file, "r") as fin:
gt_names = [line.strip().split(" ")[0] for line in fin]
paths = []
for gt_name in gt_names:
basename, ext = osp.splitext(osp.basename(gt_name))
input_name = f"{filename_tmpl.format(basename)}{ext}"
input_path = osp.join(input_folder, input_name)
gt_path = osp.join(gt_folder, gt_name)
paths.append(
dict([(f"{input_key}_path", input_path), (f"{gt_key}_path", gt_path)])
)
return paths
def paired_paths_from_folder(folders, keys, filename_tmpl):
"""Generate paired paths from folders.
Args:
folders (list[str]): A list of folder path. The order of list should
be [input_folder, gt_folder].
keys (list[str]): A list of keys identifying folders. The order should
            be consistent with folders, e.g., ['lq', 'gt'].
filename_tmpl (str): Template for each filename. Note that the
template excludes the file extension. Usually the filename_tmpl is
for files in the input folder.
Returns:
list[str]: Returned path list.
"""
assert len(folders) == 2, (
"The len of folders should be 2 with [input_folder, gt_folder]. "
f"But got {len(folders)}"
)
assert (
len(keys) == 2
), f"The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}"
input_folder, gt_folder = folders
input_key, gt_key = keys
input_paths = list(scandir(input_folder))
gt_paths = list(scandir(gt_folder))
assert len(input_paths) == len(gt_paths), (
f"{input_key} and {gt_key} datasets have different number of images: "
f"{len(input_paths)}, {len(gt_paths)}."
)
paths = []
for gt_path in gt_paths:
basename, ext = osp.splitext(osp.basename(gt_path))
input_name = f"{filename_tmpl.format(basename)}{ext}"
input_path = osp.join(input_folder, input_name)
assert input_name in input_paths, f"{input_name} is not in {input_key}_paths."
gt_path = osp.join(gt_folder, gt_path)
paths.append(
dict([(f"{input_key}_path", input_path), (f"{gt_key}_path", gt_path)])
)
return paths
def paths_from_folder(folder):
"""Generate paths from folder.
Args:
folder (str): Folder path.
Returns:
list[str]: Returned path list.
"""
paths = list(scandir(folder))
paths = [osp.join(folder, path) for path in paths]
return paths
def paths_from_lmdb(folder):
"""Generate paths from lmdb.
Args:
folder (str): Folder path.
Returns:
list[str]: Returned path list.
"""
if not folder.endswith(".lmdb"):
        raise ValueError(f"Folder {folder} should be in lmdb format.")
with open(osp.join(folder, "meta_info.txt")) as fin:
paths = [line.split(".")[0] for line in fin]
return paths
def generate_gaussian_kernel(kernel_size=13, sigma=1.6):
"""Generate Gaussian kernel used in `duf_downsample`.
Args:
kernel_size (int): Kernel size. Default: 13.
sigma (float): Sigma of the Gaussian kernel. Default: 1.6.
Returns:
np.array: The Gaussian kernel.
"""
    from scipy.ndimage import gaussian_filter
kernel = np.zeros((kernel_size, kernel_size))
# set element at the middle to one, a dirac delta
kernel[kernel_size // 2, kernel_size // 2] = 1
# gaussian-smooth the dirac, resulting in a gaussian filter
    return gaussian_filter(kernel, sigma)
def duf_downsample(x, kernel_size=13, scale=4):
"""Downsamping with Gaussian kernel used in the DUF official code.
Args:
x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w).
kernel_size (int): Kernel size. Default: 13.
scale (int): Downsampling factor. Supported scale: (2, 3, 4).
Default: 4.
Returns:
Tensor: DUF downsampled frames.
"""
assert scale in (2, 3, 4), f"Only support scale (2, 3, 4), but got {scale}."
squeeze_flag = False
if x.ndim == 4:
squeeze_flag = True
x = x.unsqueeze(0)
b, t, c, h, w = x.size()
x = x.view(-1, 1, h, w)
pad_w, pad_h = kernel_size // 2 + scale * 2, kernel_size // 2 + scale * 2
x = F.pad(x, (pad_w, pad_w, pad_h, pad_h), "reflect")
gaussian_filter = generate_gaussian_kernel(kernel_size, 0.4 * scale)
gaussian_filter = (
torch.from_numpy(gaussian_filter).type_as(x).unsqueeze(0).unsqueeze(0)
)
x = F.conv2d(x, gaussian_filter, stride=scale)
x = x[:, :, 2:-2, 2:-2]
x = x.view(b, t, c, x.size(2), x.size(3))
if squeeze_flag:
x = x.squeeze(0)
return x
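# Shape sketch (illustrative): with scale=4 and the default kernel_size=13, an input
# of shape (2, 5, 3, 256, 256) is downsampled to (2, 5, 3, 64, 64); a 4-D input
# (t, c, h, w) is handled by the squeeze/unsqueeze logic and returned without a batch dim.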
| 13,379 | 32.959391 | 86 | py |
BVQI | BVQI-master/pyiqa/data/flive_dataset.py | import pickle
import cv2
import numpy as np
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class FLIVEDataset(data.Dataset):
"""General No Reference dataset with meta info file.
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(FLIVEDataset, self).__init__()
self.opt = opt
target_img_folder = opt["dataroot_target"]
self.paths_mos = read_meta_info_file(target_img_folder, opt["meta_info_file"])
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
if opt.get("override_phase", None) is None:
splits = split_dict[split_index][opt["phase"]]
else:
splits = split_dict[split_index][opt["override_phase"]]
            self.paths_mos = [self.paths_mos[i] for i in splits]
            if opt["phase"] != "train":
                # remove patches during validation and test
                self.paths_mos = [
                    [p, m] for p, m in self.paths_mos if "patches/" not in p
                ]
dmos_max = opt.get("dmos_max", 0.0)
if dmos_max:
self.use_dmos = True
self.dmos_max = opt.get("dmos_max")
else:
self.use_dmos = False
self.mos_max = opt.get("mos_max", 1.0)
transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
transform_list += transform_mapping(k, v)
self.img_range = opt.get("img_range", 1.0)
transform_list += [
tf.ToTensor(),
]
self.trans = tf.Compose(transform_list)
def __getitem__(self, index):
img_path = self.paths_mos[index][0]
mos_label = self.paths_mos[index][1]
img_pil = Image.open(img_path).convert("RGB")
img_tensor = self.trans(img_pil) * self.img_range
if self.use_dmos:
mos_label = self.dmos_max - mos_label
else:
mos_label = mos_label / self.mos_max
mos_label_tensor = torch.Tensor([mos_label])
return {"img": img_tensor, "mos_label": mos_label_tensor, "img_path": img_path}
def __len__(self):
return len(self.paths_mos)
| 2,999 | 33.090909 | 87 | py |
BVQI | BVQI-master/pyiqa/data/bapps_dataset.py | import os
import pickle
import numpy as np
import pandas as pd
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import PairedToTensor, augment, transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class BAPPSDataset(data.Dataset):
"""The BAPPS Dataset introduced by:
Zhang, Richard and Isola, Phillip and Efros, Alexei A and Shechtman, Eli and Wang, Oliver
The Unreasonable Effectiveness of Deep Features as a Perceptual Metric.
CVPR2018
url: https://github.com/richzhang/PerceptualSimilarity
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
mode (str):
- 2afc: load 2afc triplet data
- jnd: load jnd pair data
"""
def __init__(self, opt):
super(BAPPSDataset, self).__init__()
self.opt = opt
if opt.get("override_phase", None) is None:
self.phase = opt["phase"]
else:
self.phase = opt["override_phase"]
self.dataset_mode = opt.get("mode", "2afc")
val_types = opt.get("val_types", None)
target_img_folder = opt["dataroot_target"]
self.dataroot = target_img_folder
ref_img_folder = opt.get("dataroot_ref", None)
self.paths_mos = pd.read_csv(opt["meta_info_file"]).values.tolist()
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
splits = split_dict[split_index][self.phase]
self.paths_mos = [self.paths_mos[i] for i in splits]
if self.dataset_mode == "2afc":
self.paths_mos = [x for x in self.paths_mos if x[0] != "jnd"]
elif self.dataset_mode == "jnd":
self.paths_mos = [x for x in self.paths_mos if x[0] == "jnd"]
if val_types is not None:
tmp_paths_mos = []
for item in self.paths_mos:
for vt in val_types:
if vt in item[1]:
tmp_paths_mos.append(item)
self.paths_mos = tmp_paths_mos
# TODO: paired transform
transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
transform_list += transform_mapping(k, v)
img_range = opt.get("img_range", 1.0)
transform_list += [
PairedToTensor(),
]
self.trans = tf.Compose(transform_list)
def __getitem__(self, index):
is_jnd_data = self.paths_mos[index][0] == "jnd"
distA_path = os.path.join(self.dataroot, self.paths_mos[index][1])
distB_path = os.path.join(self.dataroot, self.paths_mos[index][2])
distA_pil = Image.open(distA_path).convert("RGB")
distB_pil = Image.open(distB_path).convert("RGB")
score = self.paths_mos[index][3]
# original 0 means prefer p0, transfer to probability of p0
mos_label_tensor = torch.Tensor([score])
if not is_jnd_data:
ref_path = os.path.join(self.dataroot, self.paths_mos[index][0])
ref_img_pil = Image.open(ref_path).convert("RGB")
distA_tensor, distB_tensor, ref_tensor = self.trans(
[distA_pil, distB_pil, ref_img_pil]
)
else:
distA_tensor, distB_tensor = self.trans([distA_pil, distB_pil])
if not is_jnd_data:
return {
"ref_img": ref_tensor,
"distB_img": distB_tensor,
"distA_img": distA_tensor,
"mos_label": mos_label_tensor,
"img_path": ref_path,
"distB_path": distB_path,
"distA_path": distA_path,
}
else:
return {
"distB_img": distB_tensor,
"distA_img": distA_tensor,
"mos_label": mos_label_tensor,
"distB_path": distB_path,
"distA_path": distA_path,
}
def __len__(self):
return len(self.paths_mos)
| 4,533 | 33.090226 | 93 | py |
BVQI | BVQI-master/pyiqa/data/pipal_dataset.py | import pickle
import numpy as np
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import PairedToTensor, augment, transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class PIPALDataset(data.Dataset):
"""General Full Reference dataset with meta info file.
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(PIPALDataset, self).__init__()
self.opt = opt
if opt.get("override_phase", None) is None:
self.phase = opt["phase"]
else:
self.phase = opt["override_phase"]
target_img_folder = opt["dataroot_target"]
ref_img_folder = opt.get("dataroot_ref", None)
self.paths_mos = read_meta_info_file(
target_img_folder, opt["meta_info_file"], mode="fr", ref_dir=ref_img_folder
)
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
splits = split_dict[split_index][self.phase]
self.paths_mos = [self.paths_mos[i] for i in splits]
dmos_max = opt.get("dmos_max", 0.0)
if dmos_max:
self.use_dmos = True
self.dmos_max = opt.get("dmos_max")
else:
self.use_dmos = False
# do paired transform first and then do common transform
paired_transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
paired_transform_list += transform_mapping(k, v)
self.paired_trans = tf.Compose(paired_transform_list)
common_transform_list = []
self.img_range = opt.get("img_range", 1.0)
common_transform_list += [
PairedToTensor(),
]
self.common_trans = tf.Compose(common_transform_list)
def __getitem__(self, index):
ref_path = self.paths_mos[index][0]
img_path = self.paths_mos[index][1]
mos_label = self.paths_mos[index][2]
img_pil = Image.open(img_path).convert("RGB")
ref_pil = Image.open(ref_path).convert("RGB")
img_pil, ref_pil = self.paired_trans([img_pil, ref_pil])
img_tensor = self.common_trans(img_pil) * self.img_range
ref_tensor = self.common_trans(ref_pil) * self.img_range
if self.use_dmos:
mos_label = self.dmos_max - mos_label
mos_label_tensor = torch.Tensor([mos_label])
return {
"img": img_tensor,
"ref_img": ref_tensor,
"mos_label": mos_label_tensor,
"img_path": img_path,
"ref_img_path": ref_path,
}
def __len__(self):
return len(self.paths_mos)
| 3,241 | 32.42268 | 87 | py |
BVQI | BVQI-master/pyiqa/data/pieapp_dataset.py | import os
import pickle
import numpy as np
import pandas as pd
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import PairedToTensor, augment, transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class PieAPPDataset(data.Dataset):
"""The PieAPP Dataset introduced by:
Prashnani, Ekta and Cai, Hong and Mostofi, Yasamin and Sen, Pradeep
PieAPP: Perceptual Image-Error Assessment Through Pairwise Preference
CVPR2018
url: http://civc.ucsb.edu/graphics/Papers/CVPR2018_PieAPP/
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(PieAPPDataset, self).__init__()
self.opt = opt
target_img_folder = opt["dataroot_target"]
self.dataroot = target_img_folder
if opt.get("override_phase", None) is None:
self.phase = opt["phase"]
else:
self.phase = opt["override_phase"]
if self.phase == "test":
metadata = pd.read_csv(
opt["meta_info_file"],
usecols=[
"ref_img_path",
"dist_imgB_path",
"per_img score for dist_imgB",
],
)
else:
metadata = pd.read_csv(opt["meta_info_file"])
self.paths_mos = metadata.values.tolist()
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
splits = split_dict[split_index][self.phase]
self.paths_mos = [self.paths_mos[i] for i in splits]
        # remove duplicates while preserving order
        if self.phase == "test":
            temp = []
            for item in self.paths_mos:
                if item not in temp:
                    temp.append(item)
            self.paths_mos = temp
# do paired transform first and then do common transform
paired_transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
paired_transform_list += transform_mapping(k, v)
self.paired_trans = tf.Compose(paired_transform_list)
common_transform_list = []
self.img_range = opt.get("img_range", 1.0)
common_transform_list += [
PairedToTensor(),
]
self.common_trans = tf.Compose(common_transform_list)
def __getitem__(self, index):
ref_path = os.path.join(self.dataroot, self.paths_mos[index][0])
if self.phase == "test":
distB_path = os.path.join(self.dataroot, self.paths_mos[index][1])
else:
distA_path = os.path.join(self.dataroot, self.paths_mos[index][1])
distB_path = os.path.join(self.dataroot, self.paths_mos[index][2])
distB_pil = Image.open(distB_path).convert("RGB")
ref_img_pil = Image.open(ref_path).convert("RGB")
if self.phase != "test":
distA_pil = Image.open(distA_path).convert("RGB")
distA_pil, distB_pil, ref_img_pil = self.paired_trans(
[distA_pil, distB_pil, ref_img_pil]
)
distA_tensor, distB_tensor, ref_tensor = self.common_trans(
[distA_pil, distB_pil, ref_img_pil]
)
else:
distB_pil, ref_img_pil = self.paired_trans([distB_pil, ref_img_pil])
distB_tensor, ref_tensor = self.common_trans([distB_pil, ref_img_pil])
        if self.phase in ("train", "val"):
            score = self.paths_mos[index][4]
            mos_label_tensor = torch.Tensor([score])
            distB_score = torch.Tensor([-1])
        elif self.phase == "test":
            per_img_score = self.paths_mos[index][2]
            distB_score = torch.Tensor([per_img_score])
if self.phase == "test":
return {
"img": distB_tensor,
"ref_img": ref_tensor,
"mos_label": distB_score,
"img_path": distB_path,
"ref_img_path": ref_path,
}
else:
return {
"distB_img": distB_tensor,
"ref_img": ref_tensor,
"distA_img": distA_tensor,
"mos_label": mos_label_tensor,
"distB_per_img_score": distB_score,
"distB_path": distB_path,
"ref_img_path": ref_path,
"distA_path": distA_path,
}
def __len__(self):
return len(self.paths_mos)
| 5,149 | 34.273973 | 82 | py |
BVQI | BVQI-master/pyiqa/data/__init__.py | import importlib
import random
from copy import deepcopy
from functools import partial
from os import path as osp
import numpy as np
import torch
import torch.utils.data
from pyiqa.data.prefetch_dataloader import PrefetchDataLoader
from pyiqa.utils import get_root_logger, scandir
from pyiqa.utils.dist_util import get_dist_info
from pyiqa.utils.registry import DATASET_REGISTRY
__all__ = ["build_dataset", "build_dataloader"]
# automatically scan and import dataset modules for registry
# scan all the files under the data folder with '_dataset' in file names
data_folder = osp.dirname(osp.abspath(__file__))
dataset_filenames = [
osp.splitext(osp.basename(v))[0]
for v in scandir(data_folder)
if v.endswith("_dataset.py")
]
# import all the dataset modules
_dataset_modules = [
importlib.import_module(f"pyiqa.data.{file_name}")
for file_name in dataset_filenames
]
def build_dataset(dataset_opt):
"""Build dataset from options.
Args:
dataset_opt (dict): Configuration for dataset. It must contain:
name (str): Dataset name.
type (str): Dataset type.
"""
dataset_opt = deepcopy(dataset_opt)
dataset = DATASET_REGISTRY.get(dataset_opt["type"])(dataset_opt)
logger = get_root_logger()
logger.info(
f'Dataset [{dataset.__class__.__name__}] - {dataset_opt["name"]} ' "is built."
)
return dataset
def build_dataloader(
dataset, dataset_opt, num_gpu=1, dist=False, sampler=None, seed=None
):
"""Build dataloader.
Args:
dataset (torch.utils.data.Dataset): Dataset.
dataset_opt (dict): Dataset options. It contains the following keys:
phase (str): 'train' or 'val'.
num_worker_per_gpu (int): Number of workers for each GPU.
batch_size_per_gpu (int): Training batch size for each GPU.
num_gpu (int): Number of GPUs. Used only in the train phase.
Default: 1.
dist (bool): Whether in distributed training. Used only in the train
phase. Default: False.
sampler (torch.utils.data.sampler): Data sampler. Default: None.
seed (int | None): Seed. Default: None
"""
phase = dataset_opt["phase"]
rank, _ = get_dist_info()
if phase == "train":
if dist: # distributed training
batch_size = dataset_opt["batch_size_per_gpu"]
num_workers = dataset_opt["num_worker_per_gpu"]
else: # non-distributed training
multiplier = 1 if num_gpu == 0 else num_gpu
batch_size = dataset_opt["batch_size_per_gpu"] * multiplier
num_workers = dataset_opt["num_worker_per_gpu"] * multiplier
dataloader_args = dict(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
sampler=sampler,
drop_last=True,
)
if sampler is None:
dataloader_args["shuffle"] = True
dataloader_args["worker_init_fn"] = (
partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
if seed is not None
else None
)
elif phase in ["val", "test"]: # validation
batch_size = dataset_opt.get("batch_size_per_gpu", 1)
num_workers = dataset_opt.get("num_worker_per_gpu", 0)
dataloader_args = dict(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
)
else:
raise ValueError(
f"Wrong dataset phase: {phase}. "
"Supported ones are 'train', 'val' and 'test'."
)
dataloader_args["pin_memory"] = dataset_opt.get("pin_memory", False)
dataloader_args["persistent_workers"] = dataset_opt.get("persistent_workers", False)
prefetch_mode = dataset_opt.get("prefetch_mode")
if prefetch_mode == "cpu": # CPUPrefetcher
num_prefetch_queue = dataset_opt.get("num_prefetch_queue", 1)
logger = get_root_logger()
logger.info(
f"Use {prefetch_mode} prefetch dataloader: num_prefetch_queue = {num_prefetch_queue}"
)
return PrefetchDataLoader(
num_prefetch_queue=num_prefetch_queue, **dataloader_args
)
else:
# prefetch_mode=None: Normal dataloader
# prefetch_mode='cuda': dataloader for CUDAPrefetcher
return torch.utils.data.DataLoader(**dataloader_args)
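# Usage sketch (illustrative; key names follow the option parsing above, other keys
# depend on the chosen dataset class):
#   dataset_opt = dict(name='example', type='GeneralNRDataset', phase='train',
#                      dataroot_target='...', meta_info_file='...',
#                      batch_size_per_gpu=16, num_worker_per_gpu=4)
#   train_set = build_dataset(dataset_opt)
#   train_loader = build_dataloader(train_set, dataset_opt, num_gpu=1)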
def worker_init_fn(worker_id, num_workers, rank, seed):
# Set the worker seed to num_workers * rank + worker_id + seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
| 4,699 | 34.606061 | 97 | py |
BVQI | BVQI-master/pyiqa/data/transforms.py | import functools
import random
from collections.abc import Sequence
from typing import Union
import cv2
import numpy as np
import torch
import torchvision.transforms as tf
import torchvision.transforms.functional as F
from imgaug import augmenters as iaa
from PIL import Image
from pyiqa.archs.arch_util import to_2tuple
def transform_mapping(key, args):
if key == "hflip" and args:
return [PairedRandomHorizontalFlip()]
    if key == "vflip" and args:
        return [PairedRandomVerticalFlip()]
elif key == "random_crop":
return [PairedRandomCrop(args)]
elif key == "center_crop":
return [PairedCenterCrop(args)]
elif key == "resize":
return [PairedResize(args)]
elif key == "adaptive_resize":
return [PairedAdaptiveResize(args)]
elif key == "random_square_resize":
return [PairedRandomSquareResize(args)]
elif key == "random_arp_resize":
return [PairedRandomARPResize(args)]
elif key == "ada_pad":
return [PairedAdaptivePadding(args)]
elif key == "rot90" and args:
return [PairedRandomRot90(args)]
elif key == "randomerase":
return [PairedRandomErasing(**args)]
elif key == "changecolor":
return [ChangeColorSpace(args)]
elif key == "totensor" and args:
return [PairedToTensor()]
else:
return []
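# Usage sketch (illustrative): the `augment` dict of a dataset config is expanded
# key by key through this mapping, e.g.
#   transform_mapping('hflip', True)       -> [PairedRandomHorizontalFlip()]
#   transform_mapping('random_crop', 224)  -> [PairedRandomCrop(224)]
#   transform_mapping('unknown_key', True) -> []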
def _check_pair(x):
    return isinstance(x, (tuple, list)) and len(x) >= 2
class PairedToTensor(tf.ToTensor):
    """Pair version of ToTensor"""
def to_tensor(self, x):
if isinstance(x, torch.Tensor):
return x
else:
return F.to_tensor(x)
def __call__(self, imgs):
if _check_pair(imgs):
for i in range(len(imgs)):
imgs[i] = self.to_tensor(imgs[i])
return imgs
else:
return self.to_tensor(imgs)
class ChangeColorSpace:
    """Change the color space of a single image or a pair/list of images"""
def __init__(self, to_colorspace):
self.aug_op = iaa.color.ChangeColorspace(to_colorspace)
def __call__(self, imgs):
if _check_pair(imgs):
for i in range(len(imgs)):
tmpimg = self.aug_op.augment_image(np.array(imgs[i]))
imgs[i] = Image.fromarray(tmpimg)
return imgs
else:
imgs = self.aug_op.augment_image(np.array(imgs))
return Image.fromarray(imgs)
class PairedCenterCrop(tf.CenterCrop):
"""Pair version of center crop"""
def forward(self, imgs):
if _check_pair(imgs):
for i in range(len(imgs)):
imgs[i] = super().forward(imgs[i])
return imgs
elif isinstance(imgs, Image.Image):
return super().forward(imgs)
class PairedRandomCrop(tf.RandomCrop):
"""Pair version of random crop"""
def _pad(self, img):
if self.padding is not None:
img = F.pad(img, self.padding, self.fill, self.padding_mode)
width, height = img.size
# pad the width if needed
if self.pad_if_needed and width < self.size[1]:
padding = [self.size[1] - width, 0]
img = F.pad(img, padding, self.fill, self.padding_mode)
# pad the height if needed
if self.pad_if_needed and height < self.size[0]:
padding = [0, self.size[0] - height]
img = F.pad(img, padding, self.fill, self.padding_mode)
return img
def forward(self, imgs):
if _check_pair(imgs):
            # use the same crop parameters (computed on the padded first image) for all images
            top, left, h, w = self.get_params(self._pad(imgs[0]), self.size)
            for idx in range(len(imgs)):
                img = self._pad(imgs[idx])
                img = F.crop(img, top, left, h, w)
                imgs[idx] = img
return imgs
elif isinstance(imgs, Image.Image):
return super().forward(imgs)
class PairedRandomErasing(tf.RandomErasing):
"""Pair version of random erasing"""
def forward(self, imgs):
if _check_pair(imgs):
if torch.rand(1) < self.p:
# cast self.value to script acceptable type
if isinstance(self.value, (int, float)):
value = [self.value]
elif isinstance(self.value, str):
value = None
elif isinstance(self.value, tuple):
value = list(self.value)
else:
value = self.value
if value is not None and not (len(value) in (1, imgs[0].shape[-3])):
raise ValueError(
"If value is a sequence, it should have either a single value or "
f"{imgs[0].shape[-3]} (number of input channels)"
)
x, y, h, w, v = self.get_params(
imgs[0], scale=self.scale, ratio=self.ratio, value=value
)
for i in range(len(imgs)):
imgs[i] = F.erase(imgs[i], x, y, h, w, v, self.inplace)
return imgs
elif isinstance(imgs, Image.Image):
return super().forward(imgs)
class PairedRandomHorizontalFlip(tf.RandomHorizontalFlip):
"""Pair version of random hflip"""
def forward(self, imgs):
if _check_pair(imgs):
if torch.rand(1) < self.p:
for i in range(len(imgs)):
imgs[i] = F.hflip(imgs[i])
return imgs
elif isinstance(imgs, Image.Image):
return super().forward(imgs)
class PairedRandomVerticalFlip(tf.RandomVerticalFlip):
    """Pair version of random vflip"""
def forward(self, imgs):
if _check_pair(imgs):
if torch.rand(1) < self.p:
for i in range(len(imgs)):
imgs[i] = F.vflip(imgs[i])
return imgs
elif isinstance(imgs, Image.Image):
return super().forward(imgs)
class PairedRandomRot90(torch.nn.Module):
    """Pair version of random 90-degree rotation"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, imgs):
if _check_pair(imgs):
if torch.rand(1) < self.p:
for i in range(len(imgs)):
imgs[i] = F.rotate(imgs[i], 90)
return imgs
elif isinstance(imgs, Image.Image):
if torch.rand(1) < self.p:
imgs = F.rotate(imgs, 90)
return imgs
class PairedResize(tf.Resize):
"""Pair version of resize"""
def forward(self, imgs):
if _check_pair(imgs):
for i in range(len(imgs)):
imgs[i] = super().forward(imgs[i])
return imgs
elif isinstance(imgs, Image.Image):
return super().forward(imgs)
class PairedAdaptiveResize(tf.Resize):
    """Aspect-ratio-preserving resize, applied only when the shorter image side is smaller than the target size"""
def forward(self, imgs):
if _check_pair(imgs):
for i in range(len(imgs)):
tmpimg = imgs[i]
min_size = min(tmpimg.size)
if min_size < self.size:
tmpimg = super().forward(tmpimg)
imgs[i] = tmpimg
return imgs
elif isinstance(imgs, Image.Image):
tmpimg = imgs
min_size = min(tmpimg.size)
if min_size < self.size:
tmpimg = super().forward(tmpimg)
return tmpimg
class PairedRandomARPResize(torch.nn.Module):
    """Pair version of random aspect-ratio-preserving resize (shorter side resized to a size sampled from size_range)"""
def __init__(
self, size_range, interpolation=tf.InterpolationMode.BILINEAR, antialias=None
):
super().__init__()
self.interpolation = interpolation
self.antialias = antialias
self.size_range = size_range
if not (isinstance(size_range, Sequence) and len(size_range) == 2):
            raise TypeError(
                f"size_range should be a sequence of 2 ints. Got {size_range} with {type(size_range)}"
            )
def forward(self, imgs):
min_size, max_size = sorted(self.size_range)
target_size = random.randint(min_size, max_size)
if _check_pair(imgs):
for i in range(len(imgs)):
imgs[i] = F.resize(imgs[i], target_size, self.interpolation)
return imgs
elif isinstance(imgs, Image.Image):
return F.resize(imgs, target_size, self.interpolation)
class PairedRandomSquareResize(torch.nn.Module):
    """Pair version of random square resize (both sides resized to a size sampled from size_range)"""
def __init__(
self, size_range, interpolation=tf.InterpolationMode.BILINEAR, antialias=None
):
super().__init__()
self.interpolation = interpolation
self.antialias = antialias
self.size_range = size_range
if not (isinstance(size_range, Sequence) and len(size_range) == 2):
            raise TypeError(
                f"size_range should be a sequence of 2 ints. Got {size_range} with {type(size_range)}"
            )
def forward(self, imgs):
min_size, max_size = sorted(self.size_range)
target_size = random.randint(min_size, max_size)
target_size = (target_size, target_size)
if _check_pair(imgs):
for i in range(len(imgs)):
imgs[i] = F.resize(imgs[i], target_size, self.interpolation)
return imgs
elif isinstance(imgs, Image.Image):
return F.resize(imgs, target_size, self.interpolation)
class PairedAdaptivePadding(torch.nn.Module):
    """Pad images (or pairs of images) to target_size"""
def __init__(self, target_size, fill=0, padding_mode="constant"):
super().__init__()
self.target_size = to_2tuple(target_size)
self.fill = fill
self.padding_mode = padding_mode
def get_padding(self, x):
w, h = x.size
th, tw = self.target_size
assert (
th >= h and tw >= w
), f"Target size {self.target_size} should be larger than image size ({h}, {w})"
pad_row = th - h
pad_col = tw - w
pad_l, pad_r, pad_t, pad_b = (
pad_col // 2,
pad_col - pad_col // 2,
pad_row // 2,
pad_row - pad_row // 2,
)
return (pad_l, pad_t, pad_r, pad_b)
def forward(self, imgs):
if _check_pair(imgs):
for i in range(len(imgs)):
padding = self.get_padding(imgs[i])
imgs[i] = F.pad(imgs[i], padding, self.fill, self.padding_mode)
return imgs
elif isinstance(imgs, Image.Image):
padding = self.get_padding(imgs)
imgs = F.pad(imgs, padding, self.fill, self.padding_mode)
return imgs
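# Worked example (illustrative): with target_size=224, a 180x200 (w x h) image gets
# padding (pad_l, pad_t, pad_r, pad_b) = (22, 12, 22, 12), i.e. 44 extra columns and
# 24 extra rows, yielding a 224x224 output.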
def mod_crop(img, scale):
"""Mod crop images, used during testing.
Args:
img (ndarray): Input image.
scale (int): Scale factor.
Returns:
ndarray: Result image.
"""
img = img.copy()
if img.ndim in (2, 3):
h, w = img.shape[0], img.shape[1]
h_remainder, w_remainder = h % scale, w % scale
img = img[: h - h_remainder, : w - w_remainder, ...]
else:
raise ValueError(f"Wrong img ndim: {img.ndim}.")
return img
def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False):
"""Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).
We use vertical flip and transpose for rotation implementation.
All the images in the list use the same augmentation.
Args:
imgs (list[ndarray] | ndarray): Images to be augmented. If the input
is an ndarray, it will be transformed to a list.
hflip (bool): Horizontal flip. Default: True.
        rotation (bool): Rotation. Default: True.
        flows (list[ndarray]): Flows to be augmented. If the input is an
ndarray, it will be transformed to a list.
Dimension is (h, w, 2). Default: None.
return_status (bool): Return the status of flip and rotation.
Default: False.
Returns:
list[ndarray] | ndarray: Augmented images and flows. If returned
results only have one element, just return ndarray.
"""
hflip = hflip and random.random() < 0.5
vflip = rotation and random.random() < 0.5
rot90 = rotation and random.random() < 0.5
def _augment(img):
if hflip: # horizontal
cv2.flip(img, 1, img)
if vflip: # vertical
cv2.flip(img, 0, img)
if rot90:
img = img.transpose(1, 0, 2)
return img
def _augment_flow(flow):
if hflip: # horizontal
cv2.flip(flow, 1, flow)
flow[:, :, 0] *= -1
if vflip: # vertical
cv2.flip(flow, 0, flow)
flow[:, :, 1] *= -1
if rot90:
flow = flow.transpose(1, 0, 2)
flow = flow[:, :, [1, 0]]
return flow
if not isinstance(imgs, list):
imgs = [imgs]
imgs = [_augment(img) for img in imgs]
if len(imgs) == 1:
imgs = imgs[0]
if flows is not None:
if not isinstance(flows, list):
flows = [flows]
flows = [_augment_flow(flow) for flow in flows]
if len(flows) == 1:
flows = flows[0]
return imgs, flows
else:
if return_status:
return imgs, (hflip, vflip, rot90)
else:
return imgs
def img_rotate(img, angle, center=None, scale=1.0):
"""Rotate image.
Args:
img (ndarray): Image to be rotated.
angle (float): Rotation angle in degrees. Positive values mean
counter-clockwise rotation.
center (tuple[int]): Rotation center. If the center is None,
initialize it as the center of the image. Default: None.
scale (float): Isotropic scale factor. Default: 1.0.
"""
(h, w) = img.shape[:2]
if center is None:
center = (w // 2, h // 2)
matrix = cv2.getRotationMatrix2D(center, angle, scale)
rotated_img = cv2.warpAffine(img, matrix, (w, h))
return rotated_img
| 13,970 | 31.117241 | 101 | py |
BVQI | BVQI-master/pyiqa/archs/maniqa_arch.py | r"""MANIQA proposed by
MANIQA: Multi-dimension Attention Network for No-Reference Image Quality Assessment
Sidi Yang, Tianhe Wu, Shuwei Shi, Shanshan Lao, Yuan Gong, Mingdeng Cao, Jiahao Wang and Yujiu Yang.
CVPR Workshop 2022, winner of NTIRE2022 NRIQA challenge
Reference:
- Official github: https://github.com/IIGROUP/MANIQA
"""
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from timm.data import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
IMAGENET_INCEPTION_MEAN,
IMAGENET_INCEPTION_STD,
)
from timm.models.vision_transformer import Block
from torch import nn
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import extract_2d_patches
from .maniqa_swin import SwinTransformer
default_model_urls = {
"pipal": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/MANIQA_PIPAL-ae6d356b.pth"
}
def random_crop(x, sample_size=224, sample_num=8):
b, c, h, w = x.shape
th = tw = sample_size
cropped_x = []
for s in range(sample_num):
i = torch.randint(0, h - th + 1, size=(1,)).item()
j = torch.randint(0, w - tw + 1, size=(1,)).item()
cropped_x.append(x[:, :, i : i + th, j : j + tw])
cropped_x = torch.stack(cropped_x, dim=1)
return cropped_x
class TABlock(nn.Module):
def __init__(self, dim, drop=0.1):
super().__init__()
self.c_q = nn.Linear(dim, dim)
self.c_k = nn.Linear(dim, dim)
self.c_v = nn.Linear(dim, dim)
self.norm_fact = dim ** -0.5
self.softmax = nn.Softmax(dim=-1)
self.proj_drop = nn.Dropout(drop)
def forward(self, x):
_x = x
B, C, N = x.shape
q = self.c_q(x)
k = self.c_k(x)
v = self.c_v(x)
attn = q @ k.transpose(-2, -1) * self.norm_fact
attn = self.softmax(attn)
x = (attn @ v).transpose(1, 2).reshape(B, C, N)
x = self.proj_drop(x)
x = x + _x
return x
class SaveOutput:
def __init__(self):
self.outputs = []
def __call__(self, module, module_in, module_out):
self.outputs.append(module_out)
def clear(self):
self.outputs = []
@ARCH_REGISTRY.register()
class MANIQA(nn.Module):
def __init__(
self,
embed_dim=768,
num_outputs=1,
patch_size=8,
drop=0.1,
depths=[2, 2],
window_size=4,
dim_mlp=768,
num_heads=[4, 4],
img_size=224,
num_tab=2,
scale=0.13,
test_sample=20,
pretrained=True,
pretrained_model_path=None,
default_mean=None,
default_std=None,
**kwargs,
):
super().__init__()
self.img_size = img_size
self.patch_size = patch_size
self.input_size = img_size // patch_size
self.test_sample = test_sample
self.patches_resolution = (img_size // patch_size, img_size // patch_size)
self.vit = timm.create_model("vit_base_patch8_224", pretrained=True)
self.save_output = SaveOutput()
hook_handles = []
for layer in self.vit.modules():
if isinstance(layer, Block):
handle = layer.register_forward_hook(self.save_output)
hook_handles.append(handle)
self.tablock1 = nn.ModuleList()
for i in range(num_tab):
tab = TABlock(self.input_size ** 2)
self.tablock1.append(tab)
self.conv1 = nn.Conv2d(embed_dim * 4, embed_dim, 1, 1, 0)
self.swintransformer1 = SwinTransformer(
patches_resolution=self.patches_resolution,
depths=depths,
num_heads=num_heads,
embed_dim=embed_dim,
window_size=window_size,
dim_mlp=dim_mlp,
scale=scale,
)
self.tablock2 = nn.ModuleList()
for i in range(num_tab):
tab = TABlock(self.input_size ** 2)
self.tablock2.append(tab)
self.conv2 = nn.Conv2d(embed_dim, embed_dim // 2, 1, 1, 0)
self.swintransformer2 = SwinTransformer(
patches_resolution=self.patches_resolution,
depths=depths,
num_heads=num_heads,
embed_dim=embed_dim // 2,
window_size=window_size,
dim_mlp=dim_mlp,
scale=scale,
)
self.fc_score = nn.Sequential(
nn.Linear(embed_dim // 2, embed_dim // 2),
nn.ReLU(),
nn.Dropout(drop),
nn.Linear(embed_dim // 2, num_outputs),
nn.ReLU(),
)
self.fc_weight = nn.Sequential(
nn.Linear(embed_dim // 2, embed_dim // 2),
nn.ReLU(),
nn.Dropout(drop),
nn.Linear(embed_dim // 2, num_outputs),
nn.Sigmoid(),
)
self.default_mean = torch.Tensor(IMAGENET_INCEPTION_MEAN).view(1, 3, 1, 1)
self.default_std = torch.Tensor(IMAGENET_INCEPTION_STD).view(1, 3, 1, 1)
if pretrained_model_path is not None:
load_pretrained_network(
self, pretrained_model_path, True, weight_keys="params"
)
# load_pretrained_network(self, pretrained_model_path, True, )
elif pretrained:
load_pretrained_network(self, default_model_urls["pipal"], True)
def extract_feature(self, save_output):
x6 = save_output.outputs[6][:, 1:]
x7 = save_output.outputs[7][:, 1:]
x8 = save_output.outputs[8][:, 1:]
x9 = save_output.outputs[9][:, 1:]
x = torch.cat((x6, x7, x8, x9), dim=2)
return x
def forward(self, x):
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
if self.training:
x_patches = random_crop(x, sample_size=224, sample_num=1)
else:
x_patches = random_crop(x, sample_size=224, sample_num=self.test_sample)
bsz, num_patches, c, psz, psz = x_patches.shape
x = x_patches.reshape(bsz * num_patches, c, psz, psz)
_x = self.vit(x)
x = self.extract_feature(self.save_output)
self.save_output.outputs.clear()
# stage 1
x = rearrange(x, "b (h w) c -> b c (h w)", h=self.input_size, w=self.input_size)
for tab in self.tablock1:
x = tab(x)
x = rearrange(x, "b c (h w) -> b c h w", h=self.input_size, w=self.input_size)
x = self.conv1(x)
x = self.swintransformer1(x)
# stage2
x = rearrange(x, "b c h w -> b c (h w)", h=self.input_size, w=self.input_size)
for tab in self.tablock2:
x = tab(x)
x = rearrange(x, "b c (h w) -> b c h w", h=self.input_size, w=self.input_size)
x = self.conv2(x)
x = self.swintransformer2(x)
x = rearrange(x, "b c h w -> b (h w) c", h=self.input_size, w=self.input_size)
per_patch_score = self.fc_score(x)
per_patch_score = per_patch_score.reshape(bsz, -1)
per_patch_weight = self.fc_weight(x)
per_patch_weight = per_patch_weight.reshape(bsz, -1)
score = (per_patch_weight * per_patch_score).sum(dim=-1) / (
per_patch_weight.sum(dim=-1) + 1e-8
)
return score.unsqueeze(1)
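# Usage sketch (illustrative comment, not part of the upstream file); assumes a
# 1x3xHxW RGB tensor in [0, 1] with H, W >= 224 so the random 224x224 crops fit.
#
#   model = MANIQA(pretrained=True).eval()
#   with torch.no_grad():
#       score = model(img)   # (1, 1) tensor, averaged over `test_sample` crops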
| 7,322 | 30.83913 | 112 | py |
BVQI | BVQI-master/pyiqa/archs/dbcnn_arch.py | r"""DBCNN Metric
Created by: https://github.com/zwx8981/DBCNN-PyTorch/blob/master/DBCNN.py
Modified by: Chaofeng Chen (https://github.com/chaofengc)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"csiq": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_CSIQ-8677d071.pth",
"tid2008": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_TID2008-4b47c5d1.pth",
"tid2013": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_TID2013-485d021d.pth",
"live": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_LIVE-97262bf4.pth",
"livec": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_LIVEC-83f6dad3.pth",
"livem": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_LIVEM-698474e3.pth",
"koniq": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_KonIQ10k-254e8241.pth",
}
class SCNN(nn.Module):
"""Network branch for synthetic distortions.
Args:
use_bn (Boolean): Whether to use batch normalization.
Modified from https://github.com/zwx8981/DBCNN-PyTorch/blob/master/SCNN.py
"""
def __init__(self, use_bn=True):
super(SCNN, self).__init__()
self.num_class = 39
self.use_bn = use_bn
self.features = nn.Sequential(
*self._make_layers(3, 48, 3, 1, 1),
*self._make_layers(48, 48, 3, 2, 1),
*self._make_layers(48, 64, 3, 1, 1),
*self._make_layers(64, 64, 3, 2, 1),
*self._make_layers(64, 64, 3, 1, 1),
*self._make_layers(64, 64, 3, 2, 1),
*self._make_layers(64, 128, 3, 1, 1),
*self._make_layers(128, 128, 3, 1, 1),
*self._make_layers(128, 128, 3, 2, 1),
)
self.pooling = nn.AdaptiveAvgPool2d(1)
self.projection = nn.Sequential(
*self._make_layers(128, 256, 1, 1, 0),
*self._make_layers(256, 256, 1, 1, 0),
)
self.classifier = nn.Linear(256, self.num_class)
def _make_layers(self, in_ch, out_ch, ksz, stride, pad):
if self.use_bn:
layers = [
nn.Conv2d(in_ch, out_ch, ksz, stride, pad),
nn.BatchNorm2d(out_ch),
nn.ReLU(True),
]
else:
layers = [
nn.Conv2d(in_ch, out_ch, ksz, stride, pad),
nn.ReLU(True),
]
return layers
def forward(self, X):
X = self.features(X)
X = self.pooling(X)
X = self.projection(X)
X = X.view(X.shape[0], -1)
X = self.classifier(X)
return X
@ARCH_REGISTRY.register()
class DBCNN(nn.Module):
"""Full DBCNN network.
Args:
        fc (Boolean): Whether to initialize the fc layers.
        use_bn (Boolean): Whether to use batch normalization.
pretrained_scnn_path (String): Pretrained scnn path.
default_mean (list): Default mean value.
default_std (list): Default std value.
Reference:
Zhang, Weixia, et al. "Blind image quality assessment using
a deep bilinear convolutional neural network." IEEE Transactions
on Circuits and Systems for Video Technology 30.1 (2018): 36-47.
"""
def __init__(
self,
fc=True,
use_bn=True,
pretrained_scnn_path=None,
pretrained=True,
pretrained_model_path=None,
default_mean=[0.485, 0.456, 0.406],
default_std=[0.229, 0.224, 0.225],
):
super(DBCNN, self).__init__()
# Convolution and pooling layers of VGG-16.
self.features1 = torchvision.models.vgg16(pretrained=True).features
self.features1 = nn.Sequential(*list(self.features1.children())[:-1])
scnn = SCNN(use_bn=use_bn)
if pretrained_scnn_path is not None:
load_pretrained_network(scnn, pretrained_scnn_path)
self.features2 = scnn.features
# Linear classifier.
self.fc = torch.nn.Linear(512 * 128, 1)
self.default_mean = torch.Tensor(default_mean).view(1, 3, 1, 1)
self.default_std = torch.Tensor(default_std).view(1, 3, 1, 1)
if fc:
# Freeze all previous layers.
for param in self.features1.parameters():
param.requires_grad = False
for param in scnn.parameters():
param.requires_grad = False
# Initialize the fc layers.
nn.init.kaiming_normal_(self.fc.weight.data)
if self.fc.bias is not None:
nn.init.constant_(self.fc.bias.data, val=0)
if pretrained_model_path is None and pretrained:
url_key = "koniq" if isinstance(pretrained, bool) else pretrained
pretrained_model_path = default_model_urls[url_key]
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path, True, "params")
def preprocess(self, x):
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
return x
def forward(self, X):
r"""Compute IQA using DBCNN model.
Args:
X: An input tensor with (N, C, H, W) shape. RGB channel order for colour images.
Returns:
            Quality score predicted by the DBCNN model.
"""
X = self.preprocess(X)
X1 = self.features1(X)
X2 = self.features2(X)
N, _, H, W = X1.shape
N, _, H2, W2 = X2.shape
if (H != H2) or (W != W2):
X2 = F.interpolate(X2, (H, W), mode="bilinear", align_corners=True)
X1 = X1.view(N, 512, H * W)
X2 = X2.view(N, 128, H * W)
X = torch.bmm(X1, torch.transpose(X2, 1, 2)) / (H * W) # Bilinear
X = X.view(N, 512 * 128)
X = torch.sqrt(X + 1e-8)
X = torch.nn.functional.normalize(X)
X = self.fc(X)
return X
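# Usage sketch (illustrative comment, not part of the upstream file); assumes an
# Nx3xHxW RGB tensor in [0, 1].
#
#   model = DBCNN(pretrained=True).eval()   # pretrained=True loads the KonIQ-10k weights
#   with torch.no_grad():
#       score = model(img)                  # (N, 1) predicted quality scores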
| 6,123 | 32.464481 | 116 | py |
BVQI | BVQI-master/pyiqa/archs/pieapp_arch.py | r"""PieAPP metric, proposed by
Prashnani, Ekta, Hong Cai, Yasamin Mostofi, and Pradeep Sen.
"Pieapp: Perceptual image-error assessment through pairwise preference."
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1808-1817. 2018.
Ref url: https://github.com/prashnani/PerceptualImageError/blob/master/model/PieAPPv0pt1_PT.py
Modified by: Chaofeng Chen (https://github.com/chaofengc)
!!! Important Note: to keep the test process simple and the comparison with other methods fair,
    we use zero padding and extract subpatches only once,
    rather than from multiple subimages as in the original code.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import extract_2d_patches
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/PieAPPv0.1-0937b014.pth"
}
class CompactLinear(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.parameter.Parameter(torch.randn(1))
self.bias = nn.parameter.Parameter(torch.randn(1))
def forward(self, x):
return x * self.weight + self.bias
@ARCH_REGISTRY.register()
class PieAPP(nn.Module):
def __init__(
self, patch_size=64, stride=27, pretrained=True, pretrained_model_path=None
):
super(PieAPP, self).__init__()
self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
self.conv2 = nn.Conv2d(64, 64, 3, padding=1)
self.pool2 = nn.MaxPool2d(2, 2)
self.conv3 = nn.Conv2d(64, 64, 3, padding=1)
self.conv4 = nn.Conv2d(64, 128, 3, padding=1)
self.pool4 = nn.MaxPool2d(2, 2)
self.conv5 = nn.Conv2d(128, 128, 3, padding=1)
self.conv6 = nn.Conv2d(128, 128, 3, padding=1)
self.pool6 = nn.MaxPool2d(2, 2)
self.conv7 = nn.Conv2d(128, 256, 3, padding=1)
self.conv8 = nn.Conv2d(256, 256, 3, padding=1)
self.pool8 = nn.MaxPool2d(2, 2)
self.conv9 = nn.Conv2d(256, 256, 3, padding=1)
self.conv10 = nn.Conv2d(256, 512, 3, padding=1)
self.pool10 = nn.MaxPool2d(2, 2)
self.conv11 = nn.Conv2d(512, 512, 3, padding=1)
self.fc1_score = nn.Linear(120832, 512)
self.fc2_score = nn.Linear(512, 1)
self.fc1_weight = nn.Linear(2048, 512)
self.fc2_weight = nn.Linear(512, 1)
self.ref_score_subtract = CompactLinear()
self.patch_size = patch_size
self.stride = stride
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path)
elif pretrained:
load_pretrained_network(self, default_model_urls["url"])
self.pretrained = pretrained
    def flatten(self, matrix):  # takes NxCxHxW input and outputs Nx(C*H*W)
return torch.flatten(matrix, 1)
def compute_features(self, input):
# conv1 -> relu -> conv2 -> relu -> pool2 -> conv3 -> relu
x3 = F.relu(
self.conv3(self.pool2(F.relu(self.conv2(F.relu(self.conv1(input))))))
)
# conv4 -> relu -> pool4 -> conv5 -> relu
x5 = F.relu(self.conv5(self.pool4(F.relu(self.conv4(x3)))))
# conv6 -> relu -> pool6 -> conv7 -> relu
x7 = F.relu(self.conv7(self.pool6(F.relu(self.conv6(x5)))))
# conv8 -> relu -> pool8 -> conv9 -> relu
x9 = F.relu(self.conv9(self.pool8(F.relu(self.conv8(x7)))))
# conv10 -> relu -> pool10 -> conv11 -> relU
x11 = self.flatten(F.relu(self.conv11(self.pool10(F.relu(self.conv10(x9))))))
# flatten and concatenate
feature_ms = torch.cat(
(
self.flatten(x3),
self.flatten(x5),
self.flatten(x7),
self.flatten(x9),
x11,
),
1,
)
return feature_ms, x11
def preprocess(self, x):
"""Default BGR in [0, 255] in original codes"""
x = x[:, [2, 1, 0]] * 255.0
return x
def forward(self, dist, ref):
        assert (
            dist.shape == ref.shape
        ), f"Input and reference images should have the same shape, but got {dist.shape} and {ref.shape}"
if self.pretrained:
dist = self.preprocess(dist)
ref = self.preprocess(ref)
image_A_patches = extract_2d_patches(
dist, self.patch_size, self.stride, padding="none"
)
image_ref_patches = extract_2d_patches(
ref, self.patch_size, self.stride, padding="none"
)
bsz, num_patches, c, psz, psz = image_A_patches.shape
image_A_patches = image_A_patches.reshape(bsz * num_patches, c, psz, psz)
image_ref_patches = image_ref_patches.reshape(bsz * num_patches, c, psz, psz)
A_multi_scale, A_coarse = self.compute_features(image_A_patches)
ref_multi_scale, ref_coarse = self.compute_features(image_ref_patches)
diff_ms = ref_multi_scale - A_multi_scale
diff_coarse = ref_coarse - A_coarse
# per patch score: fc1_score -> relu -> fc2_score
per_patch_score = self.ref_score_subtract(
0.01 * self.fc2_score(F.relu(self.fc1_score(diff_ms)))
)
per_patch_score = per_patch_score.view((-1, num_patches))
# per patch weight: fc1_weight -> relu -> fc2_weight
per_patch_weight = self.fc2_weight(F.relu(self.fc1_weight(diff_coarse))) + 1e-6
per_patch_weight = per_patch_weight.view((-1, num_patches))
score = (per_patch_weight * per_patch_score).sum(dim=-1) / per_patch_weight.sum(
dim=-1
)
return score.squeeze()
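# Usage sketch (illustrative comment, not part of the upstream file); PieAPP is a
# full-reference metric, so both distorted and reference images are required.
# Assumes Nx3xHxW RGB tensors in [0, 1] that are larger than the 64x64 patch size.
#
#   model = PieAPP(pretrained=True).eval()
#   with torch.no_grad():
#       err = model(dist_img, ref_img)   # perceptual error; larger means more distorted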
| 5,788 | 37.852349 | 108 | py |
BVQI | BVQI-master/pyiqa/archs/lpips_arch.py | r"""LPIPS Model.
Created by: https://github.com/richzhang/PerceptualSimilarity.
Modified by: Jiadi Mo (https://github.com/JiadiMo)
"""
from collections import namedtuple
import torch
import torch.nn as nn
from torchvision import models
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
# key "url" is the default
"0.0_alex": "https://github.com/chaofengc/IQA-Toolbox-Python/releases/download/v0.1-weights/LPIPS_v0.0_alex-18720f55.pth",
"0.0_vgg": "https://github.com/chaofengc/IQA-Toolbox-Python/releases/download/v0.1-weights/LPIPS_v0.0_vgg-b9e42362.pth",
"0.0_squeeze": "https://github.com/chaofengc/IQA-Toolbox-Python/releases/download/v0.1-weights/LPIPS_v0.0_squeeze-c27abd3a.pth",
"0.1_alex": "https://github.com/chaofengc/IQA-Toolbox-Python/releases/download/v0.1-weights/LPIPS_v0.1_alex-df73285e.pth",
"0.1_vgg": "https://github.com/chaofengc/IQA-Toolbox-Python/releases/download/v0.1-weights/LPIPS_v0.1_vgg-a78928a0.pth",
"0.1_squeeze": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/LPIPS_v0.1_squeeze-4a5350f2.pth",
}
def upsample(in_tens, out_HW=(64, 64)): # assumes scale factor is same for H and W
return nn.Upsample(size=out_HW, mode="bilinear", align_corners=False)(in_tens)
def spatial_average(in_tens, keepdim=True):
return in_tens.mean([2, 3], keepdim=keepdim)
def normalize_tensor(in_feat, eps=1e-10):
norm_factor = torch.sqrt(torch.sum(in_feat ** 2, dim=1, keepdim=True))
return in_feat / (norm_factor + eps)
@ARCH_REGISTRY.register()
class LPIPS(nn.Module):
"""LPIPS model.
Args:
lpips (Boolean) : Whether to use linear layers on top of base/trunk network.
pretrained (Boolean): Whether means linear layers are calibrated with human
perceptual judgments.
pnet_rand (Boolean): Whether to randomly initialized trunk.
net (String): ['alex','vgg','squeeze'] are the base/trunk networks available.
        version (String): choose the version; ['v0.1'] is the default and latest,
            ['v0.0'] contained a normalization bug.
        pretrained_model_path (String): Pretrained model path.
The following parameters should only be changed if training the network:
eval_mode (Boolean): choose the mode; True is for test mode (default).
pnet_tune (Boolean): Whether to tune the base/trunk network.
use_dropout (Boolean): Whether to use dropout when training linear layers.
Reference:
Zhang, Richard, et al. "The unreasonable effectiveness of deep features as
a perceptual metric." Proceedings of the IEEE conference on computer vision
and pattern recognition. 2018.
"""
def __init__(
self,
pretrained=True,
net="alex",
version="0.1",
lpips=True,
spatial=False,
pnet_rand=False,
pnet_tune=False,
use_dropout=True,
pretrained_model_path=None,
eval_mode=True,
**kwargs,
):
super(LPIPS, self).__init__()
self.pnet_type = net
self.pnet_tune = pnet_tune
self.pnet_rand = pnet_rand
self.spatial = spatial
self.lpips = lpips # false means baseline of just averaging all layers
self.version = version
self.scaling_layer = ScalingLayer()
if self.pnet_type in ["vgg", "vgg16"]:
net_type = vgg16
self.chns = [64, 128, 256, 512, 512]
elif self.pnet_type == "alex":
net_type = alexnet
self.chns = [64, 192, 384, 256, 256]
elif self.pnet_type == "squeeze":
net_type = squeezenet
self.chns = [64, 128, 256, 384, 384, 512, 512]
self.L = len(self.chns)
self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune)
if lpips:
self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
self.lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
if self.pnet_type == "squeeze": # 7 layers for squeezenet
self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout)
self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout)
self.lins += [self.lin5, self.lin6]
self.lins = nn.ModuleList(self.lins)
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path, False)
elif pretrained:
load_pretrained_network(
self, default_model_urls[f"{version}_{net}"], False
)
if eval_mode:
self.eval()
def forward(self, in1, in0, retPerLayer=False, normalize=True):
r"""Computation IQA using LPIPS.
Args:
in1: An input tensor. Shape :math:`(N, C, H, W)`.
in0: A reference tensor. Shape :math:`(N, C, H, W)`.
            retPerLayer (Boolean): Whether to also return the result of
                each layer. Default: False.
normalize (Boolean): Whether to normalize image data range
in [0,1] to [-1,1]. Default: True.
Returns:
Quality score.
"""
if (
normalize
): # turn on this flag if input is [0,1] so it can be adjusted to [-1, +1]
in0 = 2 * in0 - 1
in1 = 2 * in1 - 1
# v0.0 - original release had a bug, where input was not scaled
in0_input, in1_input = (
(self.scaling_layer(in0), self.scaling_layer(in1))
if self.version == "0.1"
else (in0, in1)
)
outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input)
feats0, feats1, diffs = {}, {}, {}
for kk in range(self.L):
feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(
outs1[kk]
)
diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
if self.lpips:
if self.spatial:
res = [
upsample(self.lins[kk](diffs[kk]), out_HW=in0.shape[2:])
for kk in range(self.L)
]
else:
res = [
spatial_average(self.lins[kk](diffs[kk]), keepdim=True)
for kk in range(self.L)
]
else:
if self.spatial:
res = [
upsample(diffs[kk].sum(dim=1, keepdim=True), out_HW=in0.shape[2:])
for kk in range(self.L)
]
else:
res = [
spatial_average(diffs[kk].sum(dim=1, keepdim=True), keepdim=True)
for kk in range(self.L)
]
val = 0
for i in range(self.L):
val += res[i]
if retPerLayer:
return (val, res)
else:
return val.squeeze()
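# Usage sketch (illustrative comment, not part of the upstream file); LPIPS is a
# full-reference metric. Assumes Nx3xHxW RGB tensors in [0, 1]; the default
# normalize=True rescales them to [-1, 1] internally.
#
#   model = LPIPS(net="alex", version="0.1", pretrained=True)
#   with torch.no_grad():
#       d = model(dist_img, ref_img)   # lower distance means more similar images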
class ScalingLayer(nn.Module):
def __init__(self):
super(ScalingLayer, self).__init__()
self.register_buffer(
"shift", torch.Tensor([-0.030, -0.088, -0.188])[None, :, None, None]
)
self.register_buffer(
"scale", torch.Tensor([0.458, 0.448, 0.450])[None, :, None, None]
)
def forward(self, inp):
return (inp - self.shift) / self.scale
class NetLinLayer(nn.Module):
"""A single linear layer which does a 1x1 conv"""
def __init__(self, chn_in, chn_out=1, use_dropout=False):
super(NetLinLayer, self).__init__()
layers = (
[
nn.Dropout(),
]
if (use_dropout)
else []
)
layers += [
nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),
]
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
class squeezenet(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True):
super(squeezenet, self).__init__()
pretrained_features = models.squeezenet1_1(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.slice6 = torch.nn.Sequential()
self.slice7 = torch.nn.Sequential()
self.N_slices = 7
for x in range(2):
self.slice1.add_module(str(x), pretrained_features[x])
for x in range(2, 5):
self.slice2.add_module(str(x), pretrained_features[x])
for x in range(5, 8):
self.slice3.add_module(str(x), pretrained_features[x])
for x in range(8, 10):
self.slice4.add_module(str(x), pretrained_features[x])
for x in range(10, 11):
self.slice5.add_module(str(x), pretrained_features[x])
for x in range(11, 12):
self.slice6.add_module(str(x), pretrained_features[x])
for x in range(12, 13):
self.slice7.add_module(str(x), pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1 = h
h = self.slice2(h)
h_relu2 = h
h = self.slice3(h)
h_relu3 = h
h = self.slice4(h)
h_relu4 = h
h = self.slice5(h)
h_relu5 = h
h = self.slice6(h)
h_relu6 = h
h = self.slice7(h)
h_relu7 = h
vgg_outputs = namedtuple(
"SqueezeOutputs",
["relu1", "relu2", "relu3", "relu4", "relu5", "relu6", "relu7"],
)
out = vgg_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5, h_relu6, h_relu7)
return out
class alexnet(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True):
super(alexnet, self).__init__()
alexnet_pretrained_features = models.alexnet(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.N_slices = 5
for x in range(2):
self.slice1.add_module(str(x), alexnet_pretrained_features[x])
for x in range(2, 5):
self.slice2.add_module(str(x), alexnet_pretrained_features[x])
for x in range(5, 8):
self.slice3.add_module(str(x), alexnet_pretrained_features[x])
for x in range(8, 10):
self.slice4.add_module(str(x), alexnet_pretrained_features[x])
for x in range(10, 12):
self.slice5.add_module(str(x), alexnet_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1 = h
h = self.slice2(h)
h_relu2 = h
h = self.slice3(h)
h_relu3 = h
h = self.slice4(h)
h_relu4 = h
h = self.slice5(h)
h_relu5 = h
alexnet_outputs = namedtuple(
"AlexnetOutputs", ["relu1", "relu2", "relu3", "relu4", "relu5"]
)
out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)
return out
class vgg16(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True):
super(vgg16, self).__init__()
vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.N_slices = 5
for x in range(4):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(4, 9):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(9, 16):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(16, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(23, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
h = self.slice5(h)
h_relu5_3 = h
vgg_outputs = namedtuple(
"VggOutputs", ["relu1_2", "relu2_2", "relu3_3", "relu4_3", "relu5_3"]
)
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
return out
class resnet(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True, num=18):
super(resnet, self).__init__()
if num == 18:
self.net = models.resnet18(pretrained=pretrained)
elif num == 34:
self.net = models.resnet34(pretrained=pretrained)
elif num == 50:
self.net = models.resnet50(pretrained=pretrained)
elif num == 101:
self.net = models.resnet101(pretrained=pretrained)
elif num == 152:
self.net = models.resnet152(pretrained=pretrained)
self.N_slices = 5
self.conv1 = self.net.conv1
self.bn1 = self.net.bn1
self.relu = self.net.relu
self.maxpool = self.net.maxpool
self.layer1 = self.net.layer1
self.layer2 = self.net.layer2
self.layer3 = self.net.layer3
self.layer4 = self.net.layer4
def forward(self, X):
h = self.conv1(X)
h = self.bn1(h)
h = self.relu(h)
h_relu1 = h
h = self.maxpool(h)
h = self.layer1(h)
h_conv2 = h
h = self.layer2(h)
h_conv3 = h
h = self.layer3(h)
h_conv4 = h
h = self.layer4(h)
h_conv5 = h
outputs = namedtuple("Outputs", ["relu1", "conv2", "conv3", "conv4", "conv5"])
out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
return out
| 14,838 | 34.670673 | 132 | py |
BVQI | BVQI-master/pyiqa/archs/hypernet_arch.py | r"""HyperNet Metric
Created by: https://github.com/SSL92/hyperIQA
Modified by: Chaofeng Chen (https://github.com/chaofengc)
"""
import timm
import torch
import torch.nn as nn
from pyiqa.utils.registry import ARCH_REGISTRY
@ARCH_REGISTRY.register()
class HyperNet(nn.Module):
"""HyperNet Model.
Args:
base_model_name (String): pretrained model to extract features,
            can be any model supported by timm. Default: resnet50.
pretrained_model_path (String): Pretrained model path.
default_mean (list): Default mean value.
default_std (list): Default std value.
Reference:
Su, Shaolin, Qingsen Yan, Yu Zhu, Cheng Zhang, Xin Ge,
Jinqiu Sun, and Yanning Zhang. "Blindly assess image
quality in the wild guided by a self-adaptive hyper network."
In Proceedings of the IEEE/CVF Conference on Computer Vision
and Pattern Recognition (CVPR), pp. 3667-3676. 2020.
"""
def __init__(
self,
base_model_name="resnet50",
pretrained_model_path=None,
default_mean=[0.485, 0.456, 0.406],
default_std=[0.229, 0.224, 0.225],
):
super(HyperNet, self).__init__()
self.base_model = timm.create_model(
base_model_name, pretrained=True, features_only=True
)
lda_out_channels = 16
hyper_in_channels = 112
target_in_size = 224
hyper_fc_channels = [112, 56, 28, 14, 1]
feature_size = 7 # spatial size of the last features from base model
self.hyper_fc_channels = hyper_fc_channels
# local distortion aware module
self.lda_modules = nn.ModuleList(
[
nn.Sequential(
nn.Conv2d(256, 16, kernel_size=1, stride=1, padding=0, bias=False),
nn.AvgPool2d(7, stride=7),
nn.Flatten(),
nn.Linear(16 * 64, lda_out_channels),
),
nn.Sequential(
nn.Conv2d(512, 32, kernel_size=1, stride=1, padding=0, bias=False),
nn.AvgPool2d(7, stride=7),
nn.Flatten(),
nn.Linear(32 * 16, lda_out_channels),
),
nn.Sequential(
nn.Conv2d(1024, 64, kernel_size=1, stride=1, padding=0, bias=False),
nn.AvgPool2d(7, stride=7),
nn.Flatten(),
nn.Linear(64 * 4, lda_out_channels),
),
nn.Sequential(
nn.AvgPool2d(7, stride=7),
nn.Flatten(),
nn.Linear(2048, target_in_size - lda_out_channels * 3),
),
]
)
# Hyper network part, conv for generating target fc weights, fc for generating target fc biases
self.fc_w_modules = nn.ModuleList([])
for i in range(4):
if i == 0:
out_ch = int(target_in_size * hyper_fc_channels[i] / feature_size ** 2)
else:
out_ch = int(
hyper_fc_channels[i - 1] * hyper_fc_channels[i] / feature_size ** 2
)
self.fc_w_modules.append(
nn.Conv2d(hyper_in_channels, out_ch, 3, padding=(1, 1)),
)
self.fc_w_modules.append(
nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Linear(hyper_in_channels, hyper_fc_channels[3]),
)
)
self.fc_b_modules = nn.ModuleList([])
for i in range(5):
self.fc_b_modules.append(
nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Linear(hyper_in_channels, hyper_fc_channels[i]),
)
)
# Conv layers for resnet output features
self.conv1 = nn.Sequential(
nn.Conv2d(2048, 1024, 1, padding=(0, 0)),
nn.ReLU(inplace=True),
nn.Conv2d(1024, 512, 1, padding=(0, 0)),
nn.ReLU(inplace=True),
nn.Conv2d(512, hyper_in_channels, 1, padding=(0, 0)),
nn.ReLU(inplace=True),
)
self.global_pool = nn.Sequential()
self.default_mean = torch.Tensor(default_mean).view(1, 3, 1, 1)
self.default_std = torch.Tensor(default_std).view(1, 3, 1, 1)
    def load_pretrained_network(self, model_path):
        state_dict = torch.load(model_path, map_location=torch.device("cpu"))[
            "state_dict"
        ]
        # load into this module directly; HyperNet has no separate self.net submodule
        self.load_state_dict(state_dict, strict=True)
def preprocess(self, x):
# input must have shape of (224, 224) because of network design
if x.shape[2:] != torch.Size([224, 224]):
x = nn.functional.interpolate(x, (224, 224), mode="bicubic")
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
return x
def random_crop_test(self, x, sample_num=25):
b, c, h, w = x.shape
th = tw = 224
cropped_x = []
for s in range(sample_num):
i = torch.randint(0, h - th + 1, size=(1,)).item()
j = torch.randint(0, w - tw + 1, size=(1,)).item()
cropped_x.append(x[:, :, i : i + th, j : j + tw])
cropped_x = torch.cat(cropped_x, dim=0)
results = self.forward_patch(cropped_x)
results = results.reshape(sample_num, b).mean(dim=0)
return results.unsqueeze(-1)
def forward_patch(self, x):
assert x.shape[2:] == torch.Size(
[224, 224]
), f"Input patch size must be (224, 224), but got {x.shape[2:]}"
x = self.preprocess(x)
base_feats = self.base_model(x)[1:]
# multi-scale local distortion aware features
lda_feat_list = []
for bf, ldam in zip(base_feats, self.lda_modules):
lda_feat_list.append(ldam(bf))
lda_feat = torch.cat(lda_feat_list, dim=1)
# calculate target net weights & bias
target_fc_w = []
target_fc_b = []
hyper_in_feat = self.conv1(base_feats[-1])
batch_size = hyper_in_feat.shape[0]
for i in range(len(self.fc_w_modules)):
tmp_fc_w = self.fc_w_modules[i](hyper_in_feat).reshape(
batch_size, self.hyper_fc_channels[i], -1
)
target_fc_w.append(tmp_fc_w)
target_fc_b.append(self.fc_b_modules[i](hyper_in_feat))
# get final IQA score
x = lda_feat.unsqueeze(1)
for i in range(len(target_fc_w)):
if i != 4:
x = torch.sigmoid(
torch.bmm(x, target_fc_w[i].transpose(1, 2))
+ target_fc_b[i].unsqueeze(1)
)
else:
x = torch.bmm(x, target_fc_w[i].transpose(1, 2)) + target_fc_b[
i
].unsqueeze(1)
return x.squeeze(-1)
def forward(self, x):
r"""HYPERNET model.
Args:
x: A distortion tensor. Shape :math:`(N, C, H, W)`.
"""
# imagenet normalization of input is hard coded
if self.training:
return self.forward_patch(x)
else:
return self.random_crop_test(x)
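# Usage sketch (illustrative comment, not part of the upstream file); assumes an
# Nx3xHxW RGB tensor in [0, 1] with H, W >= 224 so the random test crops fit.
#
#   model = HyperNet()
#   # model.load_pretrained_network("hypernet.pth")  # optional; the path is a placeholder
#   model.eval()
#   with torch.no_grad():
#       score = model(img)   # (N, 1), averaged over 25 random crops at test time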
| 7,284 | 34.364078 | 103 | py |
BVQI | BVQI-master/pyiqa/archs/ssim_arch.py | r"""SSIM, MS-SSIM, CW-SSIM Metric
Created by:
- https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/SSIM.py
- https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/MS_SSIM.py
- https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/CW_SSIM.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
- Offical SSIM matlab code from https://www.cns.nyu.edu/~lcv/ssim/;
- PIQ from https://github.com/photosynthesis-team/piq;
- BasicSR from https://github.com/xinntao/BasicSR/blob/master/basicsr/metrics/psnr_ssim.py;
- Offical MS-SSIM matlab code from https://ece.uwaterloo.ca/~z70wang/research/iwssim/msssim.zip;
- Offical CW-SSIM matlab code from
https://www.mathworks.com/matlabcentral/mlc-downloads/downloads/submissions/43017/versions/1/download/zip;
"""
import numpy as np
import torch
import torch.nn.functional as F
from pyiqa.matlab_utils import SCFpyr_PyTorch, filter2, fspecial, math_util
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
def ssim(
X,
Y,
win,
get_ssim_map=False,
get_cs=False,
get_weight=False,
downsample=False,
data_range=1.0,
test_y_channel=True,
color_space="yiq",
):
data_range = 255
# Whether calculate on y channel of ycbcr
if test_y_channel and X.shape[1] == 3:
X = to_y_channel(X, data_range, color_space)
Y = to_y_channel(Y, data_range, color_space)
else:
X = X * data_range
X = X - X.detach() + X.round()
Y = Y * data_range
Y = Y - Y.detach() + Y.round()
C1 = (0.01 * data_range) ** 2
C2 = (0.03 * data_range) ** 2
# Averagepool image if the size is large enough
f = max(1, round(min(X.size()[-2:]) / 256))
# Downsample operation is used in official matlab code
if (f > 1) and downsample:
X = F.avg_pool2d(X, kernel_size=f)
Y = F.avg_pool2d(Y, kernel_size=f)
win = win.to(X.device)
mu1 = filter2(X, win, "valid")
mu2 = filter2(Y, win, "valid")
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = filter2(X * X, win, "valid") - mu1_sq
sigma2_sq = filter2(Y * Y, win, "valid") - mu2_sq
sigma12 = filter2(X * Y, win, "valid") - mu1_mu2
cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
cs_map = F.relu(
cs_map
) # force the ssim response to be nonnegative to avoid negative results.
ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map
ssim_val = ssim_map.mean([1, 2, 3])
if get_weight:
weights = torch.log((1 + sigma1_sq / C2) * (1 + sigma2_sq / C2))
return ssim_map, weights
if get_ssim_map:
return ssim_map
if get_cs:
return ssim_val, cs_map.mean([1, 2, 3])
return ssim_val
@ARCH_REGISTRY.register()
class SSIM(torch.nn.Module):
r"""Args:
channel: number of channel.
downsample: boolean, whether to downsample same as official matlab code.
test_y_channel: boolean, whether to use y channel on ycbcr same as official matlab code.
"""
def __init__(
self,
channels=3,
downsample=False,
test_y_channel=True,
color_space="yiq",
crop_border=0.0,
):
super(SSIM, self).__init__()
self.win = fspecial(11, 1.5, channels)
self.downsample = downsample
self.test_y_channel = test_y_channel
self.color_space = color_space
self.crop_border = crop_border
def forward(self, X, Y):
assert (
X.shape == Y.shape
), f"Input {X.shape} and reference images should have the same shape"
if self.crop_border != 0:
crop_border = self.crop_border
X = X[..., crop_border:-crop_border, crop_border:-crop_border]
Y = Y[..., crop_border:-crop_border, crop_border:-crop_border]
score = ssim(
X,
Y,
win=self.win,
downsample=self.downsample,
test_y_channel=self.test_y_channel,
color_space=self.color_space,
)
return score
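# Usage sketch (illustrative comment, not part of the upstream file); assumes
# Nx3xHxW RGB tensors in [0, 1].
#
#   ssim_metric = SSIM(test_y_channel=True, downsample=True)
#   score = ssim_metric(dist_img, ref_img)   # (N,) values, higher is better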
def ms_ssim(
X,
Y,
win,
data_range=1.0,
downsample=False,
test_y_channel=True,
is_prod=True,
color_space="yiq",
):
r"""Compute Multiscale structural similarity for a batch of images.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
win: Window setting.
downsample: Boolean, whether to downsample which mimics official SSIM matlab code.
test_y_channel: Boolean, whether to use y channel on ycbcr.
is_prod: Boolean, calculate product or sum between mcs and weight.
Returns:
        Index of similarity between two images. Usually in [0, 1] interval.
"""
if not X.shape == Y.shape:
raise ValueError("Input images must have the same dimensions.")
weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(X)
levels = weights.shape[0]
mcs = []
for _ in range(levels):
ssim_val, cs = ssim(
X,
Y,
win=win,
get_cs=True,
downsample=downsample,
data_range=data_range,
test_y_channel=test_y_channel,
color_space=color_space,
)
mcs.append(cs)
padding = (X.shape[2] % 2, X.shape[3] % 2)
X = F.avg_pool2d(X, kernel_size=2, padding=padding)
Y = F.avg_pool2d(Y, kernel_size=2, padding=padding)
mcs = torch.stack(mcs, dim=0)
if is_prod:
msssim_val = torch.prod((mcs[:-1] ** weights[:-1].unsqueeze(1)), dim=0) * (
ssim_val ** weights[-1]
)
else:
weights = weights / torch.sum(weights)
msssim_val = torch.sum((mcs[:-1] * weights[:-1].unsqueeze(1)), dim=0) + (
ssim_val * weights[-1]
)
return msssim_val
@ARCH_REGISTRY.register()
class MS_SSIM(torch.nn.Module):
r"""Multiscale structure similarity
References:
Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik. "Multiscale structural similarity for image
quality assessment." In The Thrity-Seventh Asilomar Conference on Signals, Systems & Computers,
2003, vol. 2, pp. 1398-1402. Ieee, 2003.
Args:
        channels: Number of channels.
downsample: Boolean, whether to downsample which mimics official SSIM matlab code.
test_y_channel: Boolean, whether to use y channel on ycbcr which mimics official matlab code.
"""
def __init__(
self,
channels=3,
downsample=False,
test_y_channel=True,
is_prod=True,
color_space="yiq",
):
super(MS_SSIM, self).__init__()
self.win = fspecial(11, 1.5, channels)
self.downsample = downsample
self.test_y_channel = test_y_channel
self.color_space = color_space
self.is_prod = is_prod
def forward(self, X, Y):
"""Computation of MS-SSIM metric.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of MS-SSIM metric in [0, 1] range.
"""
        assert (
            X.shape == Y.shape
        ), f"Input and reference images should have the same shape, but got {X.shape} and {Y.shape}"
score = ms_ssim(
X,
Y,
win=self.win,
downsample=self.downsample,
test_y_channel=self.test_y_channel,
is_prod=self.is_prod,
color_space=self.color_space,
)
return score
@ARCH_REGISTRY.register()
class CW_SSIM(torch.nn.Module):
r"""Complex-Wavelet Structural SIMilarity (CW-SSIM) index.
References:
M. P. Sampat, Z. Wang, S. Gupta, A. C. Bovik, M. K. Markey.
"Complex Wavelet Structural Similarity: A New Image Similarity Index",
IEEE Transactions on Image Processing, 18(11), 2385-401, 2009.
Args:
        channels: Number of channels.
test_y_channel: Boolean, whether to use y channel on ycbcr.
level: The number of levels to used in the complex steerable pyramid decomposition
ori: The number of orientations to be used in the complex steerable pyramid decomposition
guardb: How much is discarded from the four image boundaries.
K: the constant in the CWSSIM index formula (see the above reference) default value: K=0
"""
def __init__(
self,
channels=1,
level=4,
ori=8,
guardb=0,
K=0,
test_y_channel=True,
color_space="yiq",
):
super(CW_SSIM, self).__init__()
self.channels = channels
self.level = level
self.ori = ori
self.guardb = guardb
self.K = K
self.test_y_channel = test_y_channel
self.color_space = color_space
self.register_buffer("win7", torch.ones(channels, 1, 7, 7) / (7 * 7))
def conj(self, x, y):
a = x[..., 0]
b = x[..., 1]
c = y[..., 0]
d = -y[..., 1]
return torch.stack((a * c - b * d, b * c + a * d), dim=1)
def conv2d_complex(self, x, win, groups=1):
real = F.conv2d(x[:, 0, ...].unsqueeze(1), win, groups=groups)
imaginary = F.conv2d(x[:, 1, ...].unsqueeze(1), win, groups=groups)
return torch.stack((real, imaginary), dim=-1)
def cw_ssim(self, x, y, test_y_channel):
r"""Compute CW-SSIM for a batch of images.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
test_y_channel: Boolean, whether to use y channel on ycbcr.
Returns:
            Index of similarity between two images. Usually in [0, 1] interval.
"""
# Whether calculate on y channel of ycbcr
if test_y_channel and x.shape[1] == 3:
x = to_y_channel(x, 255, self.color_space)
y = to_y_channel(y, 255, self.color_space)
pyr = SCFpyr_PyTorch(
height=self.level, nbands=self.ori, scale_factor=2, device=x.device
)
cw_x = pyr.build(x)
cw_y = pyr.build(y)
bandind = self.level
band_cssim = []
s = np.array(cw_x[bandind][0].size()[1:3])
w = fspecial(s - 7 + 1, s[0] / 4, 1).to(x.device)
gb = int(self.guardb / (2 ** (self.level - 1)))
for i in range(self.ori):
band1 = cw_x[bandind][i]
band2 = cw_y[bandind][i]
band1 = band1[:, gb : s[0] - gb, gb : s[1] - gb, :]
band2 = band2[:, gb : s[0] - gb, gb : s[1] - gb, :]
corr = self.conj(band1, band2)
corr_band = self.conv2d_complex(corr, self.win7, groups=self.channels)
varr = (
(math_util.abs(band1)) ** 2 + (math_util.abs(band2)) ** 2
).unsqueeze(1)
varr_band = F.conv2d(
varr, self.win7, stride=1, padding=0, groups=self.channels
)
cssim_map = (2 * math_util.abs(corr_band) + self.K) / (varr_band + self.K)
band_cssim.append(
(cssim_map * w.repeat(cssim_map.shape[0], 1, 1, 1)).sum([2, 3]).mean(1)
)
return torch.stack(band_cssim, dim=1).mean(1)
def forward(self, X, Y):
r"""Computation of CW-SSIM metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of CW-SSIM metric in [0, 1] range.
"""
assert (
X.shape == Y.shape
), f"Input {X.shape} and reference images should have the same shape"
score = self.cw_ssim(X, Y, self.test_y_channel)
return score
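# Usage sketch (illustrative comment, not part of the upstream file); CW-SSIM is
# computed on a complex steerable pyramid of the (Y-channel) inputs. Assumes
# Nx3xHxW RGB tensors in [0, 1].
#
#   cwssim_metric = CW_SSIM(level=4, ori=8, test_y_channel=True)
#   score = cwssim_metric(dist_img, ref_img)   # (N,) values, roughly in [0, 1]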
| 11,902 | 31.433243 | 110 | py |
BVQI | BVQI-master/pyiqa/archs/ahiq_arch.py | import numpy as np
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.resnet import BasicBlock, Bottleneck
from timm.models.vision_transformer import Block
from torchvision.ops.deform_conv import DeformConv2d
from pyiqa.archs.arch_util import (
ExactPadding2d,
default_init_weights,
load_file_from_url,
load_pretrained_network,
to_2tuple,
)
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"pipal": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/AHIQ_vit_p8_epoch33-da3ea303.pth"
}
def random_crop(x, y, crop_size, crop_num):
b, c, h, w = x.shape
ch, cw = to_2tuple(crop_size)
crops_x = []
crops_y = []
for i in range(crop_num):
sh = np.random.randint(0, h - ch)
sw = np.random.randint(0, w - cw)
crops_x.append(x[..., sh : sh + ch, sw : sw + cw])
crops_y.append(y[..., sh : sh + ch, sw : sw + cw])
crops_x = torch.stack(crops_x, dim=1)
crops_y = torch.stack(crops_y, dim=1)
return crops_x.reshape(b * crop_num, c, ch, cw), crops_y.reshape(
b * crop_num, c, ch, cw
)
class SaveOutput:
def __init__(self):
self.outputs = {}
def __call__(self, module, module_in, module_out):
if module_out.device in self.outputs.keys():
self.outputs[module_out.device].append(module_out)
else:
self.outputs[module_out.device] = [module_out]
def clear(self, device):
self.outputs[device] = []
class DeformFusion(nn.Module):
def __init__(
self,
patch_size=8,
in_channels=768 * 5,
cnn_channels=256 * 3,
out_channels=256 * 3,
):
super().__init__()
# in_channels, out_channels, kernel_size, stride, padding
self.d_hidn = 512
if patch_size == 8:
stride = 1
else:
stride = 2
self.conv_offset = nn.Conv2d(in_channels, 2 * 3 * 3, 3, 1, 1)
self.deform = DeformConv2d(cnn_channels, out_channels, 3, 1, 1)
self.conv1 = nn.Sequential(
nn.Conv2d(
in_channels=out_channels,
out_channels=self.d_hidn,
kernel_size=3,
padding=1,
stride=2,
),
nn.ReLU(),
nn.Conv2d(
in_channels=self.d_hidn,
out_channels=out_channels,
kernel_size=3,
padding=1,
stride=stride,
),
)
def forward(self, cnn_feat, vit_feat):
vit_feat = F.interpolate(vit_feat, size=cnn_feat.shape[-2:], mode="nearest")
offset = self.conv_offset(vit_feat)
deform_feat = self.deform(cnn_feat, offset)
deform_feat = self.conv1(deform_feat)
return deform_feat
class Pixel_Prediction(nn.Module):
def __init__(self, inchannels=768 * 5 + 256 * 3, outchannels=256, d_hidn=1024):
super().__init__()
self.d_hidn = d_hidn
self.down_channel = nn.Conv2d(inchannels, outchannels, kernel_size=1)
self.feat_smoothing = nn.Sequential(
nn.Conv2d(
in_channels=256 * 3, out_channels=self.d_hidn, kernel_size=3, padding=1
),
nn.ReLU(),
nn.Conv2d(
in_channels=self.d_hidn, out_channels=512, kernel_size=3, padding=1
),
)
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(),
)
self.conv_attent = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1), nn.Sigmoid()
)
self.conv = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1),
)
def forward(self, f_dis, f_ref, cnn_dis, cnn_ref):
f_dis = torch.cat((f_dis, cnn_dis), 1)
f_ref = torch.cat((f_ref, cnn_ref), 1)
f_dis = self.down_channel(f_dis)
f_ref = self.down_channel(f_ref)
f_cat = torch.cat((f_dis - f_ref, f_dis, f_ref), 1)
feat_fused = self.feat_smoothing(f_cat)
feat = self.conv1(feat_fused)
f = self.conv(feat)
w = self.conv_attent(feat)
pred = (f * w).sum(dim=-1).sum(dim=-1) / w.sum(dim=-1).sum(dim=-1)
return pred
@ARCH_REGISTRY.register()
class AHIQ(nn.Module):
def __init__(
self,
num_crop=20,
crop_size=224,
default_mean=[0.485, 0.456, 0.406],
default_std=[0.229, 0.224, 0.225],
pretrained=True,
pretrained_model_path=None,
):
super().__init__()
self.resnet50 = timm.create_model("resnet50", pretrained=True)
self.vit = timm.create_model("vit_base_patch8_224", pretrained=True)
self.fix_network(self.resnet50)
self.fix_network(self.vit)
self.deform_net = DeformFusion()
self.regressor = Pixel_Prediction()
# register hook to get intermediate features
self.init_saveoutput()
self.default_mean = torch.Tensor(default_mean).view(1, 3, 1, 1)
self.default_std = torch.Tensor(default_std).view(1, 3, 1, 1)
if pretrained_model_path is not None:
load_pretrained_network(
self, pretrained_model_path, True, weight_keys="params"
)
elif pretrained:
weight_path = load_file_from_url(default_model_urls["pipal"])
checkpoint = torch.load(weight_path)
self.regressor.load_state_dict(checkpoint["regressor_model_state_dict"])
self.deform_net.load_state_dict(checkpoint["deform_net_model_state_dict"])
self.eps = 1e-12
self.crops = num_crop
self.crop_size = crop_size
def init_saveoutput(self):
self.save_output = SaveOutput()
hook_handles = []
for layer in self.resnet50.modules():
if isinstance(layer, Bottleneck):
handle = layer.register_forward_hook(self.save_output)
hook_handles.append(handle)
for layer in self.vit.modules():
if isinstance(layer, Block):
handle = layer.register_forward_hook(self.save_output)
hook_handles.append(handle)
def fix_network(self, model):
for p in model.parameters():
p.requires_grad = False
def preprocess(self, x):
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
return x
@torch.no_grad()
def get_vit_feature(self, x):
self.vit(x)
feat = torch.cat(
(
self.save_output.outputs[x.device][0][:, 1:, :],
self.save_output.outputs[x.device][1][:, 1:, :],
self.save_output.outputs[x.device][2][:, 1:, :],
self.save_output.outputs[x.device][3][:, 1:, :],
self.save_output.outputs[x.device][4][:, 1:, :],
),
dim=2,
)
self.save_output.clear(x.device)
return feat
@torch.no_grad()
def get_resnet_feature(self, x):
self.resnet50(x)
feat = torch.cat(
(
self.save_output.outputs[x.device][0],
self.save_output.outputs[x.device][1],
self.save_output.outputs[x.device][2],
),
dim=1,
)
self.save_output.clear(x.device)
return feat
def regress_score(self, dis, ref):
self.resnet50.eval()
self.vit.eval()
dis = self.preprocess(dis)
ref = self.preprocess(ref)
vit_dis = self.get_vit_feature(dis)
vit_ref = self.get_vit_feature(ref)
B, N, C = vit_ref.shape
H, W = 28, 28
vit_ref = vit_ref.transpose(1, 2).view(B, C, H, W)
vit_dis = vit_dis.transpose(1, 2).view(B, C, H, W)
cnn_dis = self.get_resnet_feature(dis)
cnn_ref = self.get_resnet_feature(ref)
cnn_dis = self.deform_net(cnn_dis, vit_ref)
cnn_ref = self.deform_net(cnn_ref, vit_ref)
score = self.regressor(vit_dis, vit_ref, cnn_dis, cnn_ref)
return score
def forward(self, x, y):
bsz = x.shape[0]
if self.crops > 1 and not self.training:
x, y = random_crop(x, y, self.crop_size, self.crops)
score = self.regress_score(x, y)
score = score.reshape(bsz, self.crops, 1)
score = score.mean(dim=1)
else:
score = self.regress_score(x, y)
return score
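# Usage sketch (illustrative comment, not part of the upstream file); AHIQ is a
# full-reference metric. Assumes Nx3xHxW RGB tensors in [0, 1] with H, W > 224
# so the 20 random 224x224 test crops can be sampled.
#
#   model = AHIQ(pretrained=True).eval()
#   with torch.no_grad():
#       score = model(dist_img, ref_img)   # (N, 1), averaged over the crops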
| 8,605 | 30.992565 | 119 | py |
BVQI | BVQI-master/pyiqa/archs/paq2piq_arch.py | r"""Paq2piq metric, proposed by
Ying, Zhenqiang, Haoran Niu, Praful Gupta, Dhruv Mahajan, Deepti Ghadiyaram, and Alan Bovik.
"From patches to pictures (PaQ-2-PiQ): Mapping the perceptual space of picture quality."
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3575-3585. 2020.
Ref url: https://github.com/baidut/paq2piq/blob/master/paq2piq/model.py
Modified by: Chaofeng Chen (https://github.com/chaofengc)
"""
import torch
import torch.nn as nn
import torchvision as tv
from torchvision.ops import RoIPool
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/"
"P2P_RoIPoolModel-fit.10.bs.120-ca69882e.pth",
}
class AdaptiveConcatPool2d(nn.Module):
def __init__(self, sz=None):
super().__init__()
sz = sz or (1, 1)
self.ap = nn.AdaptiveAvgPool2d(sz)
self.mp = nn.AdaptiveMaxPool2d(sz)
def forward(self, x):
return torch.cat([self.mp(x), self.ap(x)], 1)
@ARCH_REGISTRY.register()
class PAQ2PIQ(nn.Module):
def __init__(
self, backbone="resnet18", pretrained=True, pretrained_model_path=None
):
super(PAQ2PIQ, self).__init__()
if backbone == "resnet18":
model = tv.models.resnet18(pretrained=False)
cut = -2
spatial_scale = 1 / 32
self.blk_size = 20, 20
self.model_type = self.__class__.__name__
self.body = nn.Sequential(*list(model.children())[:cut])
self.head = nn.Sequential(
AdaptiveConcatPool2d(),
nn.Flatten(),
nn.BatchNorm1d(
1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
),
nn.Dropout(p=0.25, inplace=False),
nn.Linear(in_features=1024, out_features=512, bias=True),
nn.ReLU(inplace=True),
nn.BatchNorm1d(
512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
),
nn.Dropout(p=0.5, inplace=False),
nn.Linear(in_features=512, out_features=1, bias=True),
)
self.roi_pool = RoIPool((2, 2), spatial_scale)
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path)
elif pretrained:
load_pretrained_network(self, default_model_urls["url"])
def forward(self, x):
im_data = x
batch_size = im_data.shape[0]
feats = self.body(im_data)
global_rois = torch.tensor([0, 0, x.shape[-1], x.shape[-2]]).reshape(1, 4).to(x)
feats = self.roi_pool(feats, [global_rois] * batch_size)
preds = self.head(feats)
return preds.view(batch_size, -1)
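# Usage sketch (illustrative comment, not part of the upstream file); note that this
# module applies no normalization itself, so inputs should match whatever
# preprocessing the pretrained weights expect (an Nx3xHxW tensor is assumed here).
#
#   model = PAQ2PIQ(pretrained=True).eval()
#   with torch.no_grad():
#       score = model(img)   # (N, 1) global quality score from the RoI head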
| 2,853 | 32.186047 | 106 | py |
BVQI | BVQI-master/pyiqa/archs/ckdn_arch.py | """CKDN model.
Created by: Chaofeng Chen (https://github.com/chaofengc)
Refer to:
https://github.com/researchmm/CKDN.
"""
import math
import torch
import torch.nn as nn
import torchvision as tv
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/CKDN_model_best-38b27dc6.pth"
}
model_urls = {
"resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ["downsample"]
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ["downsample"]
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block,
layers,
num_classes=1000,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
self.k = 3
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
self.head = 8
self.qse_1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.qse_2 = self._make_layer(block, 64, layers[0])
self.csp = self._make_layer(block, 128, layers[1], stride=2, dilate=False)
self.inplanes = 64
self.dte_1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.dte_2 = self._make_layer(block, 64, layers[0])
self.aux_csp = self._make_layer(block, 128, layers[1], stride=2, dilate=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc_ = nn.Sequential(
nn.Linear((512) * 1 * 1, 2048),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(2048, 2048),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(2048, 1),
)
self.fc1_ = nn.Sequential(
nn.Linear((512) * 1 * 1, 2048),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(2048, 2048),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(2048, 1),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
)
)
return nn.Sequential(*layers)
def forward(self, x, y):
rest1 = x
dist1 = y
rest1 = self.qse_2(self.maxpool(self.qse_1(rest1)))
dist1 = self.dte_2(self.maxpool(self.dte_1(dist1)))
x = rest1 - dist1
x = self.csp(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
dr = torch.sigmoid(self.fc_(x))
return dr
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
keys = state_dict.keys()
for key in list(keys):
if "conv1" in key:
state_dict[key.replace("conv1", "qse_1")] = state_dict[key]
state_dict[key.replace("conv1", "dte_1")] = state_dict[key]
if "layer1" in key:
state_dict[key.replace("layer1", "qse_2")] = state_dict[key]
state_dict[key.replace("layer1", "dte_2")] = state_dict[key]
if "layer2" in key:
state_dict[key.replace("layer2", "csp")] = state_dict[key]
state_dict[key.replace("layer2", "aux_csp")] = state_dict[key]
model.load_state_dict(state_dict, strict=False)
return model
@ARCH_REGISTRY.register()
class CKDN(nn.Module):
r"""CKDN metric.
Args:
pretrained_model_path (String): The model path.
        use_default_preprocess (Boolean): Whether to use the default preprocessing. Default: True.
        default_mean (tuple): The mean value used to normalize inputs.
        default_std (tuple): The std value used to normalize inputs.
Reference:
Zheng, Heliang, Huan Yang, Jianlong Fu, Zheng-Jun Zha, and Jiebo Luo.
"Learning conditional knowledge distillation for degraded-reference image
quality assessment." In Proceedings of the IEEE/CVF International Conference
on Computer Vision (ICCV), pp. 10242-10251. 2021.
"""
def __init__(
self,
pretrained=True,
pretrained_model_path=None,
use_default_preprocess=True,
default_mean=(0.485, 0.456, 0.406),
default_std=(0.229, 0.224, 0.225),
**kwargs,
):
super().__init__()
self.net = _resnet("resnet50", Bottleneck, [3, 4, 6, 3], True, True, **kwargs)
self.use_default_preprocess = use_default_preprocess
self.default_mean = torch.Tensor(default_mean).view(1, 3, 1, 1)
self.default_std = torch.Tensor(default_std).view(1, 3, 1, 1)
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path)
elif pretrained:
load_pretrained_network(self, default_model_urls["url"])
def _default_preprocess(self, x, y):
"""default preprocessing of CKDN: https://github.com/researchmm/CKDN
        Useful when using this metric as a loss.
        Results differ slightly from the official code because PIL Image resizing and the PyTorch interpolate function behave differently.
Args:
x, y:
shape, (N, C, H, W) in RGB format;
value range, 0 ~ 1
"""
scaled_size = int(math.floor(288 / 0.875))
x = tv.transforms.functional.resize(
x, scaled_size, tv.transforms.InterpolationMode.BICUBIC
)
y = tv.transforms.functional.resize(
y, scaled_size, tv.transforms.InterpolationMode.NEAREST
)
x = tv.transforms.functional.center_crop(x, 288)
y = tv.transforms.functional.center_crop(y, 288)
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
y = (y - self.default_mean.to(y)) / self.default_std.to(y)
return x, y
def forward(self, x, y):
r"""Compute IQA using CKDN model.
Args:
x: An input tensor with (N, C, H, W) shape. RGB channel order for colour images.
            y: A reference tensor with (N, C, H, W) shape. RGB channel order for colour images.
Returns:
Value of CKDN model.
"""
if self.use_default_preprocess:
x, y = self._default_preprocess(x, y)
return self.net(x, y)
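# A minimal usage sketch (assumes network access so that the pretrained ResNet50
# and CKDN weights can be downloaded; the image size and random tensors are
# purely illustrative).
if __name__ == "__main__":
    import torch

    model = CKDN(pretrained=True).eval()
    restored = torch.rand(1, 3, 384, 384)  # restored / distorted image, RGB in [0, 1]
    degraded = torch.rand(1, 3, 384, 384)  # degraded reference image, RGB in [0, 1]
    with torch.no_grad():
        score = model(restored, degraded)  # quality prediction of shape (N, 1)
    print(score.shape)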
| 12,466 | 30.803571 | 118 | py |
BVQI | BVQI-master/pyiqa/archs/dists_arch.py | r"""DISTS metric
Created by: https://github.com/dingkeyan93/DISTS/blob/master/DISTS_pytorch/DISTS_pt.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DISTS_weights-f5e65c96.pth"
}
class L2pooling(nn.Module):
def __init__(self, filter_size=5, stride=2, channels=None, pad_off=0):
super(L2pooling, self).__init__()
self.padding = (filter_size - 2) // 2
self.stride = stride
self.channels = channels
a = np.hanning(filter_size)[1:-1]
g = torch.Tensor(a[:, None] * a[None, :])
g = g / torch.sum(g)
self.register_buffer(
"filter", g[None, None, :, :].repeat((self.channels, 1, 1, 1))
)
def forward(self, input):
input = input ** 2
out = F.conv2d(
input,
self.filter,
stride=self.stride,
padding=self.padding,
groups=input.shape[1],
)
return (out + 1e-12).sqrt()
@ARCH_REGISTRY.register()
class DISTS(torch.nn.Module):
r"""DISTS model.
Args:
pretrained_model_path (String): Pretrained model path.
"""
    def __init__(self, pretrained=True, pretrained_model_path=None, **kwargs):
        """Refer to official code https://github.com/dingkeyan93/DISTS"""
super(DISTS, self).__init__()
vgg_pretrained_features = models.vgg16(pretrained=True).features
self.stage1 = torch.nn.Sequential()
self.stage2 = torch.nn.Sequential()
self.stage3 = torch.nn.Sequential()
self.stage4 = torch.nn.Sequential()
self.stage5 = torch.nn.Sequential()
for x in range(0, 4):
self.stage1.add_module(str(x), vgg_pretrained_features[x])
self.stage2.add_module(str(4), L2pooling(channels=64))
for x in range(5, 9):
self.stage2.add_module(str(x), vgg_pretrained_features[x])
self.stage3.add_module(str(9), L2pooling(channels=128))
for x in range(10, 16):
self.stage3.add_module(str(x), vgg_pretrained_features[x])
self.stage4.add_module(str(16), L2pooling(channels=256))
for x in range(17, 23):
self.stage4.add_module(str(x), vgg_pretrained_features[x])
self.stage5.add_module(str(23), L2pooling(channels=512))
for x in range(24, 30):
self.stage5.add_module(str(x), vgg_pretrained_features[x])
for param in self.parameters():
param.requires_grad = False
self.register_buffer(
"mean", torch.tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)
)
self.register_buffer(
"std", torch.tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)
)
self.chns = [3, 64, 128, 256, 512, 512]
self.register_parameter(
"alpha", nn.Parameter(torch.randn(1, sum(self.chns), 1, 1))
)
self.register_parameter(
"beta", nn.Parameter(torch.randn(1, sum(self.chns), 1, 1))
)
self.alpha.data.normal_(0.1, 0.01)
self.beta.data.normal_(0.1, 0.01)
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path, False)
elif pretrained:
load_pretrained_network(self, default_model_urls["url"], False)
def forward_once(self, x):
h = (x - self.mean) / self.std
h = self.stage1(h)
h_relu1_2 = h
h = self.stage2(h)
h_relu2_2 = h
h = self.stage3(h)
h_relu3_3 = h
h = self.stage4(h)
h_relu4_3 = h
h = self.stage5(h)
h_relu5_3 = h
return [x, h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3]
def forward(self, x, y):
r"""Compute IQA using DISTS model.
Args:
x: An input tensor with (N, C, H, W) shape. RGB channel order for colour images.
            y: A reference tensor with (N, C, H, W) shape. RGB channel order for colour images.
Returns:
Value of DISTS model.
"""
feats0 = self.forward_once(x)
feats1 = self.forward_once(y)
dist1 = 0
dist2 = 0
c1 = 1e-6
c2 = 1e-6
w_sum = self.alpha.sum() + self.beta.sum()
alpha = torch.split(self.alpha / w_sum, self.chns, dim=1)
beta = torch.split(self.beta / w_sum, self.chns, dim=1)
for k in range(len(self.chns)):
x_mean = feats0[k].mean([2, 3], keepdim=True)
y_mean = feats1[k].mean([2, 3], keepdim=True)
S1 = (2 * x_mean * y_mean + c1) / (x_mean ** 2 + y_mean ** 2 + c1)
dist1 = dist1 + (alpha[k] * S1).sum(1, keepdim=True)
x_var = ((feats0[k] - x_mean) ** 2).mean([2, 3], keepdim=True)
y_var = ((feats1[k] - y_mean) ** 2).mean([2, 3], keepdim=True)
xy_cov = (feats0[k] * feats1[k]).mean(
[2, 3], keepdim=True
) - x_mean * y_mean
S2 = (2 * xy_cov + c2) / (x_var + y_var + c2)
dist2 = dist2 + (beta[k] * S2).sum(1, keepdim=True)
score = 1 - (dist1 + dist2).squeeze()
return score
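# A minimal usage sketch (assumes the torchvision VGG16 weights and the DISTS
# weights can be downloaded; the random tensors only illustrate the call).
if __name__ == "__main__":
    metric = DISTS(pretrained=True).eval()
    dist_img = torch.rand(1, 3, 256, 256)  # distorted image, RGB in [0, 1]
    ref_img = torch.rand(1, 3, 256, 256)   # reference image, RGB in [0, 1]
    with torch.no_grad():
        score = metric(dist_img, ref_img)  # perceptual distance, lower means more similar
    print(float(score))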
| 5,418 | 33.515924 | 111 | py |
BVQI | BVQI-master/pyiqa/archs/inception.py | """
File from: https://github.com/mseitzer/pytorch-fid
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from .arch_util import load_pretrained_network
# Inception weights ported to PyTorch from the TensorFlow model used in the original FID implementation
FID_WEIGHTS_URL = "https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth" # noqa: E501
class InceptionV3(nn.Module):
"""Pretrained InceptionV3 network returning feature maps"""
# Index of default block of inception to return,
# corresponds to output of final average pooling
DEFAULT_BLOCK_INDEX = 3
# Maps feature dimensionality to their output blocks indices
BLOCK_INDEX_BY_DIM = {
64: 0, # First max pooling features
        192: 1, # Second max pooling features
768: 2, # Pre-aux classifier features
2048: 3, # Final average pooling features
}
def __init__(
self,
output_blocks=(DEFAULT_BLOCK_INDEX,),
resize_input=True,
normalize_input=True,
requires_grad=False,
use_fid_inception=True,
):
"""Build pretrained InceptionV3
Parameters
----------
output_blocks : list of int
Indices of blocks to return features of. Possible values are:
- 0: corresponds to output of first max pooling
- 1: corresponds to output of second max pooling
- 2: corresponds to output which is fed to aux classifier
- 3: corresponds to output of final average pooling
resize_input : bool
If true, bilinearly resizes input to width and height 299 before
feeding input to model. As the network without fully connected
layers is fully convolutional, it should be able to handle inputs
of arbitrary size, so resizing might not be strictly needed
normalize_input : bool
If true, scales the input from range (0, 1) to the range the
pretrained Inception network expects, namely (-1, 1)
requires_grad : bool
If true, parameters of the model require gradients. Possibly useful
for finetuning the network
use_fid_inception : bool
If true, uses the pretrained Inception model used in Tensorflow's
FID implementation. If false, uses the pretrained Inception model
available in torchvision. The FID Inception model has different
weights and a slightly different structure from torchvision's
Inception model. If you want to compute FID scores, you are
strongly advised to set this parameter to true to get comparable
results.
"""
super(InceptionV3, self).__init__()
self.resize_input = resize_input
self.normalize_input = normalize_input
self.output_blocks = sorted(output_blocks)
self.last_needed_block = max(output_blocks)
assert self.last_needed_block <= 3, "Last possible output block index is 3"
self.blocks = nn.ModuleList()
if use_fid_inception:
inception = fid_inception_v3()
else:
inception = _inception_v3(pretrained=True)
# Block 0: input to maxpool1
block0 = [
inception.Conv2d_1a_3x3,
inception.Conv2d_2a_3x3,
inception.Conv2d_2b_3x3,
nn.MaxPool2d(kernel_size=3, stride=2),
]
self.blocks.append(nn.Sequential(*block0))
# Block 1: maxpool1 to maxpool2
if self.last_needed_block >= 1:
block1 = [
inception.Conv2d_3b_1x1,
inception.Conv2d_4a_3x3,
nn.MaxPool2d(kernel_size=3, stride=2),
]
self.blocks.append(nn.Sequential(*block1))
# Block 2: maxpool2 to aux classifier
if self.last_needed_block >= 2:
block2 = [
inception.Mixed_5b,
inception.Mixed_5c,
inception.Mixed_5d,
inception.Mixed_6a,
inception.Mixed_6b,
inception.Mixed_6c,
inception.Mixed_6d,
inception.Mixed_6e,
]
self.blocks.append(nn.Sequential(*block2))
# Block 3: aux classifier to final avgpool
if self.last_needed_block >= 3:
block3 = [
inception.Mixed_7a,
inception.Mixed_7b,
inception.Mixed_7c,
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
]
self.blocks.append(nn.Sequential(*block3))
for param in self.parameters():
param.requires_grad = requires_grad
def forward(self, inp, resize_input=False, normalize_input=False):
"""Get Inception feature maps
Parameters
----------
inp : torch.autograd.Variable
Input tensor of shape Bx3xHxW. Values are expected to be in
range (0, 1)
Returns
-------
List of torch.autograd.Variable, corresponding to the selected output
block, sorted ascending by index
"""
outp = []
x = inp
if resize_input:
x = F.interpolate(x, size=(299, 299), mode="bilinear", align_corners=False)
if normalize_input:
x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)
for idx, block in enumerate(self.blocks):
x = block(x)
if idx in self.output_blocks:
outp.append(x)
if idx == self.last_needed_block:
break
return outp
def _inception_v3(*args, **kwargs):
"""Wraps `torchvision.models.inception_v3`
    Skips default weight initialization if supported by torchvision version.
See https://github.com/mseitzer/pytorch-fid/issues/28.
"""
try:
version = tuple(map(int, torchvision.__version__.split(".")[:2]))
except ValueError:
# Just a caution against weird version strings
version = (0,)
if version >= (0, 6):
kwargs["init_weights"] = False
return torchvision.models.inception_v3(*args, **kwargs)
def fid_inception_v3():
"""Build pretrained Inception model for FID computation
The Inception model for FID computation uses a different set of weights
and has a slightly different structure than torchvision's Inception.
This method first constructs torchvision's Inception and then patches the
necessary parts that are different in the FID Inception model.
"""
inception = _inception_v3(num_classes=1008, aux_logits=False, pretrained=False)
inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
inception.Mixed_7b = FIDInceptionE_1(1280)
inception.Mixed_7c = FIDInceptionE_2(2048)
load_pretrained_network(inception, FID_WEIGHTS_URL)
return inception
class FIDInceptionA(torchvision.models.inception.InceptionA):
"""InceptionA block patched for FID computation"""
def __init__(self, in_channels, pool_features):
super(FIDInceptionA, self).__init__(in_channels, pool_features)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(
x, kernel_size=3, stride=1, padding=1, count_include_pad=False
)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionC(torchvision.models.inception.InceptionC):
"""InceptionC block patched for FID computation"""
def __init__(self, in_channels, channels_7x7):
super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(
x, kernel_size=3, stride=1, padding=1, count_include_pad=False
)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_1(torchvision.models.inception.InceptionE):
"""First InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_1, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(
x, kernel_size=3, stride=1, padding=1, count_include_pad=False
)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_2(torchvision.models.inception.InceptionE):
"""Second InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_2, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: The FID Inception model uses max pooling instead of average
# pooling. This is likely an error in this specific Inception
# implementation, as other Inception models use average pooling here
# (which matches the description in the paper).
branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
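# A minimal usage sketch (assumes the ported FID Inception weights can be
# downloaded; the random batch only illustrates the input/output shapes).
if __name__ == "__main__":
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
    model = InceptionV3(output_blocks=[block_idx]).eval()
    imgs = torch.rand(4, 3, 299, 299)  # RGB images in [0, 1]
    with torch.no_grad():
        feats = model(imgs, resize_input=True, normalize_input=True)[0]
    print(feats.shape)  # (4, 2048, 1, 1) pooled features commonly used for FID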
| 11,796 | 35.187117 | 140 | py |
BVQI | BVQI-master/pyiqa/archs/maniqa_swin.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from torch import nn
class Mlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = (
x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(
B, H // window_size, W // window_size, window_size, window_size, -1
)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
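# Illustrative helper (not part of the original code): window_partition followed
# by window_reverse recovers the input feature map; the 2x56x56x96 tensor and
# window size 7 are arbitrary choices.
def _window_partition_roundtrip_example():
    x = torch.rand(2, 56, 56, 96)                # (B, H, W, C)
    windows = window_partition(x, 7)             # (2 * 8 * 8, 7, 7, 96)
    x_back = window_reverse(windows, 7, 56, 56)  # (2, 56, 56, 96)
    assert torch.allclose(x, x_back)
    return x_back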
class WindowAttention(nn.Module):
r"""Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(
self,
dim,
window_size,
num_heads,
qkv_bias=True,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = (
coords_flatten[:, :, None] - coords_flatten[:, None, :]
) # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(
1, 2, 0
).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=0.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B_, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = q @ k.transpose(-2, -1)
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)
].view(
self.window_size[0] * self.window_size[1],
self.window_size[0] * self.window_size[1],
-1,
) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1
).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(
1
).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}"
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
class SwinBlock(nn.Module):
r"""Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
        dim_mlp (int): Hidden dimension of the MLP.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(
self,
dim,
input_resolution,
num_heads,
window_size=7,
shift_size=0,
dim_mlp=1024.0,
qkv_bias=True,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.dim_mlp = dim_mlp
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert (
0 <= self.shift_size < self.window_size
        ), "shift_size must be in the range [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim,
window_size=to_2tuple(self.window_size),
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = self.dim_mlp
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
w_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(
img_mask, self.window_size
) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(
attn_mask != 0, float(-100.0)
).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(
x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
)
else:
shifted_x = x
# partition windows
x_windows = window_partition(
shifted_x, self.window_size
) # nW*B, window_size, window_size, C
x_windows = x_windows.view(
-1, self.window_size * self.window_size, C
) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(
x_windows, mask=self.attn_mask
) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(
shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
)
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class BasicLayer(nn.Module):
"""A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
        dim_mlp (int): Hidden dimension of the MLP.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(
self,
dim,
input_resolution,
depth,
num_heads,
window_size=7,
dim_mlp=1024,
qkv_bias=True,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
norm_layer=nn.LayerNorm,
downsample=None,
use_checkpoint=False,
):
super().__init__()
self.dim = dim
self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList(
[
SwinBlock(
dim=dim,
input_resolution=input_resolution,
num_heads=num_heads,
window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
dim_mlp=dim_mlp,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i]
if isinstance(drop_path, list)
else drop_path,
norm_layer=norm_layer,
)
for i in range(depth)
]
)
# patch merging layer
if downsample is not None:
self.downsample = downsample(
input_resolution, dim=dim, norm_layer=norm_layer
)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
x = rearrange(
x,
"b (h w) c -> b c h w",
h=self.input_resolution[0],
w=self.input_resolution[1],
)
x = F.relu(self.conv(x))
x = rearrange(x, "b c h w -> b (h w) c")
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class SwinTransformer(nn.Module):
def __init__(
self,
patches_resolution,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
embed_dim=256,
drop=0.1,
drop_rate=0.0,
drop_path_rate=0.1,
dropout=0.0,
window_size=7,
dim_mlp=1024,
qkv_bias=True,
qk_scale=None,
attn_drop_rate=0.0,
norm_layer=nn.LayerNorm,
downsample=None,
use_checkpoint=False,
scale=0.8,
**kwargs,
):
super().__init__()
self.scale = scale
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.dropout = nn.Dropout(p=drop)
self.num_features = embed_dim
self.num_layers = len(depths)
self.patches_resolution = (patches_resolution[0], patches_resolution[1])
self.downsample = nn.Conv2d(
self.embed_dim, self.embed_dim, kernel_size=3, stride=2, padding=1
)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(
dim=self.embed_dim,
input_resolution=patches_resolution,
depth=self.depths[i_layer],
num_heads=self.num_heads[i_layer],
window_size=self.window_size,
dim_mlp=dim_mlp,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=dropout,
attn_drop=attn_drop_rate,
drop_path=dpr[
sum(self.depths[:i_layer]) : sum(self.depths[: i_layer + 1])
],
norm_layer=norm_layer,
downsample=downsample,
use_checkpoint=use_checkpoint,
)
self.layers.append(layer)
def forward(self, x):
x = self.dropout(x)
x = rearrange(x, "b c h w -> b (h w) c")
for layer in self.layers:
_x = x
x = layer(x)
x = self.scale * x + _x
x = rearrange(
x,
"b (h w) c -> b c h w",
h=self.patches_resolution[0],
w=self.patches_resolution[1],
)
return x
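# A minimal usage sketch (the 28x28 token grid, 384-dim embedding and random
# feature map are illustrative; this module operates on CNN feature maps rather
# than raw images).
if __name__ == "__main__":
    swin = SwinTransformer(
        patches_resolution=(28, 28),
        depths=[2, 2],
        num_heads=[4, 4],
        embed_dim=384,
        window_size=7,
        dim_mlp=768,
    )
    feats = torch.rand(1, 384, 28, 28)  # (B, C, H, W) feature map
    out = swin(feats)
    print(out.shape)  # (1, 384, 28, 28)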
| 17,834 | 32.461538 | 104 | py |
BVQI | BVQI-master/pyiqa/archs/musiq_arch.py | r"""MUSIQ model.
Implemented by: Chaofeng Chen (https://github.com/chaofengc)
Refer to:
Official code from: https://github.com/google-research/google-research/tree/master/musiq
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from pyiqa.data.multiscale_trans_util import get_multiscale_patches
from pyiqa.utils.registry import ARCH_REGISTRY
from .arch_util import (
ExactPadding2d,
dist_to_mos,
excact_padding_2d,
load_pretrained_network,
)
default_model_urls = {
"ava": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/musiq_ava_ckpt-e8d3f067.pth",
"koniq10k": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/musiq_koniq_ckpt-e95806b9.pth",
"spaq": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/musiq_spaq_ckpt-358bb6af.pth",
"paq2piq": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/musiq_paq2piq_ckpt-364c0c84.pth",
"imagenet_pretrain": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/musiq_imagenet_pretrain-51d9b0a5.pth",
}
class StdConv(nn.Conv2d):
"""
Reference: https://github.com/joe-siyuan-qiao/WeightStandardization
"""
def forward(self, x):
# implement same padding
x = excact_padding_2d(x, self.kernel_size, self.stride, mode="same")
weight = self.weight
weight = weight - weight.mean((1, 2, 3), keepdim=True)
weight = weight / (weight.std((1, 2, 3), keepdim=True) + 1e-5)
return F.conv2d(x, weight, self.bias, self.stride)
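# Illustrative helper (not part of the original code): the weight-standardized
# convolution with "same" padding halves the spatial size when stride is 2.
def _std_conv_example():
    conv = StdConv(3, 64, 7, 2, bias=False)
    out = conv(torch.rand(1, 3, 224, 224))
    assert out.shape == (1, 64, 112, 112)
    return out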
class Bottleneck(nn.Module):
def __init__(self, inplanes, outplanes, stride=1):
super().__init__()
width = inplanes
self.conv1 = StdConv(inplanes, width, 1, 1, bias=False)
self.gn1 = nn.GroupNorm(32, width, eps=1e-4)
self.conv2 = StdConv(width, width, 3, 1, bias=False)
self.gn2 = nn.GroupNorm(32, width, eps=1e-4)
self.conv3 = StdConv(width, outplanes, 1, 1, bias=False)
self.gn3 = nn.GroupNorm(32, outplanes, eps=1e-4)
self.relu = nn.ReLU(True)
self.needs_projection = inplanes != outplanes or stride != 1
if self.needs_projection:
self.conv_proj = StdConv(inplanes, outplanes, 1, stride, bias=False)
self.gn_proj = nn.GroupNorm(32, outplanes, eps=1e-4)
def forward(self, x):
identity = x
if self.needs_projection:
identity = self.gn_proj(self.conv_proj(identity))
x = self.relu(self.gn1(self.conv1(x)))
x = self.relu(self.gn2(self.conv2(x)))
x = self.gn3(self.conv3(x))
out = self.relu(x + identity)
return out
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, dim, num_heads=6, bias=False, attn_drop=0.0, out_drop=0.0):
super().__init__()
assert dim % num_heads == 0, "dim should be divisible by num_heads"
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.query = nn.Linear(dim, dim, bias=bias)
self.key = nn.Linear(dim, dim, bias=bias)
self.value = nn.Linear(dim, dim, bias=bias)
self.attn_drop = nn.Dropout(attn_drop)
self.out = nn.Linear(dim, dim)
self.out_drop = nn.Dropout(out_drop)
def forward(self, x, mask=None):
B, N, C = x.shape
q = self.query(x)
k = self.key(x)
v = self.value(x)
q = q.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = k.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
v = v.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = (q @ k.transpose(-2, -1)) * self.scale
if mask is not None:
mask_h = mask.reshape(B, 1, N, 1)
mask_w = mask.reshape(B, 1, 1, N)
mask2d = mask_h * mask_w
attn = attn.masked_fill(mask2d == 0, -1e3)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.out(x)
x = self.out_drop(x)
return x
class TransformerBlock(nn.Module):
def __init__(
self,
dim,
mlp_dim,
num_heads,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim, eps=1e-6)
self.attention = MultiHeadAttention(
dim, num_heads, bias=True, attn_drop=attn_drop
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim, eps=1e-6)
self.mlp = Mlp(
in_features=dim, hidden_features=mlp_dim, act_layer=act_layer, drop=drop
)
def forward(self, x, inputs_masks):
y = self.norm1(x)
y = self.attention(y, inputs_masks)
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class AddHashSpatialPositionEmbs(nn.Module):
"""Adds learnable hash-based spatial embeddings to the inputs."""
def __init__(self, spatial_pos_grid_size, dim):
super().__init__()
self.position_emb = nn.parameter.Parameter(
torch.randn(1, spatial_pos_grid_size * spatial_pos_grid_size, dim)
)
nn.init.normal_(self.position_emb, std=0.02)
def forward(self, inputs, inputs_positions):
return inputs + self.position_emb.squeeze(0)[inputs_positions.long()]
class AddScaleEmbs(nn.Module):
"""Adds learnable scale embeddings to the inputs."""
def __init__(self, num_scales, dim):
super().__init__()
self.scale_emb = nn.parameter.Parameter(torch.randn(num_scales, dim))
nn.init.normal_(self.scale_emb, std=0.02)
def forward(self, inputs, inputs_scale_positions):
return inputs + self.scale_emb[inputs_scale_positions.long()]
class TransformerEncoder(nn.Module):
def __init__(
self,
input_dim,
mlp_dim=1152,
attention_dropout_rate=0.0,
dropout_rate=0,
num_heads=6,
num_layers=14,
num_scales=3,
spatial_pos_grid_size=10,
use_scale_emb=True,
use_sinusoid_pos_emb=False,
):
super().__init__()
self.use_scale_emb = use_scale_emb
self.posembed_input = AddHashSpatialPositionEmbs(
spatial_pos_grid_size, input_dim
)
self.scaleembed_input = AddScaleEmbs(num_scales, input_dim)
self.cls = nn.parameter.Parameter(torch.zeros(1, 1, input_dim))
self.dropout = nn.Dropout(dropout_rate)
self.encoder_norm = nn.LayerNorm(input_dim, eps=1e-6)
self.transformer = nn.ModuleDict()
for i in range(num_layers):
self.transformer[f"encoderblock_{i}"] = TransformerBlock(
input_dim, mlp_dim, num_heads, dropout_rate, attention_dropout_rate
)
def forward(
self, x, inputs_spatial_positions, inputs_scale_positions, inputs_masks
):
n, _, c = x.shape
x = self.posembed_input(x, inputs_spatial_positions)
if self.use_scale_emb:
x = self.scaleembed_input(x, inputs_scale_positions)
cls_token = self.cls.repeat(n, 1, 1)
x = torch.cat([cls_token, x], dim=1)
cls_mask = torch.ones((n, 1)).to(inputs_masks)
inputs_mask = torch.cat([cls_mask, inputs_masks], dim=1)
x = self.dropout(x)
for k, m in self.transformer.items():
x = m(x, inputs_mask)
x = self.encoder_norm(x)
return x
@ARCH_REGISTRY.register()
class MUSIQ(nn.Module):
r"""
Evaluation:
        - n_crops: currently only tested with single-crop evaluation
Reference:
Ke, Junjie, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang.
"Musiq: Multi-scale image quality transformer." In Proceedings of the
IEEE/CVF International Conference on Computer Vision (ICCV), pp. 5148-5157. 2021.
"""
def __init__(
self,
patch_size=32,
num_class=1,
hidden_size=384,
mlp_dim=1152,
attention_dropout_rate=0.0,
dropout_rate=0,
num_heads=6,
num_layers=14,
num_scales=3,
spatial_pos_grid_size=10,
use_scale_emb=True,
use_sinusoid_pos_emb=False,
pretrained=True,
pretrained_model_path=None,
# data opts
longer_side_lengths=[224, 384],
max_seq_len_from_original_res=-1,
):
super(MUSIQ, self).__init__()
resnet_token_dim = 64
self.patch_size = patch_size
self.data_preprocess_opts = {
"patch_size": patch_size,
"patch_stride": patch_size,
"hse_grid_size": spatial_pos_grid_size,
"longer_side_lengths": longer_side_lengths,
"max_seq_len_from_original_res": max_seq_len_from_original_res,
}
        # set num_class to 10 if the pretrained model was trained on the AVA dataset
        # if no pretrained dataset is specified, AVA is used by default
if pretrained_model_path is None and pretrained:
url_key = "ava" if isinstance(pretrained, bool) else pretrained
num_class = 10 if url_key == "ava" else num_class
pretrained_model_path = default_model_urls[url_key]
self.conv_root = StdConv(3, resnet_token_dim, 7, 2, bias=False)
self.gn_root = nn.GroupNorm(32, resnet_token_dim, eps=1e-6)
self.root_pool = nn.Sequential(
nn.ReLU(True),
ExactPadding2d(3, 2, mode="same"),
nn.MaxPool2d(3, 2),
)
token_patch_size = patch_size // 4
self.block1 = Bottleneck(resnet_token_dim, resnet_token_dim * 4)
self.embedding = nn.Linear(
resnet_token_dim * 4 * token_patch_size ** 2, hidden_size
)
self.transformer_encoder = TransformerEncoder(
hidden_size,
mlp_dim,
attention_dropout_rate,
dropout_rate,
num_heads,
num_layers,
num_scales,
spatial_pos_grid_size,
use_scale_emb,
use_sinusoid_pos_emb,
)
if num_class > 1:
self.head = nn.Sequential(
nn.Linear(hidden_size, num_class),
nn.Softmax(dim=-1),
)
else:
self.head = nn.Linear(hidden_size, num_class)
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path, True)
def forward(self, x, return_mos=True, return_dist=False):
if not self.training:
            # normalize inputs to [-1, 1] as in the official code
x = (x - 0.5) * 2
x = get_multiscale_patches(x, **self.data_preprocess_opts)
assert len(x.shape) in [3, 4]
if len(x.shape) == 4:
b, num_crops, seq_len, dim = x.shape
x = x.reshape(b * num_crops, seq_len, dim)
else:
b, seq_len, dim = x.shape
num_crops = 1
inputs_spatial_positions = x[:, :, -3]
inputs_scale_positions = x[:, :, -2]
inputs_masks = x[:, :, -1].bool()
x = x[:, :, :-3]
x = x.reshape(-1, 3, self.patch_size, self.patch_size)
x = self.conv_root(x)
x = self.gn_root(x)
x = self.root_pool(x)
x = self.block1(x)
# to match tensorflow channel order
x = x.permute(0, 2, 3, 1)
x = x.reshape(b, seq_len, -1)
x = self.embedding(x)
x = self.transformer_encoder(
x, inputs_spatial_positions, inputs_scale_positions, inputs_masks
)
q = self.head(x[:, 0])
q = q.reshape(b, num_crops, -1)
q = q.mean(dim=1) # for multiple crops evaluation
mos = dist_to_mos(q)
return_list = []
if return_mos:
return_list.append(mos)
if return_dist:
return_list.append(q)
if len(return_list) > 1:
return return_list
else:
return return_list[0]
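# A minimal usage sketch (assumes the released KonIQ-10k checkpoint can be
# downloaded; the input resolution is arbitrary because MUSIQ builds its own
# multi-scale patch representation internally).
if __name__ == "__main__":
    model = MUSIQ(pretrained="koniq10k").eval()
    img = torch.rand(1, 3, 480, 640)  # RGB image in [0, 1]
    with torch.no_grad():
        mos = model(img)  # predicted mean opinion score, shape (1, 1)
    print(mos)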
| 13,679 | 31.494062 | 136 | py |
BVQI | BVQI-master/pyiqa/archs/nlpd_arch.py | r"""NLPD Metric
Created by: https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/NLPD.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
Matlab code from https://www.cns.nyu.edu/~lcv/NLPyr/NLP_dist.m;
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms.functional as tf
from pyiqa.archs.arch_util import ExactPadding2d
from pyiqa.archs.ssim_arch import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
LAPLACIAN_FILTER = np.array(
[
[0.0025, 0.0125, 0.0200, 0.0125, 0.0025],
[0.0125, 0.0625, 0.1000, 0.0625, 0.0125],
[0.0200, 0.1000, 0.1600, 0.1000, 0.0200],
[0.0125, 0.0625, 0.1000, 0.0625, 0.0125],
[0.0025, 0.0125, 0.0200, 0.0125, 0.0025],
],
dtype=np.float32,
)
@ARCH_REGISTRY.register()
class NLPD(nn.Module):
    r"""Normalised Laplacian pyramid distance
Args:
        channels: Number of input channels to process.
        test_y_channel: Boolean, whether to use the Y channel of YCbCr, mimicking the official MATLAB code.
References:
Laparra, Valero, Johannes Ballé, Alexander Berardino, and Eero P. Simoncelli.
"Perceptual image quality assessment using a normalized Laplacian pyramid."
Electronic Imaging 2016, no. 16 (2016): 1-6.
"""
def __init__(self, channels=1, test_y_channel=True, k=6, filt=None):
super(NLPD, self).__init__()
if filt is None:
filt = np.reshape(
np.tile(LAPLACIAN_FILTER, (channels, 1, 1)), (channels, 1, 5, 5)
)
self.k = k
self.channels = channels
self.test_y_channel = test_y_channel
self.filt = nn.Parameter(torch.Tensor(filt), requires_grad=False)
self.dn_filts, self.sigmas = self.DN_filters()
self.pad_zero_one = nn.ZeroPad2d(1)
self.pad_zero_two = nn.ZeroPad2d(2)
self.pad_sym = ExactPadding2d(5, mode="symmetric")
self.rep_one = nn.ReplicationPad2d(1)
self.ps = nn.PixelShuffle(2)
def DN_filters(self):
r"""Define parameters for the divisive normalization"""
sigmas = [0.0248, 0.0185, 0.0179, 0.0191, 0.0220, 0.2782]
dn_filts = []
dn_filts.append(
torch.Tensor(
np.reshape(
[[0, 0.1011, 0], [0.1493, 0, 0.1460], [0, 0.1015, 0.0]]
* self.channels,
(self.channels, 1, 3, 3),
).astype(np.float32)
)
)
dn_filts.append(
torch.Tensor(
np.reshape(
[[0, 0.0757, 0], [0.1986, 0, 0.1846], [0, 0.0837, 0]]
* self.channels,
(self.channels, 1, 3, 3),
).astype(np.float32)
)
)
dn_filts.append(
torch.Tensor(
np.reshape(
[[0, 0.0477, 0], [0.2138, 0, 0.2243], [0, 0.0467, 0]]
* self.channels,
(self.channels, 1, 3, 3),
).astype(np.float32)
)
)
dn_filts.append(
torch.Tensor(
np.reshape(
[[0, 0, 0], [0.2503, 0, 0.2616], [0, 0, 0]] * self.channels,
(self.channels, 1, 3, 3),
).astype(np.float32)
)
)
dn_filts.append(
torch.Tensor(
np.reshape(
[[0, 0, 0], [0.2598, 0, 0.2552], [0, 0, 0]] * self.channels,
(self.channels, 1, 3, 3),
).astype(np.float32)
)
)
dn_filts.append(
torch.Tensor(
np.reshape(
[[0, 0, 0], [0.2215, 0, 0.0717], [0, 0, 0]] * self.channels,
(self.channels, 1, 3, 3),
).astype(np.float32)
)
)
dn_filts = nn.ParameterList(
[nn.Parameter(x, requires_grad=False) for x in dn_filts]
)
sigmas = nn.ParameterList(
[
nn.Parameter(torch.Tensor(np.array(x)), requires_grad=False)
for x in sigmas
]
)
return dn_filts, sigmas
def pyramid(self, im):
r"""Compute Laplacian Pyramid
Args:
im: An input tensor. Shape :math:`(N, C, H, W)`.
"""
out = []
J = im
pyr = []
for i in range(0, self.k - 1):
            # Downsample. The official MATLAB code uses 'symmetric' padding.
I = F.conv2d(
self.pad_sym(J), self.filt, stride=2, padding=0, groups=self.channels
)
# for each dimension, check if the upsampled version has to be odd.
odd_h, odd_w = 2 * I.size(2) - J.size(2), 2 * I.size(3) - J.size(3)
            # Upsample. The official MATLAB code interpolates with zeros to upsample.
I_pad = self.rep_one(I)
I_rep1, I_rep2, I_rep3 = (
torch.zeros_like(I_pad),
torch.zeros_like(I_pad),
torch.zeros_like(I_pad),
)
R = torch.cat([I_pad * 4, I_rep1, I_rep2, I_rep3], dim=1)
I_up = self.ps(R)
I_up_conv = F.conv2d(
self.pad_zero_two(I_up),
self.filt,
stride=1,
padding=0,
groups=self.channels,
)
I_up_conv = I_up_conv[
:, :, 2 : (I_up.shape[2] - 2 - odd_h), 2 : (I_up.shape[3] - 2 - odd_w)
]
out = J - I_up_conv
            # NLP transformation; conv2 in MATLAB rotates filters by 180 degrees.
out_conv = F.conv2d(
self.pad_zero_one(torch.abs(out)),
tf.rotate(self.dn_filts[i], 180),
stride=1,
groups=self.channels,
)
out_norm = out / (self.sigmas[i] + out_conv)
pyr.append(out_norm)
J = I
        # NLP transformation for the top layer; the coarsest level contains the residual low-pass image
out_conv = F.conv2d(
self.pad_zero_one(torch.abs(J)),
tf.rotate(self.dn_filts[-1], 180),
stride=1,
groups=self.channels,
)
out_norm = J / (self.sigmas[-1] + out_conv)
pyr.append(out_norm)
return pyr
    def nlpd(self, x1, x2):
        r"""Compute Normalised Laplacian pyramid distance for a batch of images.
Args:
x1: An input tensor. Shape :math:`(N, C, H, W)`.
x2: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Index of similarity betwen two images. Usually in [0, 1] interval.
"""
assert (self.test_y_channel and self.channels == 1) or (
not self.test_y_channel and self.channels == 3
        ), "Number of channels and the test_y_channel setting should match"
if self.test_y_channel and self.channels == 1:
x1 = to_y_channel(x1)
x2 = to_y_channel(x2)
y1 = self.pyramid(x1)
y2 = self.pyramid(x2)
total = []
for z1, z2 in zip(y1, y2):
diff = (z1 - z2) ** 2
sqrt = torch.sqrt(torch.mean(diff, (1, 2, 3)))
total.append(sqrt)
score = torch.stack(total, dim=1).mean(1)
return score
def forward(self, X, Y):
"""Computation of NLPD metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of NLPD metric in [0, 1] range.
"""
assert (
X.shape == Y.shape
), f"Input {X.shape} and reference images should have the same shape"
score = self.nlpd(X, Y)
return score
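# A minimal usage sketch (random tensors only illustrate the expected call;
# NLPD is a distance, so lower values indicate more similar images).
if __name__ == "__main__":
    metric = NLPD(channels=1, test_y_channel=True)
    dist_img = torch.rand(1, 3, 256, 256)  # distorted image, RGB in [0, 1]
    ref_img = torch.rand(1, 3, 256, 256)   # reference image, RGB in [0, 1]
    with torch.no_grad():
        score = metric(dist_img, ref_img)
    print(score.shape)  # (1,)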
| 7,884 | 31.854167 | 101 | py |
BVQI | BVQI-master/pyiqa/archs/mad_arch.py | r"""MAD Metric
Created by: https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/MAD.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Note:
    Official MATLAB code is not available;
Pytorch version >= 1.8.0;
"""
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy.fft import fftshift
from pyiqa.matlab_utils import math_util
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
MAX = nn.MaxPool2d((2, 2), stride=1, padding=1)
def extract_patches_2d(
img: torch.Tensor,
patch_shape: list = [64, 64],
step: list = [27, 27],
batch_first: bool = True,
keep_last_patch: bool = False,
) -> torch.Tensor:
patch_H, patch_W = patch_shape[0], patch_shape[1]
if img.size(2) < patch_H:
num_padded_H_Top = (patch_H - img.size(2)) // 2
num_padded_H_Bottom = patch_H - img.size(2) - num_padded_H_Top
padding_H = nn.ConstantPad2d((0, 0, num_padded_H_Top, num_padded_H_Bottom), 0)
img = padding_H(img)
if img.size(3) < patch_W:
num_padded_W_Left = (patch_W - img.size(3)) // 2
num_padded_W_Right = patch_W - img.size(3) - num_padded_W_Left
padding_W = nn.ConstantPad2d((num_padded_W_Left, num_padded_W_Right, 0, 0), 0)
img = padding_W(img)
step_int = [0, 0]
step_int[0] = int(patch_H * step[0]) if (isinstance(step[0], float)) else step[0]
step_int[1] = int(patch_W * step[1]) if (isinstance(step[1], float)) else step[1]
patches_fold_H = img.unfold(2, patch_H, step_int[0])
if ((img.size(2) - patch_H) % step_int[0] != 0) and keep_last_patch:
patches_fold_H = torch.cat(
(
patches_fold_H,
img[
:,
:,
-patch_H:,
]
.permute(0, 1, 3, 2)
.unsqueeze(2),
),
dim=2,
)
patches_fold_HW = patches_fold_H.unfold(3, patch_W, step_int[1])
if ((img.size(3) - patch_W) % step_int[1] != 0) and keep_last_patch:
patches_fold_HW = torch.cat(
(
patches_fold_HW,
patches_fold_H[:, :, :, -patch_W:, :]
.permute(0, 1, 2, 4, 3)
.unsqueeze(3),
),
dim=3,
)
patches = patches_fold_HW.permute(2, 3, 0, 1, 4, 5)
patches = patches.reshape(-1, img.size(0), img.size(1), patch_H, patch_W)
if batch_first:
patches = patches.permute(1, 0, 2, 3, 4)
return patches
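# Illustrative helper (not part of the original code): with the 16x16 patch size
# and stride 4 used by ical_std/ical_stat below, a 1x1x64x64 input yields
# (1, 13 * 13, 1, 16, 16) patches when batch_first=True.
def _extract_patches_2d_example():
    x = torch.rand(1, 1, 64, 64)
    patches = extract_patches_2d(x, patch_shape=[16, 16], step=[4, 4])
    assert patches.shape == (1, 169, 1, 16, 16)
    return patches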
def make_csf(rows, cols, nfreq):
xvals = np.arange(-(cols - 1) / 2.0, (cols + 1) / 2.0)
yvals = np.arange(-(rows - 1) / 2.0, (rows + 1) / 2.0)
xplane, yplane = np.meshgrid(xvals, yvals) # generate mesh
plane = ((xplane + 1j * yplane) / cols) * 2 * nfreq
radfreq = np.abs(plane) # radial frequency
w = 0.7
s = (1 - w) / 2 * np.cos(4 * np.angle(plane)) + (1 + w) / 2
radfreq = radfreq / s
# Now generate the CSF
csf = 2.6 * (0.0192 + 0.114 * radfreq) * np.exp(-((0.114 * radfreq) ** 1.1))
csf[radfreq < 7.8909] = 0.9809
return np.transpose(csf)
def get_moments(d, sk=False):
# Return the first 4 moments of the data provided
mean = torch.mean(d, dim=[3, 4], keepdim=True)
diffs = d - mean
var = torch.mean(torch.pow(diffs, 2.0), dim=[3, 4], keepdim=True)
std = torch.pow(var + 1e-12, 0.5)
if sk:
zscores = diffs / std
skews = torch.mean(torch.pow(zscores, 3.0), dim=[3, 4], keepdim=True)
kurtoses = (
torch.mean(torch.pow(zscores, 4.0), dim=[3, 4], keepdim=True) - 3.0
) # excess kurtosis, should be 0 for Gaussian
return mean, std, skews, kurtoses
else:
return mean, std
def ical_stat(x, p=16, s=4):
B, C, H, W = x.shape
x1 = extract_patches_2d(x, patch_shape=[p, p], step=[s, s])
_, std, skews, kurt = get_moments(x1, sk=True)
STD = std.reshape(B, C, (H - (p - s)) // s, (W - (p - s)) // s)
SKEWS = skews.reshape(B, C, (H - (p - s)) // s, (W - (p - s)) // s)
KURT = kurt.reshape(B, C, (H - (p - s)) // s, (W - (p - s)) // s)
    return STD, SKEWS, KURT  # differs from the original version
def ical_std(x, p=16, s=4):
B, C, H, W = x.shape
x1 = extract_patches_2d(x, patch_shape=[p, p], step=[s, s])
mean, std = get_moments(x1)
mean = mean.reshape(B, C, (H - (p - s)) // s, (W - (p - s)) // s)
std = std.reshape(B, C, (H - (p - s)) // s, (W - (p - s)) // s)
return mean, std
def hi_index(ref_img, dst_img):
k = 0.02874
G = 0.5
C_slope = 1
Ci_thrsh = -5
Cd_thrsh = -5
ref = k * (ref_img + 1e-12) ** (2.2 / 3)
dst = k * (torch.abs(dst_img) + 1e-12) ** (2.2 / 3)
B, C, H, W = ref.shape
csf = make_csf(H, W, 32)
csf = (
torch.from_numpy(csf.reshape(1, 1, H, W, 1))
.float()
.repeat(1, C, 1, 1, 2)
.to(ref.device)
)
x = torch.fft.fft2(ref)
x1 = math_util.batch_fftshift2d(x)
x2 = math_util.batch_ifftshift2d(x1 * csf)
ref = torch.fft.ifft2(x2).real
x = torch.fft.fft2(dst)
x1 = math_util.batch_fftshift2d(x)
x2 = math_util.batch_ifftshift2d(x1 * csf)
dst = torch.fft.ifft2(x2).real
m1_1, std_1 = ical_std(ref)
B, C, H1, W1 = m1_1.shape
std_1 = (-MAX(-std_1) / 2)[:, :, :H1, :W1]
_, std_2 = ical_std(dst - ref)
BSIZE = 16
eps = 1e-12
Ci_ref = torch.log(torch.abs((std_1 + eps) / (m1_1 + eps)))
Ci_dst = torch.log(torch.abs((std_2 + eps) / (m1_1 + eps)))
Ci_dst = Ci_dst.masked_fill(m1_1 < G, -1000)
idx1 = (Ci_ref > Ci_thrsh) & (Ci_dst > (C_slope * (Ci_ref - Ci_thrsh) + Cd_thrsh))
idx2 = (Ci_ref <= Ci_thrsh) & (Ci_dst > Cd_thrsh)
msk = Ci_ref.clone()
msk = msk.masked_fill(~idx1, 0)
msk = msk.masked_fill(~idx2, 0)
msk[idx1] = Ci_dst[idx1] - (C_slope * (Ci_ref[idx1] - Ci_thrsh) + Cd_thrsh)
msk[idx2] = Ci_dst[idx2] - Cd_thrsh
win = (
torch.ones((1, 1, BSIZE, BSIZE)).repeat(C, 1, 1, 1).to(ref.device) / BSIZE ** 2
)
xx = (ref_img - dst_img) ** 2
lmse = F.conv2d(xx, win, stride=4, padding=0, groups=C)
mp = msk * lmse
B, C, H, W = mp.shape
return torch.norm(mp.reshape(B, C, -1), dim=2) / math.sqrt(H * W) * 200
def gaborconvolve(im):
nscale = 5 # Number of wavelet scales.
norient = 4 # Number of filter orientations.
minWaveLength = 3 # Wavelength of smallest scale filter.
mult = 3 # Scaling factor between successive filters.
    sigmaOnf = 0.55 # Ratio of the standard deviation of the Gaussian describing the log Gabor filter's transfer function (in the frequency domain) to the filter center frequency.
wavelength = [
minWaveLength,
minWaveLength * mult,
minWaveLength * mult ** 2,
minWaveLength * mult ** 3,
minWaveLength * mult ** 4,
]
    # Ratio of angular interval between filter orientations and the standard deviation of the angular Gaussian used to construct the filters in the frequency plane.
dThetaOnSigma = 1.5
# Fourier transform of image
B, C, rows, cols = im.shape
# imagefft = torch.rfft(im,2, onesided=False)
imagefft = torch.fft.fft2(im)
# Pre-compute to speed up filter construction
x = np.ones((rows, 1)) * np.arange(-cols / 2.0, (cols / 2.0)) / (cols / 2.0)
y = np.dot(
np.expand_dims(np.arange(-rows / 2.0, (rows / 2.0)), 1),
np.ones((1, cols)) / (rows / 2.0),
)
# Matrix values contain *normalised* radius from centre.
radius = np.sqrt(x ** 2 + y ** 2)
# Get rid of the 0 radius value in the middle
radius[int(np.round(rows / 2 + 1)), int(np.round(cols / 2 + 1))] = 1
radius = np.log(radius + 1e-12)
# Matrix values contain polar angle.
theta = np.arctan2(-y, x)
sintheta = np.sin(theta)
costheta = np.cos(theta)
# Calculate the standard deviation
thetaSigma = math.pi / norient / dThetaOnSigma
logGabors = []
for s in range(nscale):
# Construct the filter - first calculate the radial filter component.
fo = 1.0 / wavelength[s] # Centre frequency of filter.
rfo = fo / 0.5 # Normalised radius from centre of frequency plane
# corresponding to fo.
tmp = -(2 * np.log(sigmaOnf) ** 2)
tmp2 = np.log(rfo)
logGabors.append(np.exp((radius - tmp2) ** 2 / tmp))
logGabors[s][int(np.round(rows / 2)), int(np.round(cols / 2))] = 0
E0 = [[], [], [], []]
for o in range(norient):
# Calculate filter angle.
angl = o * math.pi / norient
ds = sintheta * np.cos(angl) - costheta * np.sin(angl) # Difference in sine.
dc = costheta * np.cos(angl) + sintheta * np.sin(angl) # Difference in cosine.
dtheta = np.abs(np.arctan2(ds, dc)) # Absolute angular distance.
spread = np.exp(
(-(dtheta ** 2)) / (2 * thetaSigma ** 2)
) # Calculate the angular filter component.
for s in range(nscale):
filter = fftshift(logGabors[s] * spread)
filter = torch.from_numpy(filter).reshape(1, 1, rows, cols).to(im.device)
e0 = torch.fft.ifft2(imagefft * filter)
E0[o].append(torch.stack((e0.real, e0.imag), -1))
return E0
def lo_index(ref, dst):
gabRef = gaborconvolve(ref)
gabDst = gaborconvolve(dst)
s = [0.5 / 13.25, 0.75 / 13.25, 1 / 13.25, 5 / 13.25, 6 / 13.25]
mp = 0
for gb_i in range(4):
for gb_j in range(5):
stdref, skwref, krtref = ical_stat(math_util.abs(gabRef[gb_i][gb_j]))
stddst, skwdst, krtdst = ical_stat(math_util.abs(gabDst[gb_i][gb_j]))
mp = mp + s[gb_i] * (
torch.abs(stdref - stddst)
+ 2 * torch.abs(skwref - skwdst)
+ torch.abs(krtref - krtdst)
)
B, C, rows, cols = mp.shape
return torch.norm(mp.reshape(B, C, -1), dim=2) / np.sqrt(rows * cols)
@ARCH_REGISTRY.register()
class MAD(torch.nn.Module):
r"""Args:
channel: Number of input channel.
test_y_channel: bool, whether to use y channel on ycbcr which mimics official matlab code.
References:
Larson, Eric Cooper, and Damon Michael Chandler. "Most apparent distortion: full-reference
image quality assessment and the role of strategy." Journal of electronic imaging 19, no. 1
(2010): 011006.
"""
def __init__(self, channels=3, test_y_channel=True):
super(MAD, self).__init__()
self.channels = channels
self.test_y_channel = test_y_channel
def mad(self, ref, dst):
r"""Compute MAD for a batch of images.
Args:
ref: An reference tensor. Shape :math:`(N, C, H, W)`.
dst: A distortion tensor. Shape :math:`(N, C, H, W)`.
"""
if self.test_y_channel and ref.shape[1] == 3:
ref = to_y_channel(ref, 255.0)
dst = to_y_channel(dst, 255.0)
self.channels = 1
HI = hi_index(ref, dst)
LO = lo_index(ref, dst)
thresh1 = 2.55
thresh2 = 3.35
b1 = math.exp(-thresh1 / thresh2)
b2 = 1 / (math.log(10) * thresh2)
sig = 1 / (1 + b1 * HI ** b2)
MAD = LO ** (1 - sig) * HI ** (sig)
return MAD.mean(1)
def forward(self, X, Y):
r"""Computation of CW-SSIM metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
            Value of MAD metric; lower values indicate better perceptual quality.
"""
assert (
X.shape == Y.shape
), f"Input and reference images should have the same shape, but got {X.shape} and {Y.shape}"
score = self.mad(Y, X)
return score
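# Usage sketch (illustrative, not part of the original file): scores a random
# distorted/reference pair with the registered MAD module. The shapes and random
# inputs below are assumptions chosen only to exercise the code path.
if __name__ == "__main__":
    torch.manual_seed(0)
    ref = torch.rand(1, 3, 256, 256)
    dist = (ref + 0.1 * torch.randn_like(ref)).clamp(0, 1)
    mad_metric = MAD(test_y_channel=True)
    with torch.no_grad():
        # forward(X, Y) takes the distorted image first, the reference second
        print("MAD score (lower is better):", mad_metric(dist, ref))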
| 11,628 | 31.66573 | 100 | py |
BVQI | BVQI-master/pyiqa/archs/arch_util.py | import collections.abc
import math
from builtins import ValueError
from collections import OrderedDict
from itertools import repeat
from typing import Tuple
import numpy as np
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm
from pyiqa.utils.download_util import load_file_from_url
# --------------------------------------------
# IQA utils
# --------------------------------------------
def dist_to_mos(dist_score: torch.Tensor) -> torch.Tensor:
"""Convert distribution prediction to mos score.
For datasets with detailed score labels, such as AVA
Args:
dist_score (tensor): (*, C), C is the class number
Output:
mos_score (tensor): (*, 1)
"""
num_classes = dist_score.shape[-1]
mos_score = dist_score * torch.arange(1, num_classes + 1).to(dist_score)
mos_score = mos_score.sum(dim=-1, keepdim=True)
return mos_score
# --------------------------------------------
# Common utils
# --------------------------------------------
def clean_state_dict(state_dict):
# 'clean' checkpoint by removing .module prefix from state dict if it exists from parallel training
cleaned_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] if k.startswith("module.") else k
cleaned_state_dict[name] = v
return cleaned_state_dict
def load_pretrained_network(net, model_path, strict=True, weight_keys=None):
if model_path.startswith("https://") or model_path.startswith("http://"):
model_path = load_file_from_url(model_path)
state_dict = torch.load(model_path, map_location=torch.device("cpu"))
if weight_keys is not None:
state_dict = state_dict[weight_keys]
state_dict = clean_state_dict(state_dict)
net.load_state_dict(state_dict, strict=strict)
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
@torch.no_grad()
def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
r"""Initialize network weights.
Args:
module_list (list[nn.Module] | nn.Module): Modules to be initialized.
scale (float): Scale initialized weights, especially for residual
blocks. Default: 1.
bias_fill (float): The value to fill bias. Default: 0.
kwargs (dict): Other arguments for initialization function.
"""
if not isinstance(module_list, list):
module_list = [module_list]
for module in module_list:
for m in module.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, **kwargs)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.fill_(bias_fill)
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight, **kwargs)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.fill_(bias_fill)
elif isinstance(m, _BatchNorm):
init.constant_(m.weight, 1)
if m.bias is not None:
m.bias.data.fill_(bias_fill)
def symm_pad(im: torch.Tensor, padding: Tuple[int, int, int, int]):
"""Symmetric padding same as tensorflow.
Ref: https://discuss.pytorch.org/t/symmetric-padding/19866/3
"""
h, w = im.shape[-2:]
left, right, top, bottom = padding
x_idx = np.arange(-left, w + right)
y_idx = np.arange(-top, h + bottom)
def reflect(x, minx, maxx):
"""Reflects an array around two points making a triangular waveform that ramps up
and down, allowing for pad lengths greater than the input length"""
rng = maxx - minx
double_rng = 2 * rng
mod = np.fmod(x - minx, double_rng)
normed_mod = np.where(mod < 0, mod + double_rng, mod)
out = np.where(normed_mod >= rng, double_rng - normed_mod, normed_mod) + minx
return np.array(out, dtype=x.dtype)
x_pad = reflect(x_idx, -0.5, w - 0.5)
y_pad = reflect(y_idx, -0.5, h - 0.5)
xx, yy = np.meshgrid(x_pad, y_pad)
return im[..., yy, xx]
def excact_padding_2d(x, kernel, stride=1, dilation=1, mode="same"):
assert len(x.shape) == 4, f"Only support 4D tensor input, but got {x.shape}"
kernel = to_2tuple(kernel)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
b, c, h, w = x.shape
h2 = math.ceil(h / stride[0])
w2 = math.ceil(w / stride[1])
pad_row = (h2 - 1) * stride[0] + (kernel[0] - 1) * dilation[0] + 1 - h
pad_col = (w2 - 1) * stride[1] + (kernel[1] - 1) * dilation[1] + 1 - w
pad_l, pad_r, pad_t, pad_b = (
pad_col // 2,
pad_col - pad_col // 2,
pad_row // 2,
pad_row - pad_row // 2,
)
mode = mode if mode != "same" else "constant"
if mode != "symmetric":
x = F.pad(x, (pad_l, pad_r, pad_t, pad_b), mode=mode)
elif mode == "symmetric":
x = symm_pad(x, (pad_l, pad_r, pad_t, pad_b))
return x
class ExactPadding2d(nn.Module):
r"""This function calculate exact padding values for 4D tensor inputs,
and support the same padding mode as tensorflow.
Args:
kernel (int or tuple): kernel size.
stride (int or tuple): stride size.
dilation (int or tuple): dilation size, default with 1.
        mode (str): padding mode, one of ('same', 'symmetric', 'replicate', 'circular')
"""
def __init__(self, kernel, stride=1, dilation=1, mode="same"):
super().__init__()
self.kernel = to_2tuple(kernel)
self.stride = to_2tuple(stride)
self.dilation = to_2tuple(dilation)
self.mode = mode
def forward(self, x):
return excact_padding_2d(x, self.kernel, self.stride, self.dilation, self.mode)
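# Usage sketch (illustrative, not part of the original file): demonstrates
# dist_to_mos on a toy score distribution and ExactPadding2d as a tensorflow-style
# 'same' padding layer. The concrete numbers below are assumptions.
if __name__ == "__main__":
    # A uniform distribution over 5 score bins maps to a MOS of about 3.0,
    # since sum_i p_i * i = 0.2 * (1 + 2 + 3 + 4 + 5) = 3.0
    dist = torch.full((1, 5), 0.2)
    print(dist_to_mos(dist))
    # Pad a 32x32 input so that a 7x7, stride-2 convolution yields a 16x16 output
    pad = ExactPadding2d(kernel=7, stride=2, mode="same")
    x = torch.randn(1, 3, 32, 32)
    print(pad(x).shape)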
| 6,039 | 32.005464 | 103 | py |
BVQI | BVQI-master/pyiqa/archs/fsim_arch.py | r"""FSIM Metric
Created by: https://github.com/photosynthesis-team/piq/blob/master/piq/fsim.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
Official matlab code from https://www4.comp.polyu.edu.hk/~cslzhang/IQA/FSIM/Files/FeatureSIM.m
PIQA from https://github.com/francois-rozet/piqa/blob/master/piqa/fsim.py
"""
import functools
import math
from typing import Tuple
import torch
import torch.nn as nn
from pyiqa.utils.color_util import rgb2yiq
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import get_meshgrid, gradient_map, ifftshift, similarity_map
def fsim(
x: torch.Tensor,
y: torch.Tensor,
chromatic: bool = True,
scales: int = 4,
orientations: int = 4,
min_length: int = 6,
mult: int = 2,
sigma_f: float = 0.55,
delta_theta: float = 1.2,
k: float = 2.0,
) -> torch.Tensor:
r"""Compute Feature Similarity Index Measure for a batch of images.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
chromatic: Flag to compute FSIMc, which also takes into account chromatic components
        scales: Number of wavelets used for computation of phase congruency maps
        orientations: Number of filter orientations used for computation of phase congruency maps
min_length: Wavelength of smallest scale filter
mult: Scaling factor between successive filters
sigma_f: Ratio of the standard deviation of the Gaussian describing the log Gabor filter's
transfer function in the frequency domain to the filter center frequency.
delta_theta: Ratio of angular interval between filter orientations and the standard deviation
of the angular Gaussian function used to construct filters in the frequency plane.
k: No of standard deviations of the noise energy beyond the mean at which we set the noise
threshold point, below which phase congruency values get penalized.
Returns:
        Index of similarity between two images. Usually in [0, 1] interval.
Can be bigger than 1 for predicted :math:`x` images with higher contrast than the original ones.
References:
L. Zhang, L. Zhang, X. Mou and D. Zhang, "FSIM: A Feature Similarity Index for Image Quality Assessment,"
IEEE Transactions on Image Processing, vol. 20, no. 8, pp. 2378-2386, Aug. 2011, doi: 10.1109/TIP.2011.2109730.
https://ieeexplore.ieee.org/document/5705575
"""
    # Rescale to [0, 255] range, because all constants are calculated for this range
    x = x * 255.0
    y = y * 255.0
# Apply average pooling
kernel_size = max(1, round(min(x.shape[-2:]) / 256))
x = torch.nn.functional.avg_pool2d(x, kernel_size)
y = torch.nn.functional.avg_pool2d(y, kernel_size)
num_channels = x.size(1)
# Convert RGB to YIQ color space
if num_channels == 3:
x_yiq = rgb2yiq(x)
y_yiq = rgb2yiq(y)
x_lum = x_yiq[:, :1]
y_lum = y_yiq[:, :1]
x_i = x_yiq[:, 1:2]
y_i = y_yiq[:, 1:2]
x_q = x_yiq[:, 2:]
y_q = y_yiq[:, 2:]
else:
x_lum = x
y_lum = y
# Compute phase congruency maps
pc_x = _phase_congruency(
x_lum,
scales=scales,
orientations=orientations,
min_length=min_length,
mult=mult,
sigma_f=sigma_f,
delta_theta=delta_theta,
k=k,
)
pc_y = _phase_congruency(
y_lum,
scales=scales,
orientations=orientations,
min_length=min_length,
mult=mult,
sigma_f=sigma_f,
delta_theta=delta_theta,
k=k,
)
# Gradient maps
scharr_filter = (
torch.tensor([[[-3.0, 0.0, 3.0], [-10.0, 0.0, 10.0], [-3.0, 0.0, 3.0]]]) / 16
)
kernels = torch.stack([scharr_filter, scharr_filter.transpose(-1, -2)])
grad_map_x = gradient_map(x_lum, kernels)
grad_map_y = gradient_map(y_lum, kernels)
# Constants from the paper
T1, T2, T3, T4, lmbda = 0.85, 160, 200, 200, 0.03
# Compute FSIM
PC = similarity_map(pc_x, pc_y, T1)
GM = similarity_map(grad_map_x, grad_map_y, T2)
pc_max = torch.where(pc_x > pc_y, pc_x, pc_y)
score = GM * PC * pc_max # torch.sum(score)/torch.sum(pc_max)
if chromatic:
assert (
num_channels == 3
), "Chromatic component can be computed only for RGB images!"
S_I = similarity_map(x_i, y_i, T3)
S_Q = similarity_map(x_q, y_q, T4)
score = score * torch.abs(S_I * S_Q) ** lmbda
# Complex gradients will work in PyTorch 1.6.0
# score = score * torch.real((S_I * S_Q).to(torch.complex64) ** lmbda)
result = score.sum(dim=[1, 2, 3]) / pc_max.sum(dim=[1, 2, 3])
return result
def _construct_filters(
x: torch.Tensor,
scales: int = 4,
orientations: int = 4,
min_length: int = 6,
mult: int = 2,
sigma_f: float = 0.55,
delta_theta: float = 1.2,
k: float = 2.0,
use_lowpass_filter=True,
):
"""Creates a stack of filters used for computation of phase congruensy maps
Args:
x: Tensor. Shape :math:`(N, 1, H, W)`.
scales: Number of wavelets
orientations: Number of filter orientations
min_length: Wavelength of smallest scale filter
mult: Scaling factor between successive filters
sigma_f: Ratio of the standard deviation of the Gaussian
describing the log Gabor filter's transfer function
in the frequency domain to the filter center frequency.
delta_theta: Ratio of angular interval between filter orientations
and the standard deviation of the angular Gaussian function
used to construct filters in the freq. plane.
k: No of standard deviations of the noise energy beyond the mean
at which we set the noise threshold point, below which phase
congruency values get penalized.
"""
N, _, H, W = x.shape
# Calculate the standard deviation of the angular Gaussian function
# used to construct filters in the freq. plane.
theta_sigma = math.pi / (orientations * delta_theta)
# Pre-compute some stuff to speed up filter construction
grid_x, grid_y = get_meshgrid((H, W))
radius = torch.sqrt(grid_x ** 2 + grid_y ** 2)
theta = torch.atan2(-grid_y, grid_x)
# Quadrant shift radius and theta so that filters are constructed with 0 frequency at the corners.
# Get rid of the 0 radius value at the 0 frequency point (now at top-left corner)
# so that taking the log of the radius will not cause trouble.
radius = ifftshift(radius)
theta = ifftshift(theta)
radius[0, 0] = 1
sintheta = torch.sin(theta)
costheta = torch.cos(theta)
# Filters are constructed in terms of two components.
# 1) The radial component, which controls the frequency band that the filter responds to
# 2) The angular component, which controls the orientation that the filter responds to.
# The two components are multiplied together to construct the overall filter.
# First construct a low-pass filter that is as large as possible, yet falls
# away to zero at the boundaries. All log Gabor filters are multiplied by
# this to ensure no extra frequencies at the 'corners' of the FFT are
# incorporated as this seems to upset the normalisation process when
lp = _lowpassfilter(size=(H, W), cutoff=0.45, n=15)
# Construct the radial filter components...
log_gabor = []
for s in range(scales):
wavelength = min_length * mult ** s
omega_0 = 1.0 / wavelength
gabor_filter = torch.exp(
(-torch.log(radius / omega_0) ** 2) / (2 * math.log(sigma_f) ** 2)
)
if use_lowpass_filter:
gabor_filter = gabor_filter * lp
gabor_filter[0, 0] = 0
log_gabor.append(gabor_filter)
# Then construct the angular filter components...
spread = []
for o in range(orientations):
angl = o * math.pi / orientations
# For each point in the filter matrix calculate the angular distance from
# the specified filter orientation. To overcome the angular wrap-around
# problem sine difference and cosine difference values are first computed
# and then the atan2 function is used to determine angular distance.
ds = sintheta * math.cos(angl) - costheta * math.sin(
angl
) # Difference in sine.
dc = costheta * math.cos(angl) + sintheta * math.sin(
angl
) # Difference in cosine.
dtheta = torch.abs(torch.atan2(ds, dc))
spread.append(torch.exp((-(dtheta ** 2)) / (2 * theta_sigma ** 2)))
spread = torch.stack(spread)
log_gabor = torch.stack(log_gabor)
# Multiply, add batch dimension and transfer to correct device.
filters = (
(spread.repeat_interleave(scales, dim=0) * log_gabor.repeat(orientations, 1, 1))
.unsqueeze(0)
.to(x)
)
return filters
def _phase_congruency(
x: torch.Tensor,
scales: int = 4,
orientations: int = 4,
min_length: int = 6,
mult: int = 2,
sigma_f: float = 0.55,
delta_theta: float = 1.2,
k: float = 2.0,
) -> torch.Tensor:
r"""Compute Phase Congruence for a batch of greyscale images
Args:
x: Tensor. Shape :math:`(N, 1, H, W)`.
scales: Number of wavelet scales
orientations: Number of filter orientations
min_length: Wavelength of smallest scale filter
mult: Scaling factor between successive filters
sigma_f: Ratio of the standard deviation of the Gaussian
describing the log Gabor filter's transfer function
in the frequency domain to the filter center frequency.
delta_theta: Ratio of angular interval between filter orientations
and the standard deviation of the angular Gaussian function
used to construct filters in the freq. plane.
k: No of standard deviations of the noise energy beyond the mean
at which we set the noise threshold point, below which phase
congruency values get penalized.
Returns:
Phase Congruency map with shape :math:`(N, H, W)`
"""
EPS = torch.finfo(x.dtype).eps
N, _, H, W = x.shape
# Fourier transform
filters = _construct_filters(
x, scales, orientations, min_length, mult, sigma_f, delta_theta, k
)
imagefft = torch.fft.fft2(x)
filters_ifft = torch.fft.ifft2(filters)
filters_ifft = filters_ifft.real * math.sqrt(H * W)
even_odd = torch.view_as_real(torch.fft.ifft2(imagefft * filters)).view(
N, orientations, scales, H, W, 2
)
# Amplitude of even & odd filter response. An = sqrt(real^2 + imag^2)
an = torch.sqrt(torch.sum(even_odd ** 2, dim=-1))
# Take filter at scale 0 and sum spatially
# Record mean squared filter value at smallest scale.
# This is used for noise estimation.
em_n = (filters.view(1, orientations, scales, H, W)[:, :, :1, ...] ** 2).sum(
dim=[-2, -1], keepdims=True
)
# Sum of even filter convolution results.
sum_e = even_odd[..., 0].sum(dim=2, keepdims=True)
# Sum of odd filter convolution results.
sum_o = even_odd[..., 1].sum(dim=2, keepdims=True)
# Get weighted mean filter response vector, this gives the weighted mean phase angle.
x_energy = torch.sqrt(sum_e ** 2 + sum_o ** 2) + EPS
mean_e = sum_e / x_energy
mean_o = sum_o / x_energy
# Now calculate An(cos(phase_deviation) - | sin(phase_deviation)) | by
# using dot and cross products between the weighted mean filter response
# vector and the individual filter response vectors at each scale.
# This quantity is phase congruency multiplied by An, which we call energy.
# Extract even and odd convolution results.
even = even_odd[..., 0]
odd = even_odd[..., 1]
energy = (
even * mean_e + odd * mean_o - torch.abs(even * mean_o - odd * mean_e)
).sum(dim=2, keepdim=True)
# Compensate for noise
# We estimate the noise power from the energy squared response at the
# smallest scale. If the noise is Gaussian the energy squared will have a
# Chi-squared 2DOF pdf. We calculate the median energy squared response
# as this is a robust statistic. From this we estimate the mean.
# The estimate of noise power is obtained by dividing the mean squared
# energy value by the mean squared filter value
abs_eo = torch.sqrt(torch.sum(even_odd[:, :, :1, ...] ** 2, dim=-1)).reshape(
N, orientations, 1, 1, H * W
)
median_e2n = torch.median(abs_eo ** 2, dim=-1, keepdim=True).values
mean_e2n = -median_e2n / math.log(0.5)
# Estimate of noise power.
noise_power = mean_e2n / em_n
# Now estimate the total energy^2 due to noise
# Estimate for sum(An^2) + sum(Ai.*Aj.*(cphi.*cphj + sphi.*sphj))
filters_ifft = filters_ifft.view(1, orientations, scales, H, W)
sum_an2 = torch.sum(filters_ifft ** 2, dim=-3, keepdim=True)
sum_ai_aj = torch.zeros(N, orientations, 1, H, W).to(x)
for s in range(scales - 1):
sum_ai_aj = sum_ai_aj + (
filters_ifft[:, :, s : s + 1] * filters_ifft[:, :, s + 1 :]
).sum(dim=-3, keepdim=True)
sum_an2 = torch.sum(sum_an2, dim=[-1, -2], keepdim=True)
sum_ai_aj = torch.sum(sum_ai_aj, dim=[-1, -2], keepdim=True)
noise_energy2 = 2 * noise_power * sum_an2 + 4 * noise_power * sum_ai_aj
# Rayleigh parameter
tau = torch.sqrt(noise_energy2 / 2)
# Expected value of noise energy
noise_energy = tau * math.sqrt(math.pi / 2)
    noise_energy_sigma = torch.sqrt((2 - math.pi / 2) * tau ** 2)
    # Noise threshold
    T = noise_energy + k * noise_energy_sigma
# The estimated noise effect calculated above is only valid for the PC_1 measure.
# The PC_2 measure does not lend itself readily to the same analysis. However
# empirically it seems that the noise effect is overestimated roughly by a factor
# of 1.7 for the filter parameters used here.
# Empirical rescaling of the estimated noise effect to suit the PC_2 phase congruency measure
T = T / 1.7
# Apply noise threshold
energy = torch.max(energy - T, torch.zeros_like(T))
eps = torch.finfo(energy.dtype).eps
energy_all = energy.sum(dim=[1, 2]) + eps
an_all = an.sum(dim=[1, 2]) + eps
result_pc = energy_all / an_all
return result_pc.unsqueeze(1)
def _lowpassfilter(size: Tuple[int, int], cutoff: float, n: int) -> torch.Tensor:
r"""
Constructs a low-pass Butterworth filter.
Args:
        size: Tuple with height and width of filter to construct
        cutoff: Cutoff frequency of the filter in (0, 0.5]
        n: Filter order. Higher `n` means sharper transition.
            Note that `n` is doubled so that it is always an even integer.
    Returns:
        f = 1 / (1 + (w / cutoff)^(2 * n))
"""
assert 0 < cutoff <= 0.5, "Cutoff frequency must be between 0 and 0.5"
    assert n > 1 and int(n) == n, "n must be an integer greater than 1"
grid_x, grid_y = get_meshgrid(size)
# A matrix with every pixel = radius relative to centre.
radius = torch.sqrt(grid_x ** 2 + grid_y ** 2)
return ifftshift(1.0 / (1.0 + (radius / cutoff) ** (2 * n)))
@ARCH_REGISTRY.register()
class FSIM(nn.Module):
r"""Args:
chromatic: Flag to compute FSIMc, which also takes into account chromatic components
        scales: Number of wavelets used for computation of phase congruency maps
        orientations: Number of filter orientations used for computation of phase congruency maps
min_length: Wavelength of smallest scale filter
mult: Scaling factor between successive filters
sigma_f: Ratio of the standard deviation of the Gaussian describing the log Gabor filter's
transfer function in the frequency domain to the filter center frequency.
delta_theta: Ratio of angular interval between filter orientations and the standard deviation
of the angular Gaussian function used to construct filters in the frequency plane.
k: No of standard deviations of the noise energy beyond the mean at which we set the noise
threshold point, below which phase congruency values get penalized.
References:
L. Zhang, L. Zhang, X. Mou and D. Zhang, "FSIM: A Feature Similarity Index for Image Quality Assessment,"
IEEE Transactions on Image Processing, vol. 20, no. 8, pp. 2378-2386, Aug. 2011, doi: 10.1109/TIP.2011.2109730.
https://ieeexplore.ieee.org/document/5705575
"""
def __init__(
self,
chromatic: bool = True,
scales: int = 4,
orientations: int = 4,
min_length: int = 6,
mult: int = 2,
sigma_f: float = 0.55,
delta_theta: float = 1.2,
k: float = 2.0,
) -> None:
super().__init__()
# Save function with predefined parameters, rather than parameters themself
self.fsim = functools.partial(
fsim,
chromatic=chromatic,
scales=scales,
orientations=orientations,
min_length=min_length,
mult=mult,
sigma_f=sigma_f,
delta_theta=delta_theta,
k=k,
)
def forward(
self,
X: torch.Tensor,
Y: torch.Tensor,
) -> torch.Tensor:
r"""Computation of FSIM as a loss function.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
            Value of FSIM metric, typically in [0, 1] range; higher values indicate better quality.
"""
assert (
X.shape == Y.shape
), f"Input and reference images should have the same shape, but got {X.shape} and {Y.shape}"
score = self.fsim(X, Y)
return score
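# Usage sketch (illustrative, not part of the original file): evaluates FSIMc on a
# random RGB pair. Inputs are assumed to be tensors in [0, 1] of shape (N, 3, H, W);
# fsim() rescales them to [0, 255] internally.
if __name__ == "__main__":
    torch.manual_seed(0)
    ref = torch.rand(1, 3, 256, 256)
    dist = (ref + 0.05 * torch.randn_like(ref)).clamp(0, 1)
    metric = FSIM(chromatic=True)
    with torch.no_grad():
        print("FSIM score (higher is better):", metric(dist, ref))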
| 18,004 | 36.354772 | 119 | py |
BVQI | BVQI-master/pyiqa/archs/niqe_arch.py | r"""NIQE and ILNIQE Metrics
NIQE Metric
Created by: https://github.com/xinntao/BasicSR/blob/5668ba75eb8a77e8d2dd46746a36fee0fbb0fdcd/basicsr/metrics/niqe.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Reference:
MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
ILNIQE Metric
Created by: Chaofeng Chen (https://github.com/chaofengc)
Reference:
- Python codes: https://github.com/IceClear/IL-NIQE/blob/master/IL-NIQE.py
- Matlab codes: https://www4.comp.polyu.edu.hk/~cslzhang/IQA/ILNIQE/Files/ILNIQE.zip
"""
import math
import numpy as np
import scipy
import scipy.io
import torch
from pyiqa.archs.fsim_arch import _construct_filters
from pyiqa.matlab_utils import (
blockproc,
conv2d,
fitweibull,
fspecial,
imfilter,
imresize,
nancov,
nanmean,
)
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.download_util import load_file_from_url
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import diff_round, estimate_aggd_param, normalize_img_with_guass
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/niqe_modelparameters.mat",
"niqe": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/niqe_modelparameters.mat",
"ilniqe": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/ILNIQE_templateModel.mat",
}
def compute_feature(
block: torch.Tensor,
ilniqe: bool = False,
) -> torch.Tensor:
"""Compute features.
Args:
block (Tensor): Image block in shape (b, c, h, w).
Returns:
list: Features with length of 18.
"""
bsz = block.shape[0]
aggd_block = block[:, [0]]
alpha, beta_l, beta_r = estimate_aggd_param(aggd_block)
feat = [alpha, (beta_l + beta_r) / 2]
# distortions disturb the fairly regular structure of natural images.
# This deviation can be captured by analyzing the sample distribution of
# the products of pairs of adjacent coefficients computed along
# horizontal, vertical and diagonal orientations.
shifts = [[0, 1], [1, 0], [1, 1], [1, -1]]
for i in range(len(shifts)):
shifted_block = torch.roll(aggd_block, shifts[i], dims=(2, 3))
alpha, beta_l, beta_r = estimate_aggd_param(aggd_block * shifted_block)
# Eq. 8
mean = (beta_r - beta_l) * (
torch.lgamma(2 / alpha) - torch.lgamma(1 / alpha)
).exp()
feat.extend((alpha, mean, beta_l, beta_r))
feat = [x.reshape(bsz, 1) for x in feat]
if ilniqe:
tmp_block = block[:, 1:4]
channels = 4 - 1
shape_scale = fitweibull(tmp_block.reshape(bsz * channels, -1))
scale_shape = shape_scale[:, [1, 0]].reshape(bsz, -1)
feat.append(scale_shape)
mu = torch.mean(block[:, 4:7], dim=(2, 3))
sigmaSquare = torch.var(block[:, 4:7], dim=(2, 3))
mu_sigma = torch.stack((mu, sigmaSquare), dim=-1).reshape(bsz, -1)
feat.append(mu_sigma)
channels = 85 - 7
tmp_block = block[:, 7:85].reshape(bsz * channels, 1, *block.shape[2:])
alpha_data, beta_l_data, beta_r_data = estimate_aggd_param(tmp_block)
alpha_data = alpha_data.reshape(bsz, channels)
beta_l_data = beta_l_data.reshape(bsz, channels)
beta_r_data = beta_r_data.reshape(bsz, channels)
alpha_beta = torch.stack(
[alpha_data, (beta_l_data + beta_r_data) / 2], dim=-1
).reshape(bsz, -1)
feat.append(alpha_beta)
tmp_block = block[:, 85:109]
channels = 109 - 85
shape_scale = fitweibull(tmp_block.reshape(bsz * channels, -1))
scale_shape = shape_scale[:, [1, 0]].reshape(bsz, -1)
feat.append(scale_shape)
feat = torch.cat(feat, dim=-1)
return feat
def niqe(
img: torch.Tensor,
mu_pris_param: torch.Tensor,
cov_pris_param: torch.Tensor,
block_size_h: int = 96,
block_size_w: int = 96,
) -> torch.Tensor:
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Args:
img (Tensor): Input image.
mu_pris_param (Tensor): Mean of a pre-defined multivariate Gaussian
model calculated on the pristine dataset.
cov_pris_param (Tensor): Covariance of a pre-defined multivariate
Gaussian model calculated on the pristine dataset.
gaussian_window (Tensor): A 7x7 Gaussian window used for smoothing the image.
        block_size_h (int): Height of the blocks into which the image is divided.
            Default: 96 (the official recommended value).
        block_size_w (int): Width of the blocks into which the image is divided.
Default: 96 (the official recommended value).
"""
assert (
img.ndim == 4
), "Input image must be a gray or Y (of YCbCr) image with shape (b, c, h, w)."
# crop image
b, c, h, w = img.shape
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
img = img[..., 0 : num_block_h * block_size_h, 0 : num_block_w * block_size_w]
distparam = [] # dist param is actually the multiscale features
for scale in (1, 2): # perform on two scales (1, 2)
img_normalized = normalize_img_with_guass(img, padding="replicate")
distparam.append(
blockproc(
img_normalized,
[block_size_h // scale, block_size_w // scale],
fun=compute_feature,
)
)
if scale == 1:
img = imresize(img / 255.0, scale=0.5, antialiasing=True)
img = img * 255.0
distparam = torch.cat(distparam, -1)
# fit a MVG (multivariate Gaussian) model to distorted patch features
mu_distparam = nanmean(distparam, dim=1)
cov_distparam = nancov(distparam)
# compute niqe quality, Eq. 10 in the paper
invcov_param = torch.linalg.pinv((cov_pris_param + cov_distparam) / 2)
diff = (mu_pris_param - mu_distparam).unsqueeze(1)
quality = torch.bmm(torch.bmm(diff, invcov_param), diff.transpose(1, 2)).squeeze()
quality = torch.sqrt(quality)
return quality
def calculate_niqe(
img: torch.Tensor,
crop_border: int = 0,
test_y_channel: bool = True,
pretrained_model_path: str = None,
color_space: str = "yiq",
**kwargs,
) -> torch.Tensor:
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Args:
img (Tensor): Input image whose quality needs to be computed.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
        test_y_channel (Bool): Whether to convert to the 'y' channel (of MATLAB YCbCr) or grayscale.
pretrained_model_path (str): The pretrained model path.
Returns:
Tensor: NIQE result.
"""
params = scipy.io.loadmat(pretrained_model_path)
mu_pris_param = np.ravel(params["mu_prisparam"])
cov_pris_param = params["cov_prisparam"]
mu_pris_param = torch.from_numpy(mu_pris_param).to(img)
cov_pris_param = torch.from_numpy(cov_pris_param).to(img)
mu_pris_param = mu_pris_param.repeat(img.size(0), 1)
cov_pris_param = cov_pris_param.repeat(img.size(0), 1, 1)
    if test_y_channel and img.shape[1] == 3:
        img = to_y_channel(img, 255, color_space)
img = diff_round(img)
img = img.to(torch.float64)
if crop_border != 0:
img = img[..., crop_border:-crop_border, crop_border:-crop_border]
niqe_result = niqe(img, mu_pris_param, cov_pris_param)
return niqe_result
def gauDerivative(sigma, in_ch=1, out_ch=1, device=None):
halfLength = math.ceil(3 * sigma)
x, y = np.meshgrid(
np.linspace(-halfLength, halfLength, 2 * halfLength + 1),
np.linspace(-halfLength, halfLength, 2 * halfLength + 1),
)
gauDerX = x * np.exp(-(x ** 2 + y ** 2) / 2 / sigma / sigma)
gauDerY = y * np.exp(-(x ** 2 + y ** 2) / 2 / sigma / sigma)
dx = torch.from_numpy(gauDerX).to(device)
dy = torch.from_numpy(gauDerY).to(device)
dx = dx.repeat(out_ch, in_ch, 1, 1)
dy = dy.repeat(out_ch, in_ch, 1, 1)
return dx, dy
def ilniqe(
img: torch.Tensor,
mu_pris_param: torch.Tensor,
cov_pris_param: torch.Tensor,
principleVectors: torch.Tensor,
meanOfSampleData: torch.Tensor,
resize: bool = True,
block_size_h: int = 84,
block_size_w: int = 84,
) -> torch.Tensor:
"""Calculate IL-NIQE (Integrated Local Natural Image Quality Evaluator) metric.
Args:
img (Tensor): Input image.
mu_pris_param (Tensor): Mean of a pre-defined multivariate Gaussian
model calculated on the pristine dataset.
cov_pris_param (Tensor): Covariance of a pre-defined multivariate
Gaussian model calculated on the pristine dataset.
principleVectors (Tensor): Features from official .mat file.
meanOfSampleData (Tensor): Features from official .mat file.
        resize (Boolean): Whether to resize the image. Default: True.
        block_size_h (int): Height of the blocks into which the image is divided.
            Default: 84 (the official recommended value).
        block_size_w (int): Width of the blocks into which the image is divided.
Default: 84 (the official recommended value).
"""
assert (
img.ndim == 4
), "Input image must be a gray or Y (of YCbCr) image with shape (b, c, h, w)."
sigmaForGauDerivative = 1.66
KforLog = 0.00001
normalizedWidth = 524
minWaveLength = 2.4
sigmaOnf = 0.55
mult = 1.31
dThetaOnSigma = 1.10
scaleFactorForLoG = 0.87
scaleFactorForGaussianDer = 0.28
sigmaForDownsample = 0.9
EPS = 1e-8
scales = 3
orientations = 4
infConst = 10000
nanConst = 2000
if resize:
img = imresize(img, sizes=(normalizedWidth, normalizedWidth))
img = img.clamp(0.0, 255.0)
# crop image
b, c, h, w = img.shape
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
img = img[..., 0 : num_block_h * block_size_h, 0 : num_block_w * block_size_w]
ospace_weight = torch.tensor(
[
[0.3, 0.04, -0.35],
[0.34, -0.6, 0.17],
[0.06, 0.63, 0.27],
]
).to(img)
O_img = img.permute(0, 2, 3, 1) @ ospace_weight.T
O_img = O_img.permute(0, 3, 1, 2)
distparam = [] # dist param is actually the multiscale features
for scale in (1, 2): # perform on two scales (1, 2)
struct_dis = normalize_img_with_guass(
O_img[:, [2]], kernel_size=5, sigma=5.0 / 6, padding="replicate"
)
dx, dy = gauDerivative(
sigmaForGauDerivative / (scale ** scaleFactorForGaussianDer), device=img
)
Ix = conv2d(O_img, dx.repeat(3, 1, 1, 1), groups=3)
Iy = conv2d(O_img, dy.repeat(3, 1, 1, 1), groups=3)
GM = torch.sqrt(Ix ** 2 + Iy ** 2 + EPS)
Ixy = torch.stack((Ix, Iy), dim=2).reshape(
Ix.shape[0], Ix.shape[1] * 2, *Ix.shape[2:]
) # reshape to (IxO1, IxO1, IxO2, IyO2, IxO3, IyO3)
logRGB = torch.log(img + KforLog)
logRGBMS = logRGB - logRGB.mean(dim=(2, 3), keepdim=True)
Intensity = logRGBMS.sum(dim=1, keepdim=True) / np.sqrt(3)
BY = (logRGBMS[:, [0]] + logRGBMS[:, [1]] - 2 * logRGBMS[:, [2]]) / np.sqrt(6)
RG = (logRGBMS[:, [0]] - logRGBMS[:, [1]]) / np.sqrt(2)
compositeMat = torch.cat([struct_dis, GM, Intensity, BY, RG, Ixy], dim=1)
O3 = O_img[:, [2]]
# gabor filter in shape (b, ori * scale, h, w)
LGFilters = _construct_filters(
O3,
scales=scales,
orientations=orientations,
min_length=minWaveLength / (scale ** scaleFactorForLoG),
sigma_f=sigmaOnf,
mult=mult,
delta_theta=dThetaOnSigma,
use_lowpass_filter=False,
)
# reformat to scale * ori
b, _, h, w = LGFilters.shape
LGFilters = (
LGFilters.reshape(b, orientations, scales, h, w)
.transpose(1, 2)
.reshape(b, -1, h, w)
)
# TODO: current filters needs to be transposed to get same results as matlab, find the bug
LGFilters = LGFilters.transpose(-1, -2)
fftIm = torch.fft.fft2(O3)
logResponse = []
partialDer = []
GM = []
for index in range(LGFilters.shape[1]):
filter = LGFilters[:, [index]]
response = torch.fft.ifft2(filter * fftIm)
realRes = torch.real(response)
imagRes = torch.imag(response)
partialXReal = conv2d(realRes, dx)
partialYReal = conv2d(realRes, dy)
realGM = torch.sqrt(partialXReal ** 2 + partialYReal ** 2 + EPS)
partialXImag = conv2d(imagRes, dx)
partialYImag = conv2d(imagRes, dy)
imagGM = torch.sqrt(partialXImag ** 2 + partialYImag ** 2 + EPS)
logResponse.append(realRes)
logResponse.append(imagRes)
partialDer.append(partialXReal)
partialDer.append(partialYReal)
partialDer.append(partialXImag)
partialDer.append(partialYImag)
GM.append(realGM)
GM.append(imagGM)
logResponse = torch.cat(logResponse, dim=1)
partialDer = torch.cat(partialDer, dim=1)
GM = torch.cat(GM, dim=1)
compositeMat = torch.cat((compositeMat, logResponse, partialDer, GM), dim=1)
distparam.append(
blockproc(
compositeMat,
[block_size_h // scale, block_size_w // scale],
fun=compute_feature,
ilniqe=True,
)
)
gauForDS = fspecial(math.ceil(6 * sigmaForDownsample), sigmaForDownsample).to(
img
)
filterResult = imfilter(
O_img, gauForDS.repeat(3, 1, 1, 1), padding="replicate", groups=3
)
O_img = filterResult[..., ::2, ::2]
filterResult = imfilter(
img, gauForDS.repeat(3, 1, 1, 1), padding="replicate", groups=3
)
img = filterResult[..., ::2, ::2]
distparam = torch.cat(distparam, dim=-1) # b, block_num, feature_num
distparam[distparam > infConst] = infConst
# fit a MVG (multivariate Gaussian) model to distorted patch features
coefficientsViaPCA = torch.bmm(
principleVectors.transpose(1, 2),
(distparam - meanOfSampleData.unsqueeze(1)).transpose(1, 2),
)
final_features = coefficientsViaPCA.transpose(1, 2)
b, blk_num, feat_num = final_features.shape
# remove block features with nan and compute nonan cov
cov_distparam = nancov(final_features)
# replace nan in final features with mu
mu_final_features = nanmean(final_features, dim=1, keepdim=True)
final_features_withmu = torch.where(
torch.isnan(final_features), mu_final_features, final_features
)
# compute ilniqe quality
invcov_param = torch.linalg.pinv((cov_pris_param + cov_distparam) / 2)
diff = final_features_withmu - mu_pris_param.unsqueeze(1)
quality = (torch.bmm(diff, invcov_param) * diff).sum(dim=-1)
quality = torch.sqrt(quality).mean(dim=1)
return quality
def calculate_ilniqe(
img: torch.Tensor, crop_border: int = 0, pretrained_model_path: str = None, **kwargs
) -> torch.Tensor:
"""Calculate IL-NIQE metric.
Args:
img (Tensor): Input image whose quality needs to be computed.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (str): The pretrained model path.
Returns:
Tensor: IL-NIQE result.
"""
params = scipy.io.loadmat(pretrained_model_path)
img = img * 255.0
img = diff_round(img)
# float64 precision is critical to be consistent with matlab codes
img = img.to(torch.float64)
mu_pris_param = np.ravel(params["templateModel"][0][0])
cov_pris_param = params["templateModel"][0][1]
meanOfSampleData = np.ravel(params["templateModel"][0][2])
principleVectors = params["templateModel"][0][3]
mu_pris_param = torch.from_numpy(mu_pris_param).to(img)
cov_pris_param = torch.from_numpy(cov_pris_param).to(img)
meanOfSampleData = torch.from_numpy(meanOfSampleData).to(img)
principleVectors = torch.from_numpy(principleVectors).to(img)
mu_pris_param = mu_pris_param.repeat(img.size(0), 1)
cov_pris_param = cov_pris_param.repeat(img.size(0), 1, 1)
meanOfSampleData = meanOfSampleData.repeat(img.size(0), 1)
principleVectors = principleVectors.repeat(img.size(0), 1, 1)
if crop_border != 0:
img = img[..., crop_border:-crop_border, crop_border:-crop_border]
ilniqe_result = ilniqe(
img, mu_pris_param, cov_pris_param, principleVectors, meanOfSampleData
)
return ilniqe_result
@ARCH_REGISTRY.register()
class NIQE(torch.nn.Module):
r"""Args:
channels (int): Number of processed channel.
test_y_channel (bool): whether to use y channel on ycbcr.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (str): The pretrained model path.
References:
Mittal, Anish, Rajiv Soundararajan, and Alan C. Bovik.
"Making a “completely blind” image quality analyzer."
IEEE Signal Processing Letters (SPL) 20.3 (2012): 209-212.
"""
def __init__(
self,
channels: int = 1,
test_y_channel: bool = True,
color_space: str = "yiq",
crop_border: int = 0,
pretrained_model_path: str = None,
) -> None:
super(NIQE, self).__init__()
self.channels = channels
self.test_y_channel = test_y_channel
self.color_space = color_space
self.crop_border = crop_border
if pretrained_model_path is not None:
self.pretrained_model_path = pretrained_model_path
else:
self.pretrained_model_path = load_file_from_url(default_model_urls["url"])
def forward(self, X: torch.Tensor) -> torch.Tensor:
r"""Computation of NIQE metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Returns:
            Value of NIQE metric; a lower score indicates better perceptual quality.
"""
score = calculate_niqe(
X,
self.crop_border,
self.test_y_channel,
self.pretrained_model_path,
self.color_space,
)
return score
@ARCH_REGISTRY.register()
class ILNIQE(torch.nn.Module):
r"""Args:
channels (int): Number of processed channel.
test_y_channel (bool): whether to use y channel on ycbcr.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (str): The pretrained model path.
References:
Zhang, Lin, Lei Zhang, and Alan C. Bovik. "A feature-enriched
completely blind image quality evaluator." IEEE Transactions
on Image Processing 24.8 (2015): 2579-2591.
"""
def __init__(
self, channels: int = 3, crop_border: int = 0, pretrained_model_path: str = None
) -> None:
super(ILNIQE, self).__init__()
self.channels = channels
self.crop_border = crop_border
if pretrained_model_path is not None:
self.pretrained_model_path = pretrained_model_path
else:
self.pretrained_model_path = load_file_from_url(
default_model_urls["ilniqe"]
)
def forward(self, X: torch.Tensor) -> torch.Tensor:
r"""Computation of NIQE metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of niqe metric in [0, 1] range.
"""
score = calculate_ilniqe(X, self.crop_border, self.pretrained_model_path)
return score
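# Usage sketch (illustrative, not part of the original file): runs the no-reference
# NIQE metric on a random RGB tensor in [0, 1]. The default pretrained parameters are
# downloaded on first use; random noise here only exercises the code path and does
# not produce a meaningful quality score.
if __name__ == "__main__":
    torch.manual_seed(0)
    x = torch.rand(1, 3, 384, 384)
    niqe_metric = NIQE(test_y_channel=True, color_space="yiq")
    with torch.no_grad():
        print("NIQE score (lower is better):", niqe_metric(x))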
| 20,124 | 35.196043 | 120 | py |
BVQI | BVQI-master/pyiqa/archs/__init__.py | import importlib
from copy import deepcopy
from os import path as osp
from pyiqa.utils import get_root_logger, scandir
from pyiqa.utils.registry import ARCH_REGISTRY
__all__ = ["build_network"]
# automatically scan and import arch modules for registry
# scan all the files under the 'archs' folder and collect files ending with
# '_arch.py'
arch_folder = osp.dirname(osp.abspath(__file__))
arch_filenames = [
osp.splitext(osp.basename(v))[0]
for v in scandir(arch_folder)
if v.endswith("_arch.py")
]
# import all the arch modules
_arch_modules = [
importlib.import_module(f"pyiqa.archs.{file_name}") for file_name in arch_filenames
]
def build_network(opt):
opt = deepcopy(opt)
network_type = opt.pop("type")
net = ARCH_REGISTRY.get(network_type)(**opt)
logger = get_root_logger()
logger.info(f"Network [{net.__class__.__name__}] is created.")
return net
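# Usage sketch (illustrative, not part of the original file): builds a registered
# architecture from an option dict, mirroring how training configs specify networks.
# The 'type' value and keyword arguments below are assumptions for demonstration only.
def _example_build_network():
    opt = {"type": "CNNIQA", "pretrained": None}
    return build_network(opt)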
| 900 | 27.15625 | 87 | py |
BVQI | BVQI-master/pyiqa/archs/wadiqam_arch.py | r"""WaDIQaM model.
Reference:
Bosse, Sebastian, Dominique Maniry, Klaus-Robert Müller, Thomas Wiegand,
and Wojciech Samek. "Deep neural networks for no-reference and full-reference
image quality assessment." IEEE Transactions on image processing 27, no. 1
(2017): 206-219.
Created by: https://github.com/lidq92/WaDIQaM
Modified by: Chaofeng Chen (https://github.com/chaofengc)
Refer to:
Official code from https://github.com/dmaniry/deepIQA
"""
from typing import List, Union, cast
import torch
import torch.nn as nn
from pyiqa.utils.registry import ARCH_REGISTRY
def make_layers(cfg: List[Union[str, int]]) -> nn.Sequential:
layers: List[nn.Module] = []
in_channels = 3
for v in cfg:
if v == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
v = cast(int, v)
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
@ARCH_REGISTRY.register()
class WaDIQaM(nn.Module):
"""WaDIQaM model.
Args:
        metric_mode (String): Metric mode, 'FR' (full-reference) or 'NR' (no-reference).
        weighted_average (Boolean): Whether to aggregate patch scores with learned weights.
        train_patch_num (int): Number of random patches sampled per image during training. Default: 32.
        pretrained_model_path (String): The pretrained model path.
        load_feature_weight_only (Boolean): Only load the backbone feature weights.
        eps (float): Small constant added to the weights to avoid division by zero.
"""
def __init__(
self,
metric_mode="FR",
weighted_average=True,
train_patch_num=32,
pretrained_model_path=None,
load_feature_weight_only=False,
eps=1e-8,
):
super(WaDIQaM, self).__init__()
backbone_cfg = [
32,
32,
"M",
64,
64,
"M",
128,
128,
"M",
256,
256,
"M",
512,
512,
"M",
]
self.features = make_layers(backbone_cfg)
self.train_patch_num = train_patch_num
self.patch_size = 32 # This cannot be changed due to network design
self.metric_mode = metric_mode
fc_in_channel = 512 * 3 if metric_mode == "FR" else 512
self.eps = eps
self.fc_q = nn.Sequential(
nn.Linear(fc_in_channel, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, 1),
)
self.weighted_average = weighted_average
if weighted_average:
self.fc_w = nn.Sequential(
nn.Linear(fc_in_channel, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, 1),
nn.ReLU(True),
)
if pretrained_model_path is not None:
self.load_pretrained_network(
pretrained_model_path, load_feature_weight_only
)
def load_pretrained_network(self, model_path, load_feature_weight_only=False):
state_dict = torch.load(model_path, map_location=torch.device("cpu"))[
"state_dict"
]
if load_feature_weight_only:
print("Only load backbone feature net")
new_state_dict = {}
for k in state_dict.keys():
if "features" in k:
new_state_dict[k] = state_dict[k]
            # load into this module directly; the class has no separate `self.net` attribute
            self.load_state_dict(new_state_dict, strict=False)
        else:
            self.load_state_dict(state_dict, strict=True)
def _get_random_patches(self, x, y=None):
"""train with random crop patches"""
self.patch_num = self.train_patch_num
b, c, h, w = x.shape
th = tw = self.patch_size
cropped_x = []
cropped_y = []
for s in range(self.train_patch_num):
i = torch.randint(0, h - th + 1, size=(1,)).item()
j = torch.randint(0, w - tw + 1, size=(1,)).item()
cropped_x.append(x[:, :, i : i + th, j : j + tw])
if y is not None:
cropped_y.append(y[:, :, i : i + th, j : j + tw])
if y is not None:
cropped_x = torch.stack(cropped_x, dim=1).reshape(-1, c, th, tw)
cropped_y = torch.stack(cropped_y, dim=1).reshape(-1, c, th, tw)
return cropped_x, cropped_y
else:
cropped_x = torch.stack(cropped_x, dim=1).reshape(-1, c, th, tw)
return cropped_x
def _get_nonoverlap_patches(self, x, y=None):
"""test with non overlap patches"""
self.patch_num = 0
b, c, h, w = x.shape
th = tw = self.patch_size
cropped_x = []
cropped_y = []
for i in range(0, h - th, th):
for j in range(0, w - tw, tw):
cropped_x.append(x[:, :, i : i + th, j : j + tw])
if y is not None:
cropped_y.append(y[:, :, i : i + th, j : j + tw])
self.patch_num += 1
if y is not None:
cropped_x = torch.stack(cropped_x, dim=1).reshape(-1, c, th, tw)
cropped_y = torch.stack(cropped_y, dim=1).reshape(-1, c, th, tw)
return cropped_x, cropped_y
else:
cropped_x = torch.stack(cropped_x, dim=1).reshape(-1, c, th, tw)
return cropped_x
def get_patches(self, x, y=None):
if self.training:
return self._get_random_patches(x, y)
else:
return self._get_nonoverlap_patches(x, y)
def extract_features(self, patches):
h = self.features(patches)
h = h.reshape(-1, self.patch_num, 512)
return h
def forward(self, x, y=None):
r"""WaDIQaM model.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A reference tensor. Shape :math:`(N, C, H, W)`.
"""
if self.metric_mode == "FR":
assert y is not None, "Full reference metric requires reference input"
x_patches, y_patches = self.get_patches(x, y)
feat_img = self.extract_features(x_patches)
feat_ref = self.extract_features(y_patches)
feat_q = torch.cat((feat_ref, feat_img, feat_img - feat_ref), dim=-1)
else:
x_patches = self.get_patches(x)
feat_q = self.extract_features(x_patches)
q_score = self.fc_q(feat_q)
        if self.weighted_average:
            # fc_w is only defined when weighted_average is enabled
            weight = self.fc_w(feat_q) + self.eps  # add eps to avoid training collapse
            q_final = torch.sum(q_score * weight, dim=1) / torch.sum(weight, dim=1)
else:
q_final = q_score.mean(dim=1)
return q_final.reshape(-1, 1)
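# Usage sketch (illustrative, not part of the original file): scores a distorted/
# reference pair with a randomly initialised full-reference WaDIQaM. Shapes are
# assumptions; a real run would load pretrained weights via pretrained_model_path.
if __name__ == "__main__":
    torch.manual_seed(0)
    model = WaDIQaM(metric_mode="FR", weighted_average=True)
    model.eval()  # eval mode switches to non-overlapping test patches
    ref = torch.rand(1, 3, 256, 256)
    dist = (ref + 0.1 * torch.randn_like(ref)).clamp(0, 1)
    with torch.no_grad():
        print("WaDIQaM-FR score:", model(dist, ref))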
| 6,704 | 31.391304 | 83 | py |
BVQI | BVQI-master/pyiqa/archs/cnniqa_arch.py | r"""CNNIQA Model.
Created by: https://github.com/lidq92/CNNIQA
Modified by: Chaofeng Chen (https://github.com/chaofengc)
Modification:
- We use 3 channel RGB input.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"koniq10k": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/CNNIQA_koniq10k-fd89516f.pth"
}
@ARCH_REGISTRY.register()
class CNNIQA(nn.Module):
r"""CNNIQA model.
Args:
ker_size (int): Kernel size.
        n_kers (int): Number of kernels.
n1_nodes (int): Number of n1 nodes.
n2_nodes (int): Number of n2 nodes.
pretrained_model_path (String): Pretrained model path.
Reference:
Kang, Le, Peng Ye, Yi Li, and David Doermann. "Convolutional
neural networks for no-reference image quality assessment."
In Proceedings of the IEEE conference on computer vision and
pattern recognition, pp. 1733-1740. 2014.
"""
def __init__(
self,
ker_size=7,
n_kers=50,
n1_nodes=800,
n2_nodes=800,
pretrained="koniq10k",
pretrained_model_path=None,
):
super(CNNIQA, self).__init__()
self.conv1 = nn.Conv2d(3, n_kers, ker_size)
self.fc1 = nn.Linear(2 * n_kers, n1_nodes)
self.fc2 = nn.Linear(n1_nodes, n2_nodes)
self.fc3 = nn.Linear(n2_nodes, 1)
self.dropout = nn.Dropout()
if pretrained_model_path is None and pretrained is not None:
pretrained_model_path = default_model_urls[pretrained]
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path, True, "params")
def forward(self, x):
r"""Compute IQA using CNNIQA model.
Args:
x: An input tensor with (N, C, H, W) shape. RGB channel order for colour images.
Returns:
Value of CNNIQA model.
"""
h = self.conv1(x)
h1 = F.max_pool2d(h, (h.size(-2), h.size(-1)))
h2 = -F.max_pool2d(-h, (h.size(-2), h.size(-1)))
h = torch.cat((h1, h2), 1) # max-min pooling
h = h.squeeze(3).squeeze(2)
h = F.relu(self.fc1(h))
h = self.dropout(h)
h = F.relu(self.fc2(h))
q = self.fc3(h)
return q
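# Usage sketch (illustrative, not part of the original file): runs CNNIQA on a random
# RGB crop. `pretrained=None` is used here only to skip the checkpoint download, so
# the printed score comes from randomly initialised weights.
if __name__ == "__main__":
    torch.manual_seed(0)
    model = CNNIQA(pretrained=None)
    x = torch.rand(1, 3, 224, 224)
    with torch.no_grad():
        print("CNNIQA score:", model(x))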
| 2,425 | 26.568182 | 118 | py |
BVQI | BVQI-master/pyiqa/archs/iqt_arch.py | import numpy as np
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from pyexpat import model
from timm.models.resnet import BasicBlock, Bottleneck
from timm.models.vision_transformer import Block
from torchvision.ops.deform_conv import DeformConv2d
from pyiqa.archs.arch_util import (
ExactPadding2d,
default_init_weights,
load_pretrained_network,
to_2tuple,
)
from pyiqa.utils.registry import ARCH_REGISTRY
class IQARegression(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.conv_enc = nn.Conv2d(
in_channels=320 * 6, out_channels=config.d_hidn, kernel_size=1
)
self.conv_dec = nn.Conv2d(
in_channels=320 * 6, out_channels=config.d_hidn, kernel_size=1
)
self.transformer = Transformer(self.config)
self.projection = nn.Sequential(
nn.Linear(self.config.d_hidn, self.config.d_MLP_head, bias=False),
nn.ReLU(),
nn.Linear(self.config.d_MLP_head, self.config.n_output, bias=False),
)
def forward(self, enc_inputs, enc_inputs_embed, dec_inputs, dec_inputs_embed):
# batch x (320*6) x 29 x 29 -> batch x 256 x 29 x 29
enc_inputs_embed = self.conv_enc(enc_inputs_embed)
dec_inputs_embed = self.conv_dec(dec_inputs_embed)
# batch x 256 x 29 x 29 -> batch x 256 x (29*29)
b, c, h, w = enc_inputs_embed.size()
enc_inputs_embed = torch.reshape(enc_inputs_embed, (b, c, h * w))
enc_inputs_embed = enc_inputs_embed.permute(0, 2, 1)
# batch x 256 x (29*29) -> batch x (29*29) x 256
dec_inputs_embed = torch.reshape(dec_inputs_embed, (b, c, h * w))
dec_inputs_embed = dec_inputs_embed.permute(0, 2, 1)
# (bs, n_dec_seq+1, d_hidn), [(bs, n_head, n_enc_seq+1, n_enc_seq+1)], [(bs, n_head, n_dec_seq+1, n_dec_seq+1)], [(bs, n_head, n_dec_seq+1, n_enc_seq+1)]
(
dec_outputs,
enc_self_attn_probs,
dec_self_attn_probs,
dec_enc_attn_probs,
) = self.transformer(enc_inputs, enc_inputs_embed, dec_inputs, dec_inputs_embed)
# (bs, n_dec_seq+1, d_hidn) -> (bs, d_hidn)
# dec_outputs, _ = torch.max(dec_outputs, dim=1) # original transformer
dec_outputs = dec_outputs[:, 0, :] # in the IQA paper
# dec_outputs = torch.mean(dec_outputs, dim=1) # general idea
# (bs, n_output)
pred = self.projection(dec_outputs)
return pred
""" transformer """
class Transformer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.encoder = Encoder(self.config)
self.decoder = Decoder(self.config)
def forward(self, enc_inputs, enc_inputs_embed, dec_inputs, dec_inputs_embed):
# (bs, n_enc_seq, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]
enc_outputs, enc_self_attn_probs = self.encoder(enc_inputs, enc_inputs_embed)
# (bs, n_seq, d_hidn), [(bs, n_head, n_dec_seq, n_dec_seq)], [(bs, n_head, n_dec_seq, n_enc_seq)]
dec_outputs, dec_self_attn_probs, dec_enc_attn_probs = self.decoder(
dec_inputs, dec_inputs_embed, enc_inputs, enc_outputs
)
# (bs, n_dec_seq, n_dec_vocab), [(bs, n_head, n_enc_seq, n_enc_seq)], [(bs, n_head, n_dec_seq, n_dec_seq)], [(bs, n_head, n_dec_seq, n_enc_seq)]
return dec_outputs, enc_self_attn_probs, dec_self_attn_probs, dec_enc_attn_probs
""" encoder """
class Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
# fixed position embedding
# sinusoid_table = torch.FloatTensor(get_sinusoid_encoding_table(self.config.n_enc_seq+1, self.config.d_hidn))
# self.pos_emb = nn.Embedding.from_pretrained(sinusoid_table, freeze=True)
# learnable position embedding
self.pos_embedding = nn.Parameter(
torch.randn(1, self.config.n_enc_seq + 1, self.config.d_hidn)
)
self.cls_token = nn.Parameter(torch.randn(1, 1, self.config.d_hidn))
self.dropout = nn.Dropout(self.config.emb_dropout)
self.layers = nn.ModuleList(
[EncoderLayer(self.config) for _ in range(self.config.n_layer)]
)
def forward(self, inputs, inputs_embed):
# inputs: batch x (len_seq+1) / inputs_embed: batch x len_seq x n_feat
b, n, _ = inputs_embed.shape
# positions: batch x (len_seq+1)
positions = (
torch.arange(inputs.size(1), device=inputs.device, dtype=torch.int64)
.expand(inputs.size(0), inputs.size(1))
.contiguous()
+ 1
)
pos_mask = inputs.eq(self.config.i_pad)
positions.masked_fill_(pos_mask, 0)
# outputs: batch x (len_seq+1) x n_feat
cls_tokens = repeat(self.cls_token, "() n d -> b n d", b=b)
x = torch.cat((cls_tokens, inputs_embed), dim=1)
x += self.pos_embedding
# x += self.pos_emb(positions)
outputs = self.dropout(x)
# (bs, n_enc_seq+1, n_enc_seq+1)
attn_mask = get_attn_pad_mask(inputs, inputs, self.config.i_pad)
attn_probs = []
for layer in self.layers:
# (bs, n_enc_seq+1, d_hidn), (bs, n_head, n_enc_seq+1, n_enc_seq+1)
outputs, attn_prob = layer(outputs, attn_mask)
attn_probs.append(attn_prob)
# (bs, n_enc_seq+1, d_hidn), [(bs, n_head, n_enc_seq+1, n_enc_seq+1)]
return outputs, attn_probs
""" encoder layer """
class EncoderLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.self_attn = MultiHeadAttention(self.config)
self.layer_norm1 = nn.LayerNorm(
self.config.d_hidn, eps=self.config.layer_norm_epsilon
)
self.pos_ffn = PoswiseFeedForwardNet(self.config)
self.layer_norm2 = nn.LayerNorm(
self.config.d_hidn, eps=self.config.layer_norm_epsilon
)
def forward(self, inputs, attn_mask):
# (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)
att_outputs, attn_prob = self.self_attn(inputs, inputs, inputs, attn_mask)
att_outputs = self.layer_norm1(inputs + att_outputs)
# (bs, n_enc_seq, d_hidn)
ffn_outputs = self.pos_ffn(att_outputs)
ffn_outputs = self.layer_norm2(ffn_outputs + att_outputs)
# (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)
return ffn_outputs, attn_prob
def get_sinusoid_encoding_table(n_seq, d_hidn):
def cal_angle(position, i_hidn):
return position / np.power(10000, 2 * (i_hidn // 2) / d_hidn)
def get_posi_angle_vec(position):
return [cal_angle(position, i_hidn) for i_hidn in range(d_hidn)]
sinusoid_table = np.array([get_posi_angle_vec(i_seq) for i_seq in range(n_seq)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # even index sin
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # odd index cos
return sinusoid_table
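# Illustrative helper (not part of the original file): a minimal sanity check for the
# fixed positional encoding above. The sizes are assumptions for demonstration only.
def _sinusoid_table_demo(n_seq=4, d_hidn=8):
    """Return the (n_seq, d_hidn) table; even columns hold sin terms, odd columns cos.
    Position 0 encodes to sin(0) = 0 in even columns and cos(0) = 1 in odd columns."""
    return get_sinusoid_encoding_table(n_seq, d_hidn)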
""" attention pad mask """
def get_attn_pad_mask(seq_q, seq_k, i_pad):
batch_size, len_q = seq_q.size()
batch_size, len_k = seq_k.size()
pad_attn_mask = seq_k.data.eq(i_pad)
pad_attn_mask = pad_attn_mask.unsqueeze(1).expand(batch_size, len_q, len_k)
return pad_attn_mask
""" multi head attention """
class MultiHeadAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.W_Q = nn.Linear(
self.config.d_hidn, self.config.n_head * self.config.d_head
)
self.W_K = nn.Linear(
self.config.d_hidn, self.config.n_head * self.config.d_head
)
self.W_V = nn.Linear(
self.config.d_hidn, self.config.n_head * self.config.d_head
)
self.scaled_dot_attn = ScaledDotProductAttention(self.config)
self.linear = nn.Linear(
self.config.n_head * self.config.d_head, self.config.d_hidn
)
self.dropout = nn.Dropout(config.dropout)
def forward(self, Q, K, V, attn_mask):
batch_size = Q.size(0)
# (bs, n_head, n_q_seq, d_head)
q_s = (
self.W_Q(Q)
.view(batch_size, -1, self.config.n_head, self.config.d_head)
.transpose(1, 2)
)
# (bs, n_head, n_k_seq, d_head)
k_s = (
self.W_K(K)
.view(batch_size, -1, self.config.n_head, self.config.d_head)
.transpose(1, 2)
)
# (bs, n_head, n_v_seq, d_head)
v_s = (
self.W_V(V)
.view(batch_size, -1, self.config.n_head, self.config.d_head)
.transpose(1, 2)
)
# (bs, n_head, n_q_seq, n_k_seq)
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.config.n_head, 1, 1)
# (bs, n_head, n_q_seq, d_head), (bs, n_head, n_q_seq, n_k_seq)
context, attn_prob = self.scaled_dot_attn(q_s, k_s, v_s, attn_mask)
# (bs, n_head, n_q_seq, h_head * d_head)
context = (
context.transpose(1, 2)
.contiguous()
.view(batch_size, -1, self.config.n_head * self.config.d_head)
)
# (bs, n_head, n_q_seq, e_embd)
output = self.linear(context)
output = self.dropout(output)
# (bs, n_q_seq, d_hidn), (bs, n_head, n_q_seq, n_k_seq)
return output, attn_prob
""" scale dot product attention """
class ScaledDotProductAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.dropout = nn.Dropout(config.dropout)
self.scale = 1 / (self.config.d_head ** 0.5)
def forward(self, Q, K, V, attn_mask):
# (bs, n_head, n_q_seq, n_k_seq)
scores = torch.matmul(Q, K.transpose(-1, -2))
scores = scores.mul_(self.scale)
scores.masked_fill_(attn_mask, -1e9)
# (bs, n_head, n_q_seq, n_k_seq)
attn_prob = nn.Softmax(dim=-1)(scores)
attn_prob = self.dropout(attn_prob)
# (bs, n_head, n_q_seq, d_v)
context = torch.matmul(attn_prob, V)
# (bs, n_head, n_q_seq, d_v), (bs, n_head, n_q_seq, n_v_seq)
return context, attn_prob
""" feed forward """
class PoswiseFeedForwardNet(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.conv1 = nn.Conv1d(
in_channels=self.config.d_hidn, out_channels=self.config.d_ff, kernel_size=1
)
self.conv2 = nn.Conv1d(
in_channels=self.config.d_ff, out_channels=self.config.d_hidn, kernel_size=1
)
self.active = F.gelu
self.dropout = nn.Dropout(config.dropout)
def forward(self, inputs):
# (bs, d_ff, n_seq)
output = self.conv1(inputs.transpose(1, 2))
output = self.active(output)
# (bs, n_seq, d_hidn)
output = self.conv2(output).transpose(1, 2)
output = self.dropout(output)
# (bs, n_seq, d_hidn)
return output
""" decoder """
class Decoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_embedding = nn.Parameter(
torch.randn(1, self.config.n_enc_seq + 1, self.config.d_hidn)
)
self.cls_token = nn.Parameter(torch.randn(1, 1, self.config.d_hidn))
self.dropout = nn.Dropout(self.config.emb_dropout)
self.layers = nn.ModuleList(
[DecoderLayer(self.config) for _ in range(self.config.n_layer)]
)
def forward(self, dec_inputs, dec_inputs_embed, enc_inputs, enc_outputs):
# enc_inputs: batch x (len_seq+1) / enc_outputs: batch x (len_seq+1) x n_feat
# dec_inputs: batch x (len_seq+1) / dec_inputs_embed: batch x len_seq x n_feat
b, n, _ = dec_inputs_embed.shape
cls_tokens = repeat(self.cls_token, "() n d -> b n d", b=b)
x = torch.cat((cls_tokens, dec_inputs_embed), dim=1)
x += self.pos_embedding[:, : (n + 1)]
# (bs, n_dec_seq+1, d_hidn)
dec_outputs = self.dropout(x)
# (bs, n_dec_seq+1, n_dec_seq+1)
dec_attn_pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs, self.config.i_pad)
# (bs, n_dec_seq+1, n_dec_seq+1)
dec_attn_decoder_mask = get_attn_decoder_mask(dec_inputs)
# (bs, n_dec_seq+1, n_dec_seq+1)
dec_self_attn_mask = torch.gt((dec_attn_pad_mask + dec_attn_decoder_mask), 0)
# (bs, n_dec_seq+1, n_enc_seq+1)
dec_enc_attn_mask = get_attn_pad_mask(dec_inputs, enc_inputs, self.config.i_pad)
self_attn_probs, dec_enc_attn_probs = [], []
for layer in self.layers:
# (bs, n_dec_seq+1, d_hidn), (bs, n_dec_seq+1, n_dec_seq+1), (bs, n_dec_seq+1, n_enc_seq+1)
dec_outputs, self_attn_prob, dec_enc_attn_prob = layer(
dec_outputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask
)
self_attn_probs.append(self_attn_prob)
dec_enc_attn_probs.append(dec_enc_attn_prob)
# (bs, n_dec_seq+1, d_hidn), [(bs, n_dec_seq+1, n_dec_seq+1)], [(bs, n_dec_seq+1, n_enc_seq+1)]
return dec_outputs, self_attn_probs, dec_enc_attn_probs
""" decoder layer """
class DecoderLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.self_attn = MultiHeadAttention(self.config)
self.layer_norm1 = nn.LayerNorm(
self.config.d_hidn, eps=self.config.layer_norm_epsilon
)
self.dec_enc_attn = MultiHeadAttention(self.config)
self.layer_norm2 = nn.LayerNorm(
self.config.d_hidn, eps=self.config.layer_norm_epsilon
)
self.pos_ffn = PoswiseFeedForwardNet(self.config)
self.layer_norm3 = nn.LayerNorm(
self.config.d_hidn, eps=self.config.layer_norm_epsilon
)
def forward(self, dec_inputs, enc_outputs, self_attn_mask, dec_enc_attn_mask):
# (bs, n_dec_seq, d_hidn), (bs, n_head, n_dec_seq, n_dec_seq)
self_att_outputs, self_attn_prob = self.self_attn(
dec_inputs, dec_inputs, dec_inputs, self_attn_mask
)
self_att_outputs = self.layer_norm1(dec_inputs + self_att_outputs)
# (bs, n_dec_seq, d_hidn), (bs, n_head, n_dec_seq, n_enc_seq)
dec_enc_att_outputs, dec_enc_attn_prob = self.dec_enc_attn(
self_att_outputs, enc_outputs, enc_outputs, dec_enc_attn_mask
)
dec_enc_att_outputs = self.layer_norm2(self_att_outputs + dec_enc_att_outputs)
# (bs, n_dec_seq, d_hidn)
ffn_outputs = self.pos_ffn(dec_enc_att_outputs)
ffn_outputs = self.layer_norm3(dec_enc_att_outputs + ffn_outputs)
# (bs, n_dec_seq, d_hidn), (bs, n_head, n_dec_seq, n_dec_seq), (bs, n_head, n_dec_seq, n_enc_seq)
return ffn_outputs, self_attn_prob, dec_enc_attn_prob
""" attention decoder mask """
def get_attn_decoder_mask(seq):
subsequent_mask = (
torch.ones_like(seq).unsqueeze(-1).expand(seq.size(0), seq.size(1), seq.size(1))
)
subsequent_mask = subsequent_mask.triu(
diagonal=1
) # upper triangular part of a matrix(2-D)
return subsequent_mask
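# Illustrative usage sketch: combine the padding mask and the causal mask the
# same way ``Decoder.forward`` does. The token ids and the pad id of 0 are
# arbitrary assumptions for this example.
def _example_decoder_self_attn_mask():
    seq = torch.tensor([[5, 7, 0, 0]])
    pad_mask = get_attn_pad_mask(seq, seq, 0)
    causal_mask = get_attn_decoder_mask(seq)
    # True marks positions a query token is not allowed to attend to
    return torch.gt(pad_mask + causal_mask, 0)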
def random_crop(x, y, crop_size, crop_num):
b, c, h, w = x.shape
ch, cw = to_2tuple(crop_size)
crops_x = []
crops_y = []
for i in range(crop_num):
        # use an inclusive upper bound so inputs exactly equal to the crop size still work
        sh = np.random.randint(0, h - ch + 1)
        sw = np.random.randint(0, w - cw + 1)
crops_x.append(x[..., sh : sh + ch, sw : sw + cw])
crops_y.append(y[..., sh : sh + ch, sw : sw + cw])
crops_x = torch.stack(crops_x, dim=1)
crops_y = torch.stack(crops_y, dim=1)
return crops_x.reshape(b * crop_num, c, ch, cw), crops_y.reshape(
b * crop_num, c, ch, cw
)
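# Illustrative usage sketch: distorted and reference images are cropped at the
# same positions so they stay spatially aligned. All sizes below are arbitrary
# assumptions for this example.
def _example_random_crop():
    x = torch.rand(2, 3, 128, 128)
    y = torch.rand(2, 3, 128, 128)
    crops_x, crops_y = random_crop(x, y, crop_size=64, crop_num=3)
    # crops_x and crops_y both have shape (2 * 3, 3, 64, 64)
    return crops_x, crops_y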
class SaveOutput:
def __init__(self):
self.outputs = {}
def __call__(self, module, module_in, module_out):
if module_out.device in self.outputs.keys():
self.outputs[module_out.device].append(module_out)
else:
self.outputs[module_out.device] = [module_out]
def clear(self, device):
self.outputs[device] = []
class DeformFusion(nn.Module):
def __init__(
self,
patch_size=8,
in_channels=768 * 5,
cnn_channels=256 * 3,
out_channels=256 * 3,
):
super().__init__()
# in_channels, out_channels, kernel_size, stride, padding
self.d_hidn = 512
if patch_size == 8:
stride = 1
else:
stride = 2
self.conv_offset = nn.Conv2d(in_channels, 2 * 3 * 3, 3, 1, 1)
self.deform = DeformConv2d(cnn_channels, out_channels, 3, 1, 1)
self.conv1 = nn.Sequential(
nn.Conv2d(
in_channels=out_channels,
out_channels=self.d_hidn,
kernel_size=3,
padding=1,
stride=2,
),
nn.ReLU(),
nn.Conv2d(
in_channels=self.d_hidn,
out_channels=out_channels,
kernel_size=3,
padding=1,
stride=stride,
),
)
def forward(self, cnn_feat, vit_feat):
vit_feat = F.interpolate(vit_feat, size=cnn_feat.shape[-2:], mode="nearest")
offset = self.conv_offset(vit_feat)
deform_feat = self.deform(cnn_feat, offset)
deform_feat = self.conv1(deform_feat)
return deform_feat
class Pixel_Prediction(nn.Module):
def __init__(self, inchannels=768 * 5 + 256 * 3, outchannels=256, d_hidn=1024):
super().__init__()
self.d_hidn = d_hidn
self.down_channel = nn.Conv2d(inchannels, outchannels, kernel_size=1)
self.feat_smoothing = nn.Sequential(
nn.Conv2d(
in_channels=256 * 3, out_channels=self.d_hidn, kernel_size=3, padding=1
),
nn.ReLU(),
nn.Conv2d(
in_channels=self.d_hidn, out_channels=512, kernel_size=3, padding=1
),
)
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(),
)
self.conv_attent = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1), nn.Sigmoid()
)
self.conv = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1),
)
def forward(self, f_dis, f_ref, cnn_dis, cnn_ref):
f_dis = torch.cat((f_dis, cnn_dis), 1)
f_ref = torch.cat((f_ref, cnn_ref), 1)
f_dis = self.down_channel(f_dis)
f_ref = self.down_channel(f_ref)
f_cat = torch.cat((f_dis - f_ref, f_dis, f_ref), 1)
feat_fused = self.feat_smoothing(f_cat)
feat = self.conv1(feat_fused)
f = self.conv(feat)
w = self.conv_attent(feat)
pred = (f * w).sum(dim=-1).sum(dim=-1) / w.sum(dim=-1).sum(dim=-1)
return pred
@ARCH_REGISTRY.register()
class IQT(nn.Module):
def __init__(
self,
num_crop=20,
config_dataset="live",
default_mean=timm.data.IMAGENET_INCEPTION_MEAN,
default_std=timm.data.IMAGENET_INCEPTION_STD,
pretrained=False,
pretrained_model_path=None,
):
super().__init__()
self.backbone = timm.create_model("inception_resnet_v2", pretrained=True)
self.fix_network(self.backbone)
class Config:
def __init__(self, dataset=config_dataset) -> None:
if dataset in ["live", "csiq", "tid"]:
                    # settings for LIVE / CSIQ / TID style datasets
self.n_enc_seq = (
29 * 29
) # feature map dimension (H x W) from backbone, this size is related to crop_size
self.n_dec_seq = (
29 * 29
) # feature map dimension (H x W) from backbone
self.n_layer = 2 # number of encoder/decoder layers
self.d_hidn = (
256 # input channel (C) of encoder / decoder (input: C x N)
)
self.i_pad = 0
self.d_ff = 1024 # feed forward hidden layer dimension
self.d_MLP_head = 512 # hidden layer of final MLP
self.n_head = 4 # number of head (in multi-head attention)
self.d_head = 256 # input channel (C) of each head (input: C x N) -> same as d_hidn
self.dropout = 0.1 # dropout ratio of transformer
self.emb_dropout = 0.1 # dropout ratio of input embedding
self.layer_norm_epsilon = 1e-12
self.n_output = 1 # dimension of final prediction
self.crop_size = 256 # input image crop size
elif dataset == "pipal":
# model for PIPAL (NTIRE2021 Challenge)
self.n_enc_seq = (
21 * 21
) # feature map dimension (H x W) from backbone, this size is related to crop_size
self.n_dec_seq = (
21 * 21
) # feature map dimension (H x W) from backbone
self.n_layer = 1 # number of encoder/decoder layers
self.d_hidn = (
128 # input channel (C) of encoder / decoder (input: C x N)
)
self.i_pad = 0
self.d_ff = 1024 # feed forward hidden layer dimension
self.d_MLP_head = 128 # hidden layer of final MLP
self.n_head = 4 # number of head (in multi-head attention)
self.d_head = 128 # input channel (C) of each head (input: C x N) -> same as d_hidn
self.dropout = 0.1 # dropout ratio of transformer
self.emb_dropout = 0.1 # dropout ratio of input embedding
self.layer_norm_epsilon = 1e-12
self.n_output = 1 # dimension of final prediction
self.crop_size = 192 # input image crop size
config = Config()
self.config = config
self.register_buffer("enc_inputs", torch.ones(1, config.n_enc_seq + 1))
self.register_buffer("dec_inputs", torch.ones(1, config.n_dec_seq + 1))
self.regressor = IQARegression(config)
# register hook to get intermediate features
self.init_saveoutput()
self.default_mean = torch.Tensor(default_mean).view(1, 3, 1, 1)
self.default_std = torch.Tensor(default_std).view(1, 3, 1, 1)
if pretrained_model_path is not None:
load_pretrained_network(
self, pretrained_model_path, False, weight_keys="params"
)
self.eps = 1e-12
self.crops = num_crop
self.crop_size = config.crop_size
def init_saveoutput(self):
self.save_output = SaveOutput()
hook_handles = []
for layer in self.backbone.modules():
if type(layer).__name__ == "Mixed_5b":
handle = layer.register_forward_hook(self.save_output)
hook_handles.append(handle)
            elif type(layer).__name__ == "Block35":
                handle = layer.register_forward_hook(self.save_output)
                hook_handles.append(handle)
def fix_network(self, model):
for p in model.parameters():
p.requires_grad = False
def preprocess(self, x):
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
return x
@torch.no_grad()
def get_backbone_feature(self, x):
self.backbone(x)
feat = torch.cat(
(
self.save_output.outputs[x.device][0],
self.save_output.outputs[x.device][2],
self.save_output.outputs[x.device][4],
self.save_output.outputs[x.device][6],
self.save_output.outputs[x.device][8],
self.save_output.outputs[x.device][10],
),
dim=1,
)
self.save_output.clear(x.device)
return feat
def regress_score(self, dis, ref):
assert (
dis.shape[-1] == dis.shape[-2] == self.config.crop_size
), f"Input shape should be {self.config.crop_size, self.config.crop_size} but got {dis.shape[2:]}"
self.backbone.eval()
dis = self.preprocess(dis)
ref = self.preprocess(ref)
feat_dis = self.get_backbone_feature(dis)
feat_ref = self.get_backbone_feature(ref)
feat_diff = feat_ref - feat_dis
score = self.regressor(self.enc_inputs, feat_diff, self.dec_inputs, feat_ref)
return score
def forward(self, x, y):
bsz = x.shape[0]
if self.crops > 1 and not self.training:
x, y = random_crop(x, y, self.crop_size, self.crops)
score = self.regress_score(x, y)
score = score.reshape(bsz, self.crops, 1)
score = score.mean(dim=1)
else:
score = self.regress_score(x, y)
return score
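# Illustrative usage sketch: score a distorted image against its reference.
# The shapes and crop count are arbitrary assumptions; constructing IQT
# downloads the Inception-ResNet-v2 backbone weights through timm.
def _example_iqt_usage():
    metric = IQT(num_crop=3).eval()
    dist = torch.rand(1, 3, 384, 384)
    ref = torch.rand(1, 3, 384, 384)
    with torch.no_grad():
        return metric(dist, ref)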
| 25,389 | 35.323319 | 161 | py |
BVQI | BVQI-master/pyiqa/archs/func_util.py | from typing import Tuple
import torch
import torch.nn.functional as F
from pyiqa.matlab_utils import fspecial, imfilter
from .arch_util import excact_padding_2d
EPS = torch.finfo(torch.float32).eps
def extract_2d_patches(x, kernel, stride=1, dilation=1, padding="same"):
"""
Ref: https://stackoverflow.com/a/65886666
"""
b, c, h, w = x.shape
if padding != "none":
x = excact_padding_2d(x, kernel, stride, dilation, mode=padding)
# Extract patches
patches = F.unfold(x, kernel, dilation, stride=stride)
b, _, pnum = patches.shape
patches = patches.transpose(1, 2).reshape(b, pnum, c, kernel, kernel)
return patches
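# Illustrative usage sketch: extract non-overlapping 8x8 patches from a batch
# of RGB images. All sizes are arbitrary assumptions for this example.
def _example_extract_2d_patches():
    x = torch.rand(2, 3, 64, 64)
    patches = extract_2d_patches(x, kernel=8, stride=8)
    # patches has shape (2, 64, 3, 8, 8): (batch, num_patches, channels, kh, kw)
    return patches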
def torch_cov(tensor, rowvar=True, bias=False):
r"""Estimate a covariance matrix (np.cov)
Ref: https://gist.github.com/ModarTensai/5ab449acba9df1a26c12060240773110
"""
tensor = tensor if rowvar else tensor.transpose(-1, -2)
tensor = tensor - tensor.mean(dim=-1, keepdim=True)
factor = 1 / (tensor.shape[-1] - int(not bool(bias)))
return factor * tensor @ tensor.transpose(-1, -2)
def safe_sqrt(x: torch.Tensor) -> torch.Tensor:
r"""Safe sqrt with EPS to ensure numeric stability.
Args:
x (torch.Tensor): should be non-negative
"""
EPS = torch.finfo(x.dtype).eps
return torch.sqrt(x + EPS)
def diff_round(x: torch.Tensor) -> torch.Tensor:
r"""Differentiable round."""
return x - x.detach() + x.round()
def normalize_img_with_guass(
img: torch.Tensor,
kernel_size: int = 7,
sigma: float = 7.0 / 6,
C: int = 1,
padding: str = "same",
):
kernel = fspecial(kernel_size, sigma, 1).to(img)
mu = imfilter(img, kernel, padding=padding)
std = imfilter(img ** 2, kernel, padding=padding)
sigma = safe_sqrt((std - mu ** 2).abs())
img_normalized = (img - mu) / (sigma + C)
return img_normalized
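# Illustrative usage sketch: compute the MSCN-style locally normalized map used
# by several blind IQA metrics. The single-channel 96x96 batch is an arbitrary
# assumption for this example.
def _example_normalize_img_with_guass():
    img = torch.rand(2, 1, 96, 96)
    mscn = normalize_img_with_guass(img)  # same shape as the input
    return mscn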
# Gradient operator kernels
def scharr_filter() -> torch.Tensor:
r"""Utility function that returns a normalized 3x3 Scharr kernel in X direction
Returns:
kernel: Tensor with shape (1, 3, 3)
"""
return torch.tensor([[[-3.0, 0.0, 3.0], [-10.0, 0.0, 10.0], [-3.0, 0.0, 3.0]]]) / 16
def gradient_map(x: torch.Tensor, kernels: torch.Tensor) -> torch.Tensor:
r"""Compute gradient map for a given tensor and stack of kernels.
Args:
x: Tensor with shape (N, C, H, W).
kernels: Stack of tensors for gradient computation with shape (k_N, k_H, k_W)
Returns:
Gradients of x per-channel with shape (N, C, H, W)
"""
padding = kernels.size(-1) // 2
grads = torch.nn.functional.conv2d(x, kernels.to(x), padding=padding)
return safe_sqrt(torch.sum(grads ** 2, dim=-3, keepdim=True))
def similarity_map(
map_x: torch.Tensor, map_y: torch.Tensor, constant: float, alpha: float = 0.0
) -> torch.Tensor:
r"""Compute similarity_map between two tensors using Dice-like equation.
Args:
map_x: Tensor with map to be compared
map_y: Tensor with map to be compared
constant: Used for numerical stability
        alpha: Masking coefficient. Subtracts `alpha` * map_x * map_y from the numerator and denominator
"""
return (2.0 * map_x * map_y - alpha * map_x * map_y + constant) / (
map_x ** 2 + map_y ** 2 - alpha * map_x * map_y + constant + EPS
)
def ifftshift(x: torch.Tensor) -> torch.Tensor:
r"""Similar to np.fft.ifftshift but applies to PyTorch Tensors"""
shift = [-(ax // 2) for ax in x.size()]
return torch.roll(x, shift, tuple(range(len(shift))))
def get_meshgrid(size: Tuple[int, int]) -> torch.Tensor:
r"""Return coordinate grid matrices centered at zero point.
Args:
size: Shape of meshgrid to create
"""
if size[0] % 2:
# Odd
x = torch.arange(-(size[0] - 1) / 2, size[0] / 2) / (size[0] - 1)
else:
# Even
x = torch.arange(-size[0] / 2, size[0] / 2) / size[0]
if size[1] % 2:
# Odd
y = torch.arange(-(size[1] - 1) / 2, size[1] / 2) / (size[1] - 1)
else:
# Even
y = torch.arange(-size[1] / 2, size[1] / 2) / size[1]
return torch.meshgrid(x, y, indexing="ij")
def estimate_ggd_param(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Estimate general gaussian distribution.
Args:
x (Tensor): shape (b, 1, h, w)
"""
gamma = torch.arange(0.2, 10 + 0.001, 0.001).to(x)
r_table = (
torch.lgamma(1.0 / gamma)
+ torch.lgamma(3.0 / gamma)
- 2 * torch.lgamma(2.0 / gamma)
).exp()
r_table = r_table.repeat(x.size(0), 1)
sigma_sq = x.pow(2).mean(dim=(-1, -2))
sigma = sigma_sq.sqrt().squeeze(dim=-1)
assert not torch.isclose(
sigma, torch.zeros_like(sigma)
).all(), "Expected image with non zero variance of pixel values"
E = x.abs().mean(dim=(-1, -2))
rho = sigma_sq / E ** 2
indexes = (rho - r_table).abs().argmin(dim=-1)
solution = gamma[indexes]
return solution, sigma
def estimate_aggd_param(
block: torch.Tensor, return_sigma=False
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Estimate AGGD (Asymmetric Generalized Gaussian Distribution) parameters.
Args:
block (Tensor): Image block with shape (b, 1, h, w).
Returns:
Tensor: alpha, beta_l and beta_r for the AGGD distribution
        (estimating the parameters in Equation 7 of the paper).
"""
gam = torch.arange(0.2, 10 + 0.001, 0.001).to(block)
r_gam = (
2 * torch.lgamma(2.0 / gam)
- (torch.lgamma(1.0 / gam) + torch.lgamma(3.0 / gam))
).exp()
r_gam = r_gam.repeat(block.shape[0], 1)
mask_left = block < 0
mask_right = block > 0
count_left = mask_left.sum(dim=(-1, -2), dtype=torch.float32)
count_right = mask_right.sum(dim=(-1, -2), dtype=torch.float32)
left_std = torch.sqrt((block * mask_left).pow(2).sum(dim=(-1, -2)) / (count_left))
right_std = torch.sqrt(
(block * mask_right).pow(2).sum(dim=(-1, -2)) / (count_right)
)
gammahat = left_std / right_std
rhat = block.abs().mean(dim=(-1, -2)).pow(2) / block.pow(2).mean(dim=(-1, -2))
rhatnorm = (rhat * (gammahat.pow(3) + 1) * (gammahat + 1)) / (
gammahat.pow(2) + 1
).pow(2)
array_position = (r_gam - rhatnorm).abs().argmin(dim=-1)
alpha = gam[array_position]
beta_l = (
left_std.squeeze(-1)
* (torch.lgamma(1 / alpha) - torch.lgamma(3 / alpha)).exp().sqrt()
)
beta_r = (
right_std.squeeze(-1)
* (torch.lgamma(1 / alpha) - torch.lgamma(3 / alpha)).exp().sqrt()
)
if return_sigma:
return alpha, left_std.squeeze(-1), right_std.squeeze(-1)
else:
return alpha, beta_l, beta_r
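# Illustrative usage sketch: AGGD parameters are usually fitted on MSCN
# coefficients such as the output of ``normalize_img_with_guass`` above. The
# random zero-mean field below is an arbitrary assumption for this example.
def _example_estimate_aggd_param():
    block = torch.randn(2, 1, 96, 96)
    alpha, beta_l, beta_r = estimate_aggd_param(block)
    return alpha, beta_l, beta_r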
| 6,729 | 31.047619 | 103 | py |
BVQI | BVQI-master/pyiqa/archs/vsi_arch.py | r"""VSI Metric.
Created by: https://github.com/photosynthesis-team/piq/blob/master/piq/vsi.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
IQA-Optimization from https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/VSI.py
Offical matlab code is not available
"""
import functools
import warnings
from typing import Tuple, Union
import torch
import torch.nn as nn
from torch.nn.functional import avg_pool2d, interpolate, pad
from pyiqa.utils.color_util import rgb2lab, rgb2lmn
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import (
get_meshgrid,
gradient_map,
ifftshift,
safe_sqrt,
scharr_filter,
similarity_map,
)
def vsi(
x: torch.Tensor,
y: torch.Tensor,
data_range: Union[int, float] = 1.0,
c1: float = 1.27,
c2: float = 386.0,
c3: float = 130.0,
alpha: float = 0.4,
beta: float = 0.02,
omega_0: float = 0.021,
sigma_f: float = 1.34,
sigma_d: float = 145.0,
sigma_c: float = 0.001,
) -> torch.Tensor:
r"""Compute Visual Saliency-induced Index for a batch of images.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
data_range: Maximum value range of images (usually 1.0 or 255).
c1: coefficient to calculate saliency component of VSI.
c2: coefficient to calculate gradient component of VSI.
c3: coefficient to calculate color component of VSI.
alpha: power for gradient component of VSI.
beta: power for color component of VSI.
omega_0: coefficient to get log Gabor filter at SDSP.
sigma_f: coefficient to get log Gabor filter at SDSP.
sigma_d: coefficient to get SDSP.
sigma_c: coefficient to get SDSP.
Returns:
Index of similarity between two images. Usually in [0, 1] range.
References:
L. Zhang, Y. Shen and H. Li, "VSI: A Visual Saliency-Induced Index for Perceptual
Image Quality Assessment," IEEE Transactions on Image Processing, vol. 23, no. 10,
pp. 4270-4281, Oct. 2014, doi: 10.1109/TIP.2014.2346028
https://ieeexplore.ieee.org/document/6873260
Note:
The original method supports only RGB image.
"""
if x.size(1) == 1:
x = x.repeat(1, 3, 1, 1)
y = y.repeat(1, 3, 1, 1)
warnings.warn(
"The original VSI supports only RGB images. The input images were converted to RGB by copying "
"the grey channel 3 times."
)
# Scale to [0, 255] range to match scale of constant
x = x * 255.0 / data_range
y = y * 255.0 / data_range
vs_x = sdsp(
x,
data_range=255,
omega_0=omega_0,
sigma_f=sigma_f,
sigma_d=sigma_d,
sigma_c=sigma_c,
)
vs_y = sdsp(
y,
data_range=255,
omega_0=omega_0,
sigma_f=sigma_f,
sigma_d=sigma_d,
sigma_c=sigma_c,
)
# Convert to LMN colour space
x_lmn = rgb2lmn(x)
y_lmn = rgb2lmn(y)
# Averaging image if the size is large enough
kernel_size = max(1, round(min(vs_x.size()[-2:]) / 256))
padding = kernel_size // 2
if padding:
upper_pad = padding
bottom_pad = (kernel_size - 1) // 2
pad_to_use = [upper_pad, bottom_pad, upper_pad, bottom_pad]
mode = "replicate"
vs_x = pad(vs_x, pad=pad_to_use, mode=mode)
vs_y = pad(vs_y, pad=pad_to_use, mode=mode)
x_lmn = pad(x_lmn, pad=pad_to_use, mode=mode)
y_lmn = pad(y_lmn, pad=pad_to_use, mode=mode)
vs_x = avg_pool2d(vs_x, kernel_size=kernel_size)
vs_y = avg_pool2d(vs_y, kernel_size=kernel_size)
x_lmn = avg_pool2d(x_lmn, kernel_size=kernel_size)
y_lmn = avg_pool2d(y_lmn, kernel_size=kernel_size)
# Calculate gradient map
kernels = torch.stack([scharr_filter(), scharr_filter().transpose(1, 2)]).to(x_lmn)
gm_x = gradient_map(x_lmn[:, :1], kernels)
gm_y = gradient_map(y_lmn[:, :1], kernels)
# Calculate all similarity maps
s_vs = similarity_map(vs_x, vs_y, c1)
s_gm = similarity_map(gm_x, gm_y, c2)
s_m = similarity_map(x_lmn[:, 1:2], y_lmn[:, 1:2], c3)
s_n = similarity_map(x_lmn[:, 2:], y_lmn[:, 2:], c3)
s_c = s_m * s_n
s_c_complex = [s_c.abs(), torch.atan2(torch.zeros_like(s_c), s_c)]
s_c_complex_pow = [s_c_complex[0] ** beta, s_c_complex[1] * beta]
s_c_real_pow = s_c_complex_pow[0] * torch.cos(s_c_complex_pow[1])
s = s_vs * s_gm.pow(alpha) * s_c_real_pow
vs_max = torch.max(vs_x, vs_y)
eps = torch.finfo(vs_max.dtype).eps
output = s * vs_max
output = (
(output.sum(dim=(-1, -2)) + eps) / (vs_max.sum(dim=(-1, -2)) + eps)
).squeeze(-1)
return output
def sdsp(
x: torch.Tensor,
data_range: Union[int, float] = 255,
omega_0: float = 0.021,
sigma_f: float = 1.34,
sigma_d: float = 145.0,
sigma_c: float = 0.001,
) -> torch.Tensor:
r"""SDSP algorithm for salient region detection from a given image.
Supports only colour images with RGB channel order.
Args:
x: Tensor. Shape :math:`(N, 3, H, W)`.
data_range: Maximum value range of images (usually 1.0 or 255).
omega_0: coefficient for log Gabor filter
sigma_f: coefficient for log Gabor filter
sigma_d: coefficient for the central areas, which have a bias towards attention
sigma_c: coefficient for the warm colors, which have a bias towards attention
Returns:
torch.Tensor: Visual saliency map
"""
x = x / data_range * 255
size = x.size()
size_to_use = (256, 256)
x = interpolate(input=x, size=size_to_use, mode="bilinear", align_corners=False)
x_lab = rgb2lab(x, data_range=255)
lg = _log_gabor(size_to_use, omega_0, sigma_f).to(x).view(1, 1, *size_to_use)
# torch version >= '1.8.0'
x_fft = torch.fft.fft2(x_lab)
x_ifft_real = torch.fft.ifft2(x_fft * lg).real
s_f = safe_sqrt(x_ifft_real.pow(2).sum(dim=1, keepdim=True))
coordinates = torch.stack(get_meshgrid(size_to_use), dim=0).to(x)
coordinates = coordinates * size_to_use[0] + 1
s_d = torch.exp(-torch.sum(coordinates ** 2, dim=0) / sigma_d ** 2).view(
1, 1, *size_to_use
)
eps = torch.finfo(x_lab.dtype).eps
min_x = x_lab.min(dim=-1, keepdim=True).values.min(dim=-2, keepdim=True).values
max_x = x_lab.max(dim=-1, keepdim=True).values.max(dim=-2, keepdim=True).values
normalized = (x_lab - min_x) / (max_x - min_x + eps)
norm = normalized[:, 1:].pow(2).sum(dim=1, keepdim=True)
s_c = 1 - torch.exp(-norm / sigma_c ** 2)
vs_m = s_f * s_d * s_c
vs_m = interpolate(vs_m, size[-2:], mode="bilinear", align_corners=True)
min_vs_m = vs_m.min(dim=-1, keepdim=True).values.min(dim=-2, keepdim=True).values
max_vs_m = vs_m.max(dim=-1, keepdim=True).values.max(dim=-2, keepdim=True).values
return (vs_m - min_vs_m) / (max_vs_m - min_vs_m + eps)
def _log_gabor(size: Tuple[int, int], omega_0: float, sigma_f: float) -> torch.Tensor:
r"""Creates log Gabor filter
Args:
size: size of the requires log Gabor filter
omega_0: center frequency of the filter
sigma_f: bandwidth of the filter
Returns:
log Gabor filter
"""
xx, yy = get_meshgrid(size)
radius = (xx ** 2 + yy ** 2).sqrt()
mask = radius <= 0.5
r = radius * mask
r = ifftshift(r)
r[0, 0] = 1
lg = torch.exp((-(r / omega_0).log().pow(2)) / (2 * sigma_f ** 2))
lg[0, 0] = 0
return lg
@ARCH_REGISTRY.register()
class VSI(nn.Module):
r"""Creates a criterion that measures Visual Saliency-induced Index error between
each element in the input and target.
Args:
data_range: Maximum value range of images (usually 1.0 or 255).
c1: coefficient to calculate saliency component of VSI
c2: coefficient to calculate gradient component of VSI
c3: coefficient to calculate color component of VSI
alpha: power for gradient component of VSI
beta: power for color component of VSI
omega_0: coefficient to get log Gabor filter at SDSP
sigma_f: coefficient to get log Gabor filter at SDSP
sigma_d: coefficient to get SDSP
sigma_c: coefficient to get SDSP
References:
L. Zhang, Y. Shen and H. Li, "VSI: A Visual Saliency-Induced Index for Perceptual
Image Quality Assessment," IEEE Transactions on Image Processing, vol. 23, no. 10,
pp. 4270-4281, Oct. 2014, doi: 10.1109/TIP.2014.2346028
https://ieeexplore.ieee.org/document/6873260
"""
def __init__(
self,
c1: float = 1.27,
c2: float = 386.0,
c3: float = 130.0,
alpha: float = 0.4,
beta: float = 0.02,
data_range: Union[int, float] = 1.0,
omega_0: float = 0.021,
sigma_f: float = 1.34,
sigma_d: float = 145.0,
sigma_c: float = 0.001,
) -> None:
super().__init__()
self.data_range = data_range
self.vsi = functools.partial(
vsi,
c1=c1,
c2=c2,
c3=c3,
alpha=alpha,
beta=beta,
omega_0=omega_0,
sigma_f=sigma_f,
sigma_d=sigma_d,
sigma_c=sigma_c,
data_range=data_range,
)
def forward(self, x, y):
r"""Computation of VSI as a loss function.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of VSI loss to be minimized in [0, 1] range.
Note:
            Both inputs are supposed to have RGB channel order in accordance with the original approach.
            Nevertheless, the method supports greyscale images, which are converted to RGB by copying the grey
            channel 3 times.
"""
return self.vsi(x=x, y=y)
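# Illustrative usage sketch: random tensors in an assumed [0, 1] range stand in
# for a distorted image and its reference; a higher VSI score means the two are
# perceptually closer.
def _example_vsi_usage():
    metric = VSI()
    dist = torch.rand(1, 3, 256, 256)
    ref = torch.rand(1, 3, 256, 256)
    return metric(dist, ref)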
| 9,991 | 31.868421 | 115 | py |
BVQI | BVQI-master/pyiqa/archs/nrqm_arch.py | r"""NRQM Metric, proposed in
Chao Ma, Chih-Yuan Yang, Xiaokang Yang, Ming-Hsuan Yang
"Learning a No-Reference Quality Metric for Single-Image Super-Resolution"
Computer Vision and Image Understanding (CVIU), 2017
Matlab reference: https://github.com/chaoma99/sr-metric
This PyTorch implementation by: Chaofeng Chen (https://github.com/chaofengc)
"""
import math
from warnings import warn
import scipy.io
import torch
import torch.nn.functional as F
from torch import Tensor
from pyiqa.archs.arch_util import ExactPadding2d
from pyiqa.archs.func_util import extract_2d_patches
from pyiqa.archs.niqe_arch import NIQE
from pyiqa.archs.ssim_arch import SSIM
from pyiqa.matlab_utils import SCFpyr_PyTorch, dct2d, fspecial, im2col, imresize
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.download_util import load_file_from_url
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/NRQM_model.mat"
}
def get_guass_pyramid(x: Tensor, scale: int = 2):
r"""Get gaussian pyramid images with gaussian kernel."""
pyr = [x]
kernel = fspecial(3, 0.5, x.shape[1]).to(x)
pad_func = ExactPadding2d(3, stride=1, mode="same")
for i in range(scale):
x = F.conv2d(pad_func(x), kernel, groups=x.shape[1])
x = x[:, :, 1::2, 1::2]
pyr.append(x)
return pyr
def get_var_gen_gauss(x, eps=1e-7):
r"""Get mean and variance of input local patch."""
std = x.abs().std(dim=-1, unbiased=True)
mean = x.abs().mean(dim=-1)
rho = std / (mean + eps)
return rho
def gamma_gen_gauss(x: Tensor, block_seg=1e4):
r"""General gaussian distribution estimation.
Args:
block_seg: maximum number of blocks in parallel to avoid OOM
"""
pshape = x.shape[:-1]
x = x.reshape(-1, x.shape[-1])
eps = 1e-7
gamma = torch.arange(0.03, 10 + 0.001, 0.001).to(x)
r_table = (
torch.lgamma(1.0 / gamma)
+ torch.lgamma(3.0 / gamma)
- 2 * torch.lgamma(2.0 / gamma)
).exp()
r_table = r_table.unsqueeze(0)
mean = x.mean(dim=-1, keepdim=True)
var = x.var(dim=-1, keepdim=True, unbiased=True)
mean_abs = (x - mean).abs().mean(dim=-1, keepdim=True) ** 2
rho = var / (mean_abs + eps)
if rho.shape[0] > block_seg:
rho_seg = rho.chunk(int(rho.shape[0] // block_seg))
indexes = []
for r in rho_seg:
tmp_idx = (r - r_table).abs().argmin(dim=-1)
indexes.append(tmp_idx)
indexes = torch.cat(indexes)
else:
indexes = (rho - r_table).abs().argmin(dim=-1)
solution = gamma[indexes].reshape(*pshape)
return solution
def gamma_dct(dct_img_block: torch.Tensor):
r"""Generalized gaussian distribution features"""
b, _, _, h, w = dct_img_block.shape
dct_flatten = dct_img_block.reshape(b, -1, h * w)[:, :, 1:]
g = gamma_gen_gauss(dct_flatten)
g = torch.sort(g, dim=-1)[0]
return g
def coeff_var_dct(dct_img_block: torch.Tensor):
r"""Gaussian var, mean features"""
b, _, _, h, w = dct_img_block.shape
dct_flatten = dct_img_block.reshape(b, -1, h * w)[:, :, 1:]
rho = get_var_gen_gauss(dct_flatten)
rho = torch.sort(rho, dim=-1)[0]
return rho
def oriented_dct_rho(dct_img_block: torch.Tensor):
r"""Oriented frequency features"""
eps = 1e-8
# oriented 1
feat1 = torch.cat(
[
dct_img_block[..., 0, 1:],
dct_img_block[..., 1, 2:],
dct_img_block[..., 2, 4:],
dct_img_block[..., 3, 5:],
],
dim=-1,
).squeeze(-2)
g1 = get_var_gen_gauss(feat1, eps)
# oriented 2
feat2 = torch.cat(
[
dct_img_block[..., 1, [1]],
dct_img_block[..., 2, 2:4],
dct_img_block[..., 3, 2:5],
dct_img_block[..., 4, 3:],
dct_img_block[..., 5, 4:],
dct_img_block[..., 6, 4:],
],
dim=-1,
).squeeze(-2)
g2 = get_var_gen_gauss(feat2, eps)
# oriented 3
feat3 = torch.cat(
[
dct_img_block[..., 1:, 0],
dct_img_block[..., 2:, 1],
dct_img_block[..., 4:, 2],
dct_img_block[..., 5:, 3],
],
dim=-1,
).squeeze(-2)
g3 = get_var_gen_gauss(feat3, eps)
rho = torch.stack([g1, g2, g3], dim=-1).var(dim=-1)
rho = torch.sort(rho, dim=-1)[0]
return rho
def block_dct(img: Tensor):
r"""Get local frequency features"""
img_blocks = extract_2d_patches(img, 3 + 2 * 2, 3)
dct_img_blocks = dct2d(img_blocks)
features = []
# general gaussian distribution features
gamma_L1 = gamma_dct(dct_img_blocks)
p10_gamma_L1 = gamma_L1[:, : math.ceil(0.1 * gamma_L1.shape[-1]) + 1].mean(dim=-1)
p100_gamma_L1 = gamma_L1.mean(dim=-1)
features += [p10_gamma_L1, p100_gamma_L1]
    # coefficient of variation estimation
coeff_var_L1 = coeff_var_dct(dct_img_blocks)
p10_last_cv_L1 = coeff_var_L1[:, math.floor(0.9 * coeff_var_L1.shape[-1]) :].mean(
dim=-1
)
p100_cv_L1 = coeff_var_L1.mean(dim=-1)
features += [p10_last_cv_L1, p100_cv_L1]
# oriented dct features
ori_dct_feat = oriented_dct_rho(dct_img_blocks)
p10_last_orientation_L1 = ori_dct_feat[
:, math.floor(0.9 * ori_dct_feat.shape[-1]) :
].mean(dim=-1)
p100_orientation_L1 = ori_dct_feat.mean(dim=-1)
features += [p10_last_orientation_L1, p100_orientation_L1]
dct_feat = torch.stack(features, dim=1)
return dct_feat
def norm_sender_normalized(pyr, num_scale=2, num_bands=6, blksz=3, eps=1e-12):
r"""Normalize pyramid with local spatial neighbor and band neighbor"""
border = blksz // 2
guardband = 16
subbands = []
for si in range(num_scale):
for bi in range(num_bands):
idx = si * num_bands + bi
current_band = pyr[idx]
N = blksz ** 2
# 3x3 window pixels
tmp = F.unfold(current_band.unsqueeze(1), 3, stride=1)
tmp = tmp.transpose(1, 2)
b, hw = tmp.shape[:2]
# parent pixels
parent_idx = idx + num_bands
if parent_idx < len(pyr):
tmp_parent = pyr[parent_idx]
tmp_parent = imresize(tmp_parent, sizes=current_band.shape[-2:])
tmp_parent = tmp_parent[:, border:-border, border:-border].reshape(
b, hw, 1
)
tmp = torch.cat((tmp, tmp_parent), dim=-1)
N += 1
# neighbor band pixels
for ni in range(num_bands):
if ni != bi:
ni_idx = si * num_bands + ni
tmp_nei = pyr[ni_idx]
tmp_nei = tmp_nei[:, border:-border, border:-border].reshape(
b, hw, 1
)
tmp = torch.cat((tmp, tmp_nei), dim=-1)
C_x = tmp.transpose(1, 2) @ tmp / tmp.shape[1]
# correct possible negative eigenvalue
L, Q = torch.linalg.eigh(C_x)
L_pos = L * (L > 0)
L_pos_sum = L_pos.sum(dim=1, keepdim=True)
L = (
L_pos
* L.sum(dim=1, keepdim=True)
/ (L_pos_sum + (L_pos_sum == 0).float())
)
C_x = Q @ torch.diag_embed(L) @ Q.transpose(1, 2)
o_c = current_band[:, border:-border, border:-border]
b, h, w = o_c.shape
o_c = o_c.reshape(b, hw)
o_c = o_c - o_c.mean(dim=1, keepdim=True)
if hasattr(torch.linalg, "lstsq"):
tmp_y = (
torch.linalg.lstsq(
C_x.transpose(1, 2), tmp.transpose(1, 2)
).solution.transpose(1, 2)
* tmp
/ N
)
else:
warn(
"For numerical stability, we use torch.linal.lstsq to calculate matrix inverse for PyTorch > 1.9.0. The results might be slightly different if you use older version of PyTorch."
)
tmp_y = (tmp @ torch.linalg.pinv(C_x)) * tmp / N
z = tmp_y.sum(dim=2).sqrt()
mask = z != 0
g_c = o_c * mask / (z * mask + eps)
g_c = g_c.reshape(b, h, w)
gb = int(guardband / (2 ** (si)))
g_c = g_c[:, gb:-gb, gb:-gb]
g_c = g_c - g_c.mean(dim=(1, 2), keepdim=True)
subbands.append(g_c)
return subbands
def global_gsm(img: Tensor):
"""Global feature from gassian scale mixture model"""
batch_size = img.shape[0]
num_bands = 6
pyr = SCFpyr_PyTorch(height=2, nbands=num_bands, device=img.device).build(img)
lp_bands = [x[..., 0] for x in pyr[1]] + [x[..., 0] for x in pyr[2]]
subbands = norm_sender_normalized(lp_bands)
feat = []
# gamma
for sb in subbands:
feat.append(gamma_gen_gauss(sb.reshape(batch_size, -1)))
# gamma cross scale
for i in range(num_bands):
sb1 = subbands[i].reshape(batch_size, -1)
sb2 = subbands[i + num_bands].reshape(batch_size, -1)
gs = gamma_gen_gauss(torch.cat((sb1, sb2), dim=1))
feat.append(gs)
# structure correlation between scales
hp_band = pyr[0]
ssim_func = SSIM(channels=1, test_y_channel=False)
for sb in subbands:
sb_tmp = imresize(sb, sizes=hp_band.shape[1:]).unsqueeze(1)
tmp_ssim = ssim_func(sb_tmp, hp_band.unsqueeze(1))
feat.append(tmp_ssim)
# structure correlation between orientations
for i in range(num_bands):
for j in range(i + 1, num_bands):
feat.append(ssim_func(subbands[i].unsqueeze(1), subbands[j].unsqueeze(1)))
feat = torch.stack(feat, dim=1)
return feat
def tree_regression(feat, ldau, rdau, threshold_value, pred_value, best_attri):
r"""Simple decision tree regression."""
prev_k = k = 0
for i in range(ldau.shape[0]):
best_col = best_attri[k] - 1
threshold = threshold_value[k]
key_value = feat[best_col]
prev_k = k
k = ldau[k] - 1 if key_value <= threshold else rdau[k] - 1
if k == -1:
break
y_pred = pred_value[prev_k]
return y_pred
def random_forest_regression(feat, ldau, rdau, threshold_value, pred_value, best_attri):
r"""Simple random forest regression.
Note: currently, this is non-differentiable and only support CPU.
"""
feat = feat.cpu().data.numpy()
b, dim = feat.shape
node_num, tree_num = ldau.shape
pred = []
for i in range(b):
tmp_feat = feat[i]
tmp_pred = []
for i in range(tree_num):
tmp_result = tree_regression(
tmp_feat,
ldau[:, i],
rdau[:, i],
threshold_value[:, i],
pred_value[:, i],
best_attri[:, i],
)
tmp_pred.append(tmp_result)
pred.append(tmp_pred)
pred = torch.Tensor(pred)
return pred.mean(dim=1, keepdim=True)
def nrqm(
img: Tensor,
linear_param,
rf_param,
) -> Tensor:
"""Calculate NRQM
Args:
img (Tensor): Input image.
linear_param (np.array): (4, 1) linear regression params
rf_param: params of 3 random forest for 3 kinds of features
"""
assert (
img.ndim == 4
), "Input image must be a gray or Y (of YCbCr) image with shape (b, c, h, w)."
# crop image
b, c, h, w = img.shape
img_pyr = get_guass_pyramid(img.float() / 255.0)
# DCT features
f1 = []
for im in img_pyr:
f1.append(block_dct(im))
f1 = torch.cat(f1, dim=1)
# gsm features
f2 = global_gsm(img)
# svd features
f3 = []
for im in img_pyr:
col = im2col(im, 5, "distinct")
_, s, _ = torch.linalg.svd(col, full_matrices=False)
f3.append(s)
f3 = torch.cat(f3, dim=1)
# Random forest regression. Currently not differentiable and only support CPU
preds = torch.ones(b, 1)
for feat, rf in zip([f1, f2, f3], rf_param):
tmp_pred = random_forest_regression(feat, *rf)
preds = torch.cat((preds, tmp_pred), dim=1)
quality = preds @ torch.Tensor(linear_param)
return quality.squeeze()
def calculate_nrqm(
img: torch.Tensor,
crop_border: int = 0,
test_y_channel: bool = True,
pretrained_model_path: str = None,
color_space: str = "yiq",
**kwargs,
) -> torch.Tensor:
"""Calculate NRQM
Args:
img (Tensor): Input image whose quality needs to be computed.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
        test_y_channel (Bool): Whether to convert to the Y channel (of MATLAB YCbCr) or grayscale.
pretrained_model_path (String): The pretrained model path.
Returns:
        Tensor: NRQM result.
"""
params = scipy.io.loadmat(pretrained_model_path)["model"]
linear_param = params["linear"][0, 0]
rf_params_list = []
for i in range(3):
tmp_list = []
tmp_param = params["rf"][0, 0][0, i][0, 0]
tmp_list.append(tmp_param[0]) # ldau
tmp_list.append(tmp_param[1]) # rdau
tmp_list.append(tmp_param[4]) # threshold value
tmp_list.append(tmp_param[5]) # pred value
tmp_list.append(tmp_param[6]) # best attribute index
rf_params_list.append(tmp_list)
if test_y_channel and img.shape[1] == 3:
img = to_y_channel(img, 255, color_space)
if crop_border != 0:
img = img[..., crop_border:-crop_border, crop_border:-crop_border]
nrqm_result = nrqm(img, linear_param, rf_params_list)
return nrqm_result.to(img)
@ARCH_REGISTRY.register()
class NRQM(torch.nn.Module):
r"""NRQM metric
Ma, Chao, Chih-Yuan Yang, Xiaokang Yang, and Ming-Hsuan Yang.
"Learning a no-reference quality metric for single-image super-resolution."
Computer Vision and Image Understanding 158 (2017): 1-16.
Args:
channels (int): Number of processed channel.
test_y_channel (Boolean): whether to use y channel on ycbcr.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (String): The pretrained model path.
"""
def __init__(
self,
test_y_channel: bool = True,
color_space: str = "yiq",
crop_border: int = 0,
pretrained_model_path: str = None,
) -> None:
super(NRQM, self).__init__()
self.test_y_channel = test_y_channel
self.crop_border = crop_border
self.color_space = color_space
if pretrained_model_path is not None:
self.pretrained_model_path = pretrained_model_path
else:
self.pretrained_model_path = load_file_from_url(default_model_urls["url"])
def forward(self, X: torch.Tensor) -> torch.Tensor:
r"""Computation of NRQM metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of nrqm metric.
"""
score = calculate_nrqm(
X,
self.crop_border,
self.test_y_channel,
self.pretrained_model_path,
self.color_space,
)
return score
@ARCH_REGISTRY.register()
class PI(torch.nn.Module):
r"""Perceptual Index (PI), introduced by
Blau, Yochai, Roey Mechrez, Radu Timofte, Tomer Michaeli, and Lihi Zelnik-Manor.
"The 2018 pirm challenge on perceptual image super-resolution."
In Proceedings of the European Conference on Computer Vision (ECCV) Workshops, pp. 0-0. 2018.
Ref url: https://github.com/roimehrez/PIRM2018
It is a combination of NIQE and NRQM: 1/2 * ((10 - NRQM) + NIQE)
Args:
color_space (str): color space of y channel, default ycbcr.
crop_border (int): Cropped pixels in each edge of an image, default 4.
"""
def __init__(self, crop_border=4, color_space="ycbcr"):
super(PI, self).__init__()
self.nrqm = NRQM(crop_border=crop_border, color_space=color_space)
self.niqe = NIQE(crop_border=crop_border, color_space=color_space)
def forward(self, X: Tensor) -> Tensor:
r"""Computation of PI metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of PI metric.
"""
nrqm_score = self.nrqm(X)
niqe_score = self.niqe(X)
score = 1 / 2 * (10 - nrqm_score + niqe_score)
return score
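# Illustrative usage sketch: the input shape is an arbitrary assumption, and
# the NRQM / NIQE parameter files are downloaded on first use; the random
# forest part of NRQM runs on CPU only.
def _example_pi_usage():
    metric = PI()
    img = torch.rand(1, 3, 256, 256)
    return metric(img)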
| 16,689 | 31.157996 | 197 | py |
BVQI | BVQI-master/pyiqa/archs/nima_arch.py | r"""NIMA model.
Reference:
Talebi, Hossein, and Peyman Milanfar. "NIMA: Neural image assessment."
IEEE transactions on image processing 27, no. 8 (2018): 3998-4011.
Created by: https://github.com/yunxiaoshi/Neural-IMage-Assessment/blob/master/model/model.py
Modified by: Chaofeng Chen (https://github.com/chaofengc)
"""
import timm
import torch
import torch.nn as nn
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from pyiqa.archs.arch_util import dist_to_mos, load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"vgg16-ava": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/NIMA_VGG16_ava-dc4e8265.pth",
"inception_resnet_v2-ava": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/NIMA_InceptionV2_ava-b0c77c00.pth",
}
@ARCH_REGISTRY.register()
class NIMA(nn.Module):
"""Neural IMage Assessment model.
Modification:
- for simplicity, we use global average pool for all models
- we remove the dropout, because parameters with avg pool is much less.
Args:
        base_model_name: pretrained model to extract features; can be any model supported by timm.
Models used in the paper: vgg16, inception_resnet_v2, mobilenetv2_100
default input shape:
- vgg and mobilenet: (N, 3, 224, 224)
- inception: (N, 3, 299, 299)
"""
def __init__(
self,
base_model_name="vgg16",
num_classes=10,
dropout_rate=0.0,
pretrained=True,
pretrained_model_path=None,
default_mean=[0.485, 0.456, 0.406],
default_std=[0.229, 0.224, 0.225],
):
super(NIMA, self).__init__()
self.base_model = timm.create_model(
base_model_name, pretrained=True, features_only=True
)
# set output number of classes
num_classes = 10 if "ava" in pretrained else num_classes
self.global_pool = nn.AdaptiveAvgPool2d(1)
in_ch = self.base_model.feature_info.channels()[-1]
self.num_classes = num_classes
self.classifier = [
nn.Flatten(),
nn.Dropout(p=dropout_rate),
nn.Linear(in_features=in_ch, out_features=num_classes),
]
if num_classes > 1:
self.classifier.append(nn.Softmax(dim=-1))
self.classifier = nn.Sequential(*self.classifier)
if "inception" in base_model_name:
default_mean = IMAGENET_INCEPTION_MEAN
default_std = IMAGENET_INCEPTION_STD
self.default_mean = torch.Tensor(default_mean).view(1, 3, 1, 1)
self.default_std = torch.Tensor(default_std).view(1, 3, 1, 1)
if pretrained and pretrained_model_path is None:
url_key = f"{base_model_name}-{pretrained}"
load_pretrained_network(
self, default_model_urls[url_key], True, weight_keys="params"
)
elif pretrained_model_path is not None:
load_pretrained_network(
self, pretrained_model_path, True, weight_keys="params"
)
def preprocess(self, x):
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
return x
def forward(self, x, return_mos=True, return_dist=False):
r"""Computation image quality using NIMA.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
return_mos: Whether to return mos_score.
            return_dist: Whether to return dist_score.
"""
# imagenet normalization of input is hard coded
x = self.preprocess(x)
x = self.base_model(x)[-1]
x = self.global_pool(x)
dist = self.classifier(x)
mos = dist_to_mos(dist)
return_list = []
if return_mos:
return_list.append(mos)
if return_dist:
return_list.append(dist)
if len(return_list) > 1:
return return_list
else:
return return_list[0]
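# Illustrative usage sketch: the 224x224 tensor and the "ava" pretrain tag are
# assumptions for this example; the matching weights are downloaded on first
# use.
def _example_nima_usage():
    model = NIMA(base_model_name="vgg16", pretrained="ava").eval()
    img = torch.rand(1, 3, 224, 224)
    with torch.no_grad():
        mos = model(img)  # predicted mean opinion score
    return mos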
| 4,045 | 33 | 139 | py |
BVQI | BVQI-master/pyiqa/archs/vif_arch.py | r"""VIF Metric
Created by: https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/VIF.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
Matlab code from http://live.ece.utexas.edu/research/Quality/vifvec_release.zip;
"""
import numpy as np
import torch
from torch.nn import functional as F
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
def sp5_filters():
r"""Define spatial filters."""
filters = {}
filters["harmonics"] = np.array([1, 3, 5])
filters["mtx"] = np.array(
[
[0.3333, 0.2887, 0.1667, 0.0000, -0.1667, -0.2887],
[0.0000, 0.1667, 0.2887, 0.3333, 0.2887, 0.1667],
[0.3333, -0.0000, -0.3333, -0.0000, 0.3333, -0.0000],
[0.0000, 0.3333, 0.0000, -0.3333, 0.0000, 0.3333],
[0.3333, -0.2887, 0.1667, -0.0000, -0.1667, 0.2887],
[-0.0000, 0.1667, -0.2887, 0.3333, -0.2887, 0.1667],
]
)
filters["hi0filt"] = np.array(
[
[
-0.00033429,
-0.00113093,
-0.00171484,
-0.00133542,
-0.00080639,
-0.00133542,
-0.00171484,
-0.00113093,
-0.00033429,
],
[
-0.00113093,
-0.00350017,
-0.00243812,
0.00631653,
0.01261227,
0.00631653,
-0.00243812,
-0.00350017,
-0.00113093,
],
[
-0.00171484,
-0.00243812,
-0.00290081,
-0.00673482,
-0.00981051,
-0.00673482,
-0.00290081,
-0.00243812,
-0.00171484,
],
[
-0.00133542,
0.00631653,
-0.00673482,
-0.07027679,
-0.11435863,
-0.07027679,
-0.00673482,
0.00631653,
-0.00133542,
],
[
-0.00080639,
0.01261227,
-0.00981051,
-0.11435863,
0.81380200,
-0.11435863,
-0.00981051,
0.01261227,
-0.00080639,
],
[
-0.00133542,
0.00631653,
-0.00673482,
-0.07027679,
-0.11435863,
-0.07027679,
-0.00673482,
0.00631653,
-0.00133542,
],
[
-0.00171484,
-0.00243812,
-0.00290081,
-0.00673482,
-0.00981051,
-0.00673482,
-0.00290081,
-0.00243812,
-0.00171484,
],
[
-0.00113093,
-0.00350017,
-0.00243812,
0.00631653,
0.01261227,
0.00631653,
-0.00243812,
-0.00350017,
-0.00113093,
],
[
-0.00033429,
-0.00113093,
-0.00171484,
-0.00133542,
-0.00080639,
-0.00133542,
-0.00171484,
-0.00113093,
-0.00033429,
],
]
)
filters["lo0filt"] = np.array(
[
[0.00341614, -0.01551246, -0.03848215, -0.01551246, 0.00341614],
[-0.01551246, 0.05586982, 0.15925570, 0.05586982, -0.01551246],
[-0.03848215, 0.15925570, 0.40304148, 0.15925570, -0.03848215],
[-0.01551246, 0.05586982, 0.15925570, 0.05586982, -0.01551246],
[0.00341614, -0.01551246, -0.03848215, -0.01551246, 0.00341614],
]
)
filters["lofilt"] = 2 * np.array(
[
[
0.00085404,
-0.00244917,
-0.00387812,
-0.00944432,
-0.00962054,
-0.00944432,
-0.00387812,
-0.00244917,
0.00085404,
],
[
-0.00244917,
-0.00523281,
-0.00661117,
0.00410600,
0.01002988,
0.00410600,
-0.00661117,
-0.00523281,
-0.00244917,
],
[
-0.00387812,
-0.00661117,
0.01396746,
0.03277038,
0.03981393,
0.03277038,
0.01396746,
-0.00661117,
-0.00387812,
],
[
-0.00944432,
0.00410600,
0.03277038,
0.06426333,
0.08169618,
0.06426333,
0.03277038,
0.00410600,
-0.00944432,
],
[
-0.00962054,
0.01002988,
0.03981393,
0.08169618,
0.10096540,
0.08169618,
0.03981393,
0.01002988,
-0.00962054,
],
[
-0.00944432,
0.00410600,
0.03277038,
0.06426333,
0.08169618,
0.06426333,
0.03277038,
0.00410600,
-0.00944432,
],
[
-0.00387812,
-0.00661117,
0.01396746,
0.03277038,
0.03981393,
0.03277038,
0.01396746,
-0.00661117,
-0.00387812,
],
[
-0.00244917,
-0.00523281,
-0.00661117,
0.00410600,
0.01002988,
0.00410600,
-0.00661117,
-0.00523281,
-0.00244917,
],
[
0.00085404,
-0.00244917,
-0.00387812,
-0.00944432,
-0.00962054,
-0.00944432,
-0.00387812,
-0.00244917,
0.00085404,
],
]
)
filters["bfilts"] = np.array(
[
[
0.00277643,
0.00496194,
0.01026699,
0.01455399,
0.01026699,
0.00496194,
0.00277643,
-0.00986904,
-0.00893064,
0.01189859,
0.02755155,
0.01189859,
-0.00893064,
-0.00986904,
-0.01021852,
-0.03075356,
-0.08226445,
-0.11732297,
-0.08226445,
-0.03075356,
-0.01021852,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.01021852,
0.03075356,
0.08226445,
0.11732297,
0.08226445,
0.03075356,
0.01021852,
0.00986904,
0.00893064,
-0.01189859,
-0.02755155,
-0.01189859,
0.00893064,
0.00986904,
-0.00277643,
-0.00496194,
-0.01026699,
-0.01455399,
-0.01026699,
-0.00496194,
-0.00277643,
],
[
-0.00343249,
-0.00640815,
-0.00073141,
0.01124321,
0.00182078,
0.00285723,
0.01166982,
-0.00358461,
-0.01977507,
-0.04084211,
-0.00228219,
0.03930573,
0.01161195,
0.00128000,
0.01047717,
0.01486305,
-0.04819057,
-0.12227230,
-0.05394139,
0.00853965,
-0.00459034,
0.00790407,
0.04435647,
0.09454202,
-0.00000000,
-0.09454202,
-0.04435647,
-0.00790407,
0.00459034,
-0.00853965,
0.05394139,
0.12227230,
0.04819057,
-0.01486305,
-0.01047717,
-0.00128000,
-0.01161195,
-0.03930573,
0.00228219,
0.04084211,
0.01977507,
0.00358461,
-0.01166982,
-0.00285723,
-0.00182078,
-0.01124321,
0.00073141,
0.00640815,
0.00343249,
],
[
0.00343249,
0.00358461,
-0.01047717,
-0.00790407,
-0.00459034,
0.00128000,
0.01166982,
0.00640815,
0.01977507,
-0.01486305,
-0.04435647,
0.00853965,
0.01161195,
0.00285723,
0.00073141,
0.04084211,
0.04819057,
-0.09454202,
-0.05394139,
0.03930573,
0.00182078,
-0.01124321,
0.00228219,
0.12227230,
-0.00000000,
-0.12227230,
-0.00228219,
0.01124321,
-0.00182078,
-0.03930573,
0.05394139,
0.09454202,
-0.04819057,
-0.04084211,
-0.00073141,
-0.00285723,
-0.01161195,
-0.00853965,
0.04435647,
0.01486305,
-0.01977507,
-0.00640815,
-0.01166982,
-0.00128000,
0.00459034,
0.00790407,
0.01047717,
-0.00358461,
-0.00343249,
],
[
-0.00277643,
0.00986904,
0.01021852,
-0.00000000,
-0.01021852,
-0.00986904,
0.00277643,
-0.00496194,
0.00893064,
0.03075356,
-0.00000000,
-0.03075356,
-0.00893064,
0.00496194,
-0.01026699,
-0.01189859,
0.08226445,
-0.00000000,
-0.08226445,
0.01189859,
0.01026699,
-0.01455399,
-0.02755155,
0.11732297,
-0.00000000,
-0.11732297,
0.02755155,
0.01455399,
-0.01026699,
-0.01189859,
0.08226445,
-0.00000000,
-0.08226445,
0.01189859,
0.01026699,
-0.00496194,
0.00893064,
0.03075356,
-0.00000000,
-0.03075356,
-0.00893064,
0.00496194,
-0.00277643,
0.00986904,
0.01021852,
-0.00000000,
-0.01021852,
-0.00986904,
0.00277643,
],
[
-0.01166982,
-0.00128000,
0.00459034,
0.00790407,
0.01047717,
-0.00358461,
-0.00343249,
-0.00285723,
-0.01161195,
-0.00853965,
0.04435647,
0.01486305,
-0.01977507,
-0.00640815,
-0.00182078,
-0.03930573,
0.05394139,
0.09454202,
-0.04819057,
-0.04084211,
-0.00073141,
-0.01124321,
0.00228219,
0.12227230,
-0.00000000,
-0.12227230,
-0.00228219,
0.01124321,
0.00073141,
0.04084211,
0.04819057,
-0.09454202,
-0.05394139,
0.03930573,
0.00182078,
0.00640815,
0.01977507,
-0.01486305,
-0.04435647,
0.00853965,
0.01161195,
0.00285723,
0.00343249,
0.00358461,
-0.01047717,
-0.00790407,
-0.00459034,
0.00128000,
0.01166982,
],
[
-0.01166982,
-0.00285723,
-0.00182078,
-0.01124321,
0.00073141,
0.00640815,
0.00343249,
-0.00128000,
-0.01161195,
-0.03930573,
0.00228219,
0.04084211,
0.01977507,
0.00358461,
0.00459034,
-0.00853965,
0.05394139,
0.12227230,
0.04819057,
-0.01486305,
-0.01047717,
0.00790407,
0.04435647,
0.09454202,
-0.00000000,
-0.09454202,
-0.04435647,
-0.00790407,
0.01047717,
0.01486305,
-0.04819057,
-0.12227230,
-0.05394139,
0.00853965,
-0.00459034,
-0.00358461,
-0.01977507,
-0.04084211,
-0.00228219,
0.03930573,
0.01161195,
0.00128000,
-0.00343249,
-0.00640815,
-0.00073141,
0.01124321,
0.00182078,
0.00285723,
0.01166982,
],
]
).T
return filters
def corrDn(image, filt, step=1, channels=1):
r"""Compute correlation of image with FILT, followed by downsampling.
Args:
image: A tensor. Shape :math:`(N, C, H, W)`.
filt: A filter.
step: Downsampling factors.
channels: Number of channels.
"""
filt_ = (
torch.from_numpy(filt)
.float()
.unsqueeze(0)
.unsqueeze(0)
.repeat(channels, 1, 1, 1)
.to(image.device)
)
p = (filt_.shape[2] - 1) // 2
image = F.pad(image, (p, p, p, p), "reflect")
img = F.conv2d(image, filt_, stride=step, padding=0, groups=channels)
return img
def SteerablePyramidSpace(image, height=4, order=5, channels=1):
r"""Construct a steerable pyramid on image.
Args:
image: A tensor. Shape :math:`(N, C, H, W)`.
height (int): Number of pyramid levels to build.
order (int): Number of orientations.
channels (int): Number of channels.
"""
num_orientations = order + 1
filters = sp5_filters()
hi0 = corrDn(image, filters["hi0filt"], step=1, channels=channels)
pyr_coeffs = []
pyr_coeffs.append(hi0)
lo = corrDn(image, filters["lo0filt"], step=1, channels=channels)
for _ in range(height):
bfiltsz = int(np.floor(np.sqrt(filters["bfilts"].shape[0])))
for b in range(num_orientations):
filt = filters["bfilts"][:, b].reshape(bfiltsz, bfiltsz).T
band = corrDn(lo, filt, step=1, channels=channels)
pyr_coeffs.append(band)
lo = corrDn(lo, filters["lofilt"], step=2, channels=channels)
pyr_coeffs.append(lo)
return pyr_coeffs
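# Illustrative usage sketch: a 4-level, 6-orientation pyramid yields one
# highpass band, 4 x 6 oriented bands and a final lowpass residual. The input
# size is an arbitrary assumption for this example.
def _example_steerable_pyramid():
    img = torch.rand(1, 1, 128, 128)
    pyr = SteerablePyramidSpace(img, height=4, order=5, channels=1)
    return [band.shape for band in pyr]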
@ARCH_REGISTRY.register()
class VIF(torch.nn.Module):
r"""Image Information and Visual Quality metric
Args:
channels (int): Number of channels.
level (int): Number of levels to build.
ori (int): Number of orientations.
Reference:
Sheikh, Hamid R., and Alan C. Bovik. "Image information and visual quality."
IEEE Transactions on image processing 15, no. 2 (2006): 430-444.
"""
def __init__(self, channels=1, level=4, ori=6):
super(VIF, self).__init__()
self.ori = ori - 1
self.level = level
self.channels = channels
self.M = 3
self.subbands = [4, 7, 10, 13, 16, 19, 22, 25]
self.sigma_nsq = 0.4
self.tol = 1e-12
def corrDn_win(self, image, filt, step=1, channels=1, start=[0, 0], end=[0, 0]):
r"""Compute correlation of image with FILT using window, followed by downsampling.
Args:
image: A tensor. Shape :math:`(N, C, H, W)`.
filt: A filter.
step (int): Downsampling factors.
channels (int): Number of channels.
start (list): The window over which the convolution occurs.
end (list): The window over which the convolution occurs.
"""
filt_ = (
torch.from_numpy(filt)
.float()
.unsqueeze(0)
.unsqueeze(0)
.repeat(channels, 1, 1, 1)
.to(image.device)
)
p = (filt_.shape[2] - 1) // 2
image = F.pad(image, (p, p, p, p), "reflect")
img = F.conv2d(image, filt_, stride=1, padding=0, groups=channels)
img = img[:, :, start[0] : end[0] : step, start[1] : end[1] : step]
return img
def vifsub_est_M(self, org, dist):
r"""Calculate the parameters of the distortion channel.
Args:
org: A reference tensor. Shape :math:`(N, C, H, W)`.
dist: A distortion tensor. Shape :math:`(N, C, H, W)`.
"""
g_all = []
vv_all = []
for i in range(len(self.subbands)):
sub = self.subbands[i] - 1
y = org[sub]
yn = dist[sub]
lev = np.ceil((sub - 1) / 6)
winsize = int(2 ** lev + 1)
win = np.ones((winsize, winsize))
newsizeX = int(np.floor(y.shape[2] / self.M) * self.M)
newsizeY = int(np.floor(y.shape[3] / self.M) * self.M)
y = y[:, :, :newsizeX, :newsizeY]
yn = yn[:, :, :newsizeX, :newsizeY]
winstart = [int(1 * np.floor(self.M / 2)), int(1 * np.floor(self.M / 2))]
winend = [
int(y.shape[2] - np.ceil(self.M / 2)) + 1,
int(y.shape[3] - np.ceil(self.M / 2)) + 1,
]
mean_x = self.corrDn_win(
y,
win / (winsize ** 2),
step=self.M,
channels=self.channels,
start=winstart,
end=winend,
)
mean_y = self.corrDn_win(
yn,
win / (winsize ** 2),
step=self.M,
channels=self.channels,
start=winstart,
end=winend,
)
cov_xy = (
self.corrDn_win(
y * yn,
win,
step=self.M,
channels=self.channels,
start=winstart,
end=winend,
)
- (winsize ** 2) * mean_x * mean_y
)
ss_x = (
self.corrDn_win(
y ** 2,
win,
step=self.M,
channels=self.channels,
start=winstart,
end=winend,
)
- (winsize ** 2) * mean_x ** 2
)
ss_y = (
self.corrDn_win(
yn ** 2,
win,
step=self.M,
channels=self.channels,
start=winstart,
end=winend,
)
- (winsize ** 2) * mean_y ** 2
)
ss_x = F.relu(ss_x)
ss_y = F.relu(ss_y)
g = cov_xy / (ss_x + self.tol)
vv = (ss_y - g * cov_xy) / (winsize ** 2)
g = g.masked_fill(ss_x < self.tol, 0)
vv[ss_x < self.tol] = ss_y[ss_x < self.tol]
ss_x = ss_x.masked_fill(ss_x < self.tol, 0)
g = g.masked_fill(ss_y < self.tol, 0)
vv = vv.masked_fill(ss_y < self.tol, 0)
vv[g < 0] = ss_y[g < 0]
g = F.relu(g)
vv = vv.masked_fill(vv < self.tol, self.tol)
g_all.append(g)
vv_all.append(vv)
return g_all, vv_all
def refparams_vecgsm(self, org):
r"""Calculate the parameters of the reference image.
Args:
org: A reference tensor. Shape :math:`(N, C, H, W)`.
"""
ssarr, l_arr, cu_arr = [], [], []
for i in range(len(self.subbands)):
sub = self.subbands[i] - 1
y = org[sub]
M = self.M
newsizeX = int(np.floor(y.shape[2] / M) * M)
newsizeY = int(np.floor(y.shape[3] / M) * M)
y = y[:, :, :newsizeX, :newsizeY]
B, C, H, W = y.shape
temp = []
for j in range(M):
for k in range(M):
temp.append(
y[:, :, k : H - (M - k) + 1, j : W - (M - j) + 1].reshape(
B, C, -1
)
)
temp = torch.stack(temp, dim=3)
mcu = torch.mean(temp, dim=2).unsqueeze(2).repeat(1, 1, temp.shape[2], 1)
cu = (
torch.matmul((temp - mcu).permute(0, 1, 3, 2), temp - mcu)
/ temp.shape[2]
)
temp = []
for j in range(M):
for k in range(M):
temp.append(y[:, :, k : H + 1 : M, j : W + 1 : M].reshape(B, C, -1))
temp = torch.stack(temp, dim=2)
ss = torch.matmul(torch.pinverse(cu), temp)
ss = torch.sum(ss * temp, dim=2) / (M * M)
ss = ss.reshape(B, C, H // M, W // M)
v, _ = torch.linalg.eigh(cu, UPLO="U")
l_arr.append(v)
ssarr.append(ss)
cu_arr.append(cu)
return ssarr, l_arr, cu_arr
def vif(self, x, y):
r"""VIF metric. Order of input is important.
Args:
x: A distortion tensor. Shape :math:`(N, C, H, W)`.
y: A reference tensor. Shape :math:`(N, C, H, W)`.
"""
# Convert RGB image to YCBCR and use the Y-channel.
x = to_y_channel(x, 255)
y = to_y_channel(y, 255)
sp_x = SteerablePyramidSpace(
x, height=self.level, order=self.ori, channels=self.channels
)[::-1]
sp_y = SteerablePyramidSpace(
y, height=self.level, order=self.ori, channels=self.channels
)[::-1]
g_all, vv_all = self.vifsub_est_M(sp_y, sp_x)
ss_arr, l_arr, cu_arr = self.refparams_vecgsm(sp_y)
num, den = [], []
for i in range(len(self.subbands)):
sub = self.subbands[i]
g = g_all[i]
vv = vv_all[i]
ss = ss_arr[i]
lamda = l_arr[i]
neigvals = lamda.shape[2]
lev = np.ceil((sub - 1) / 6)
winsize = 2 ** lev + 1
offset = (winsize - 1) / 2
offset = int(np.ceil(offset / self.M))
_, _, H, W = g.shape
g = g[:, :, offset : H - offset, offset : W - offset]
vv = vv[:, :, offset : H - offset, offset : W - offset]
ss = ss[:, :, offset : H - offset, offset : W - offset]
temp1 = 0
temp2 = 0
for j in range(neigvals):
cc = lamda[:, :, j].unsqueeze(2).unsqueeze(3)
temp1 = temp1 + torch.sum(
torch.log2(1 + g * g * ss * cc / (vv + self.sigma_nsq)), dim=[2, 3]
)
temp2 = temp2 + torch.sum(
torch.log2(1 + ss * cc / (self.sigma_nsq)), dim=[2, 3]
)
num.append(temp1.mean(1))
den.append(temp2.mean(1))
return torch.stack(num, dim=1).sum(1) / (torch.stack(den, dim=1).sum(1) + 1e-12)
def forward(self, X, Y):
r"""Args:
x: A distortion tensor. Shape :math:`(N, C, H, W)`.
y: A reference tensor. Shape :math:`(N, C, H, W)`.
Order of input is important.
"""
assert (
X.shape == Y.shape
), "Input and reference images should have the same shape, but got"
f"{X.shape} and {Y.shape}"
score = self.vif(X, Y)
return score
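# Illustrative usage sketch (hedged): `VIF` below is an assumption about how the
# class defined above is named once registered. Inputs are batched RGB tensors in
# [0, 1] with identical shapes; the distorted image comes first, the reference second.
#   >>> import torch
#   >>> metric = VIF()
#   >>> dist = torch.rand(1, 3, 256, 256)
#   >>> ref = torch.rand(1, 3, 256, 256)
#   >>> score = metric(dist, ref)   # tensor of shape (1,), higher means closer to the reference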
| 25,964 | 28.913594 | 90 | py |
BVQI | BVQI-master/pyiqa/archs/fid_arch.py | """FID and clean-fid metric
Codes are borrowed from the clean-fid project:
- https://github.com/GaParmar/clean-fid
Ref:
[1] GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium.
Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Sepp Hochreiter
NeurIPS, 2017
[2] On Aliased Resizing and Surprising Subtleties in GAN Evaluation
Gaurav Parmar, Richard Zhang, Jun-Yan Zhu
CVPR, 2022
"""
import os
from email.policy import default
from glob import glob
import numpy as np
import torch
import torchvision
from PIL import Image
from scipy import linalg
from torch import nn
from tqdm import tqdm
from pyiqa.utils.download_util import load_file_from_url
from pyiqa.utils.img_util import is_image_file
from pyiqa.utils.registry import ARCH_REGISTRY
from .inception import InceptionV3
default_model_urls = {
"ffhq_clean_trainval70k_512.npz": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/ffhq_clean_trainval70k_512.npz",
"ffhq_clean_trainval70k_512_kid.npz": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/ffhq_clean_trainval70k_512_kid.npz",
}
class ResizeDataset(torch.utils.data.Dataset):
"""
A placeholder Dataset that enables parallelizing the resize operation
using multiple CPU cores
files: list of all files in the folder
mode:
    - clean: use PIL bicubic resize before computing features
    - legacy_pytorch: no resizing here; the input is resized inside the PyTorch Inception model
"""
def __init__(self, files, mode, size=(299, 299)):
self.files = files
self.transforms = torchvision.transforms.ToTensor()
self.size = size
self.mode = mode
def __len__(self):
return len(self.files)
def __getitem__(self, i):
path = str(self.files[i])
img_pil = Image.open(path).convert("RGB")
if self.mode == "clean":
def resize_single_channel(x_np):
img = Image.fromarray(x_np.astype(np.float32), mode="F")
img = img.resize(self.size, resample=Image.BICUBIC)
return np.asarray(img).clip(0, 255).reshape(*self.size, 1)
img_np = np.array(img_pil)
img_np = [resize_single_channel(img_np[:, :, idx]) for idx in range(3)]
img_np = np.concatenate(img_np, axis=2).astype(np.float32)
img_np = (img_np - 128) / 128
img_t = torch.tensor(img_np).permute(2, 0, 1)
else:
img_np = np.array(img_pil).clip(0, 255)
img_t = self.transforms(img_np)
return img_t
def get_reference_statistics(name, res, mode="clean", split="test", metric="FID"):
r"""
Load precomputed reference statistics for commonly used datasets
"""
base_url = "https://www.cs.cmu.edu/~clean-fid/stats"
if split == "custom":
res = "na"
if metric == "FID":
rel_path = (f"{name}_{mode}_{split}_{res}.npz").lower()
url = f"{base_url}/{rel_path}"
if rel_path in default_model_urls.keys():
fpath = load_file_from_url(default_model_urls[rel_path])
else:
fpath = load_file_from_url(url)
stats = np.load(fpath)
mu, sigma = stats["mu"], stats["sigma"]
return mu, sigma
elif metric == "KID":
rel_path = (f"{name}_{mode}_{split}_{res}_kid.npz").lower()
url = f"{base_url}/{rel_path}"
if rel_path in default_model_urls.keys():
fpath = load_file_from_url(default_model_urls[rel_path])
else:
fpath = load_file_from_url(url)
stats = np.load(fpath)
return stats["feats"]
def frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""
Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Danica J. Sutherland.
Params:
mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
mu2 : The sample mean over activations, precalculated on an
representative data set.
sigma1: The covariance matrix over activations for generated samples.
sigma2: The covariance matrix over activations, precalculated on an
representative data set.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert (
mu1.shape == mu2.shape
), "Training and test mean vectors have different lengths"
assert (
sigma1.shape == sigma2.shape
), "Training and test covariances have different dimensions"
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = (
"fid calculation produces singular product; "
"adding %s to diagonal of cov estimates"
) % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
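# Worked sketch of the closed-form distance above (sizes are illustrative assumptions):
# sample statistics of two feature sets plug directly into the formula, and identical
# statistics give a distance of ~0 up to numerical error.
#   >>> import numpy as np
#   >>> feats1 = np.random.randn(500, 64)
#   >>> feats2 = np.random.randn(500, 64) + 0.1
#   >>> mu1, sig1 = feats1.mean(0), np.cov(feats1, rowvar=False)
#   >>> mu2, sig2 = feats2.mean(0), np.cov(feats2, rowvar=False)
#   >>> frechet_distance(mu1, sig1, mu2, sig2)   # small non-negative float
#   >>> frechet_distance(mu1, sig1, mu1, sig1)   # ~0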
def kernel_distance(feats1, feats2, num_subsets=100, max_subset_size=1000):
r"""
Compute the KID score given the sets of features
"""
n = feats1.shape[1]
m = min(min(feats1.shape[0], feats2.shape[0]), max_subset_size)
t = 0
for _subset_idx in range(num_subsets):
x = feats2[np.random.choice(feats2.shape[0], m, replace=False)]
y = feats1[np.random.choice(feats1.shape[0], m, replace=False)]
a = (x @ x.T / n + 1) ** 3 + (y @ y.T / n + 1) ** 3
b = (x @ y.T / n + 1) ** 3
t += (a.sum() - np.diag(a).sum()) / (m - 1) - b.sum() * 2 / m
kid = t / num_subsets / m
return float(kid)
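# Hedged example: KID works directly on the two raw feature matrices (no Gaussian fit).
# Sample counts and dimensionality below are arbitrary assumptions; two draws from the
# same distribution give values that fluctuate around 0.
#   >>> import numpy as np
#   >>> feats1 = np.random.randn(2000, 64)
#   >>> feats2 = np.random.randn(2000, 64)
#   >>> kernel_distance(feats1, feats2, num_subsets=10, max_subset_size=500)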
def get_folder_features(
fdir,
model=None,
num_workers=12,
batch_size=32,
device=torch.device("cuda"),
mode="clean",
description="",
verbose=True,
):
r"""
Compute the inception features for a folder of image files
"""
files = sorted(
[file for file in glob(os.path.join(fdir, "*")) if is_image_file(file)]
)
if verbose:
print(f"Found {len(files)} images in the folder {fdir}")
dataset = ResizeDataset(files, mode=mode)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=num_workers,
)
# collect all inception features
if verbose:
pbar = tqdm(dataloader, desc=description)
else:
pbar = dataloader
if mode == "clean":
resize_input = normalize_input = False
else:
resize_input = normalize_input = True
l_feats = []
with torch.no_grad():
for batch in pbar:
feat = model(batch.to(device), resize_input, normalize_input)
feat = feat[0].squeeze(-1).squeeze(-1).detach().cpu().numpy()
l_feats.append(feat)
np_feats = np.concatenate(l_feats)
return np_feats
@ARCH_REGISTRY.register()
class FID(nn.Module):
"""FID and Clean-FID metric
Args:
        mode: [clean, legacy_pytorch, legacy_tensorflow]. Default: clean
"""
def __init__(
self,
dims=2048,
) -> None:
super().__init__()
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
self.model = InceptionV3(output_blocks=[block_idx])
self.model.eval()
def forward(
self,
fdir1=None,
fdir2=None,
mode="clean",
dataset_name="FFHQ",
dataset_res=1024,
dataset_split="train",
num_workers=12,
batch_size=32,
device=torch.device("cuda"),
verbose=True,
):
assert mode in [
"clean",
"legacy_pytorch",
"legacy_tensorflow",
], "Invalid calculation mode, should be in [clean, legacy_pytorch, legacy_tensorflow]"
# if both dirs are specified, compute FID between folders
if fdir1 is not None and fdir2 is not None:
            if verbose:
                print("compute FID between two folders")
fbname1 = os.path.basename(fdir1)
np_feats1 = get_folder_features(
fdir1,
self.model,
num_workers=num_workers,
batch_size=batch_size,
device=device,
mode=mode,
description=f"FID {fbname1}: ",
verbose=verbose,
)
fbname2 = os.path.basename(fdir2)
np_feats2 = get_folder_features(
fdir2,
self.model,
num_workers=num_workers,
batch_size=batch_size,
device=device,
mode=mode,
description=f"FID {fbname2}: ",
verbose=verbose,
)
mu1, sig1 = np.mean(np_feats1, axis=0), np.cov(np_feats1, rowvar=False)
mu2, sig2 = np.mean(np_feats2, axis=0), np.cov(np_feats2, rowvar=False)
return frechet_distance(mu1, sig1, mu2, sig2)
# compute fid of a folder
elif fdir1 is not None and fdir2 is None:
if verbose:
print(
f"compute FID of a folder with {dataset_name}-{mode}-{dataset_split}-{dataset_res} statistics"
)
fbname1 = os.path.basename(fdir1)
np_feats1 = get_folder_features(
fdir1,
self.model,
num_workers=num_workers,
batch_size=batch_size,
device=device,
mode=mode,
description=f"FID {fbname1}: ",
verbose=verbose,
)
# Load reference FID statistics (download if needed)
ref_mu, ref_sigma = get_reference_statistics(
dataset_name, dataset_res, mode=mode, split=dataset_split
)
mu1, sig1 = np.mean(np_feats1, axis=0), np.cov(np_feats1, rowvar=False)
score = frechet_distance(mu1, sig1, ref_mu, ref_sigma)
return score
else:
raise ValueError("invalid combination of arguments entered")
| 10,849 | 31.779456 | 151 | py |
BVQI | BVQI-master/pyiqa/archs/brisque_arch.py | r"""BRISQUE Metric
Created by: https://github.com/photosynthesis-team/piq/blob/master/piq/brisque.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Reference:
MATLAB codes: https://live.ece.utexas.edu/research/Quality/index_algorithms.htm BRISQUE;
Pretrained model from: https://github.com/photosynthesis-team/piq/releases/download/v0.4.0/brisque_svm_weights.pt
"""
import torch
from pyiqa.matlab_utils import imresize
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.download_util import load_file_from_url
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import estimate_aggd_param, estimate_ggd_param, normalize_img_with_guass
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/brisque_svm_weights.pth"
}
def brisque(
x: torch.Tensor,
kernel_size: int = 7,
kernel_sigma: float = 7 / 6,
test_y_channel: bool = True,
pretrained_model_path: str = None,
) -> torch.Tensor:
r"""Interface of BRISQUE index.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
kernel_size: The side-length of the sliding window used in comparison. Must be an odd value.
kernel_sigma: Sigma of normal distribution.
        test_y_channel: Whether to use the y-channel of YCbCr.
pretrained_model_path: The model path.
Returns:
Value of BRISQUE index.
References:
Mittal, Anish, Anush Krishna Moorthy, and Alan Conrad Bovik.
"No-reference image quality assessment in the spatial domain."
IEEE Transactions on image processing 21, no. 12 (2012): 4695-4708.
"""
if test_y_channel and x.size(1) == 3:
x = to_y_channel(x, 255.0)
else:
x = x * 255
features = []
num_of_scales = 2
for _ in range(num_of_scales):
features.append(natural_scene_statistics(x, kernel_size, kernel_sigma))
x = imresize(x, scale=0.5, antialiasing=True)
features = torch.cat(features, dim=-1)
scaled_features = scale_features(features)
if pretrained_model_path:
sv_coef, sv = torch.load(pretrained_model_path)
sv_coef = sv_coef.to(x)
sv = sv.to(x)
# gamma and rho are SVM model parameters taken from official implementation of BRISQUE on MATLAB
# Source: https://live.ece.utexas.edu/research/Quality/index_algorithms.htm
gamma = 0.05
rho = -153.591
sv.t_()
kernel_features = rbf_kernel(features=scaled_features, sv=sv, gamma=gamma)
score = kernel_features @ sv_coef
return score - rho
def natural_scene_statistics(
luma: torch.Tensor, kernel_size: int = 7, sigma: float = 7.0 / 6
) -> torch.Tensor:
luma_nrmlzd = normalize_img_with_guass(luma, kernel_size, sigma, padding="same")
alpha, sigma = estimate_ggd_param(luma_nrmlzd)
features = [alpha, sigma.pow(2)]
shifts = [(0, 1), (1, 0), (1, 1), (-1, 1)]
for shift in shifts:
shifted_luma_nrmlzd = torch.roll(luma_nrmlzd, shifts=shift, dims=(-2, -1))
alpha, sigma_l, sigma_r = estimate_aggd_param(
luma_nrmlzd * shifted_luma_nrmlzd, return_sigma=True
)
eta = (sigma_r - sigma_l) * torch.exp(
torch.lgamma(2.0 / alpha)
- (torch.lgamma(1.0 / alpha) + torch.lgamma(3.0 / alpha)) / 2
)
features.extend((alpha, eta, sigma_l.pow(2), sigma_r.pow(2)))
return torch.stack(features, dim=-1)
def scale_features(features: torch.Tensor) -> torch.Tensor:
lower_bound = -1
upper_bound = 1
# Feature range is taken from official implementation of BRISQUE on MATLAB.
# Source: https://live.ece.utexas.edu/research/Quality/index_algorithms.htm
feature_ranges = torch.tensor(
[
[0.338, 10],
[0.017204, 0.806612],
[0.236, 1.642],
[-0.123884, 0.20293],
[0.000155, 0.712298],
[0.001122, 0.470257],
[0.244, 1.641],
[-0.123586, 0.179083],
[0.000152, 0.710456],
[0.000975, 0.470984],
[0.249, 1.555],
[-0.135687, 0.100858],
[0.000174, 0.684173],
[0.000913, 0.534174],
[0.258, 1.561],
[-0.143408, 0.100486],
[0.000179, 0.685696],
[0.000888, 0.536508],
[0.471, 3.264],
[0.012809, 0.703171],
[0.218, 1.046],
[-0.094876, 0.187459],
[1.5e-005, 0.442057],
[0.001272, 0.40803],
[0.222, 1.042],
[-0.115772, 0.162604],
[1.6e-005, 0.444362],
[0.001374, 0.40243],
[0.227, 0.996],
[-0.117188, 0.09832299999999999],
[3e-005, 0.531903],
[0.001122, 0.369589],
[0.228, 0.99],
[-0.12243, 0.098658],
[2.8e-005, 0.530092],
[0.001118, 0.370399],
]
).to(features)
scaled_features = lower_bound + (upper_bound - lower_bound) * (
features - feature_ranges[..., 0]
) / (feature_ranges[..., 1] - feature_ranges[..., 0])
return scaled_features
def rbf_kernel(
features: torch.Tensor, sv: torch.Tensor, gamma: float = 0.05
) -> torch.Tensor:
dist = (features.unsqueeze(dim=-1) - sv.unsqueeze(dim=0)).pow(2).sum(dim=1)
return torch.exp(-dist * gamma)
@ARCH_REGISTRY.register()
class BRISQUE(torch.nn.Module):
r"""Creates a criterion that measures the BRISQUE score.
Args:
        kernel_size (int): Side length of the sliding Gaussian window used to
            compute the local mean and variance. Must be an odd value.
        kernel_sigma (float): Standard deviation of the Gaussian kernel.
        test_y_channel (bool): Whether to use the y-channel of YCbCr.
pretrained_model_path (str): The model path.
"""
def __init__(
self,
kernel_size: int = 7,
kernel_sigma: float = 7 / 6,
test_y_channel: bool = True,
pretrained_model_path: str = None,
) -> None:
super().__init__()
self.kernel_size = kernel_size
# This check might look redundant because kernel size is checked within the brisque function anyway.
# However, this check allows to fail fast when the loss is being initialised and training has not been started.
assert kernel_size % 2 == 1, f"Kernel size must be odd, got [{kernel_size}]"
self.kernel_sigma = kernel_sigma
self.test_y_channel = test_y_channel
if pretrained_model_path is not None:
self.pretrained_model_path = pretrained_model_path
else:
self.pretrained_model_path = load_file_from_url(default_model_urls["url"])
def forward(self, x: torch.Tensor) -> torch.Tensor:
r"""Computation of BRISQUE score as a loss function.
Args:
x: An input tensor with (N, C, H, W) shape. RGB channel order for colour images.
Returns:
Value of BRISQUE metric.
"""
return brisque(
x,
kernel_size=self.kernel_size,
kernel_sigma=self.kernel_sigma,
test_y_channel=self.test_y_channel,
pretrained_model_path=self.pretrained_model_path,
)
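# Minimal usage sketch (assumptions: RGB input in [0, 1], and network access so the
# default SVM weights can be fetched when no local path is given).
#   >>> import torch
#   >>> metric = BRISQUE()
#   >>> img = torch.rand(1, 3, 384, 512)
#   >>> score = metric(img)   # lower BRISQUE scores indicate better predicted quality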
| 7,328 | 32.774194 | 119 | py |
BVQI | BVQI-master/pyiqa/archs/psnr_arch.py | r"""Peak signal-to-noise ratio (PSNR) Metric
Created by: https://github.com/photosynthesis-team/piq
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
Wikipedia from https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
QIQA from https://github.com/francois-rozet/piqa/blob/master/piqa/psnr.py
"""
import torch
import torch.nn as nn
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
def psnr(x, y, test_y_channel=False, data_range=1.0, eps=1e-8, color_space="yiq"):
r"""Compute Peak Signal-to-Noise Ratio for a batch of images.
Supports both greyscale and color images with RGB channel order.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
test_y_channel (Boolean): Convert RGB image to YCbCr format and computes PSNR
only on luminance channel if `True`. Compute on all 3 channels otherwise.
data_range: Maximum value range of images (default 1.0).
Returns:
        PSNR Index of similarity between two images.
"""
if (x.shape[1] == 3) and test_y_channel:
# Convert RGB image to YCbCr and use Y-channel
x = to_y_channel(x, data_range, color_space)
y = to_y_channel(y, data_range, color_space)
mse = torch.mean((x - y) ** 2, dim=[1, 2, 3])
score = 10 * torch.log10(data_range ** 2 / (mse + eps))
return score
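# Quick numeric sketch (illustrative; exact values depend on the random draw): with
# data_range=1.0 an identical pair is limited only by eps, and mild Gaussian noise
# lowers the score to roughly 10*log10(1/mse).
#   >>> import torch
#   >>> x = torch.rand(1, 3, 64, 64)
#   >>> y = (x + 0.05 * torch.randn_like(x)).clamp(0, 1)
#   >>> psnr(x, x)   # ~80 dB, bounded only by eps=1e-8
#   >>> psnr(x, y)   # roughly 26 dB for sigma=0.05 noise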
@ARCH_REGISTRY.register()
class PSNR(nn.Module):
r"""
Args:
X, Y (torch.Tensor): distorted image and reference image tensor with shape (B, 3, H, W)
test_y_channel (Boolean): Convert RGB image to YCbCr format and computes PSNR
only on luminance channel if `True`. Compute on all 3 channels otherwise.
kwargs: other parameters, including
            - data_range: maximum numeric value
- eps: small constant for numeric stability
Return:
score (torch.Tensor): (B, 1)
"""
def __init__(self, test_y_channel=False, crop_border=0, **kwargs):
super().__init__()
self.test_y_channel = test_y_channel
self.kwargs = kwargs
self.crop_border = crop_border
def forward(self, X, Y):
assert (
X.shape == Y.shape
), f"Input and reference images should have the same shape, but got {X.shape} and {Y.shape}"
if self.crop_border != 0:
crop_border = self.crop_border
X = X[..., crop_border:-crop_border, crop_border:-crop_border]
Y = Y[..., crop_border:-crop_border, crop_border:-crop_border]
score = psnr(X, Y, self.test_y_channel, **self.kwargs)
return score
| 2,691 | 33.961039 | 100 | py |
BVQI | BVQI-master/pyiqa/archs/gmsd_arch.py | r"""GMSD Metric
Created by: https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/GMSD.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
Matlab code from https://www4.comp.polyu.edu.hk/~cslzhang/IQA/GMSD/GMSD.m;
"""
import torch
from torch import nn
from torch.nn import functional as F
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
def gmsd(
x: torch.Tensor,
y: torch.Tensor,
T: int = 170,
channels: int = 3,
test_y_channel: bool = True,
) -> torch.Tensor:
r"""GMSD metric.
Args:
x: A distortion tensor. Shape :math:`(N, C, H, W)`.
y: A reference tensor. Shape :math:`(N, C, H, W)`.
T: A positive constant that supplies numerical stability.
channels: Number of channels.
test_y_channel: bool, whether to use y channel on ycbcr.
"""
if test_y_channel:
x = to_y_channel(x, 255)
y = to_y_channel(y, 255)
channels = 1
else:
x = x * 255.0
y = y * 255.0
dx = (
(torch.Tensor([[1, 0, -1], [1, 0, -1], [1, 0, -1]]) / 3.0)
.unsqueeze(0)
.unsqueeze(0)
.repeat(channels, 1, 1, 1)
.to(x)
)
dy = (
(torch.Tensor([[1, 1, 1], [0, 0, 0], [-1, -1, -1]]) / 3.0)
.unsqueeze(0)
.unsqueeze(0)
.repeat(channels, 1, 1, 1)
.to(x)
)
aveKernel = torch.ones(channels, 1, 2, 2).to(x) / 4.0
Y1 = F.conv2d(x, aveKernel, stride=2, padding=0, groups=channels)
Y2 = F.conv2d(y, aveKernel, stride=2, padding=0, groups=channels)
IxY1 = F.conv2d(Y1, dx, stride=1, padding=1, groups=channels)
IyY1 = F.conv2d(Y1, dy, stride=1, padding=1, groups=channels)
gradientMap1 = torch.sqrt(IxY1 ** 2 + IyY1 ** 2 + 1e-12)
IxY2 = F.conv2d(Y2, dx, stride=1, padding=1, groups=channels)
IyY2 = F.conv2d(Y2, dy, stride=1, padding=1, groups=channels)
gradientMap2 = torch.sqrt(IxY2 ** 2 + IyY2 ** 2 + 1e-12)
quality_map = (2 * gradientMap1 * gradientMap2 + T) / (
gradientMap1 ** 2 + gradientMap2 ** 2 + T
)
score = torch.std(quality_map.view(quality_map.shape[0], -1), dim=1)
return score
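# Usage sketch (inputs assumed to be RGB in [0, 1]; the Y channel is used by default):
# identical inputs give a constant similarity map, hence a deviation of 0, while
# distortion raises the score.
#   >>> import torch
#   >>> ref = torch.rand(1, 3, 256, 256)
#   >>> dist = (ref + 0.1 * torch.randn_like(ref)).clamp(0, 1)
#   >>> gmsd(ref, ref)    # tensor([0.]) up to numerical precision
#   >>> gmsd(dist, ref)   # > 0, larger means stronger distortion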
@ARCH_REGISTRY.register()
class GMSD(nn.Module):
r"""Gradient Magnitude Similarity Deviation Metric.
Args:
channels: Number of channels.
test_y_channel: bool, whether to use y channel on ycbcr.
Reference:
Xue, Wufeng, Lei Zhang, Xuanqin Mou, and Alan C. Bovik.
"Gradient magnitude similarity deviation: A highly efficient
perceptual image quality index." IEEE Transactions on Image
Processing 23, no. 2 (2013): 684-695.
"""
def __init__(self, channels: int = 3, test_y_channel: bool = True) -> None:
super(GMSD, self).__init__()
self.channels = channels
self.test_y_channel = test_y_channel
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
r"""Args:
x: A distortion tensor. Shape :math:`(N, C, H, W)`.
y: A reference tensor. Shape :math:`(N, C, H, W)`.
Order of input is important.
"""
assert (
x.shape == y.shape
), f"Input and reference images should have the same shape, but got {x.shape} and {y.shape}"
score = gmsd(x, y, channels=self.channels, test_y_channel=self.test_y_channel)
return score
| 3,418 | 30.657407 | 100 | py |
BVQI | BVQI-master/pyiqa/archs/.ipynb_checkpoints/niqe_arch-checkpoint.py | r"""NIQE and ILNIQE Metrics
NIQE Metric
Created by: https://github.com/xinntao/BasicSR/blob/5668ba75eb8a77e8d2dd46746a36fee0fbb0fdcd/basicsr/metrics/niqe.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Reference:
MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
ILNIQE Metric
Created by: Chaofeng Chen (https://github.com/chaofengc)
Reference:
- Python codes: https://github.com/IceClear/IL-NIQE/blob/master/IL-NIQE.py
- Matlab codes: https://www4.comp.polyu.edu.hk/~cslzhang/IQA/ILNIQE/Files/ILNIQE.zip
"""
import math
import numpy as np
import scipy
import scipy.io
import torch
from pyiqa.archs.fsim_arch import _construct_filters
from pyiqa.matlab_utils import (
blockproc,
conv2d,
fitweibull,
fspecial,
imfilter,
imresize,
nancov,
nanmean,
)
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.download_util import load_file_from_url
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import diff_round, estimate_aggd_param, normalize_img_with_guass
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/niqe_modelparameters.mat",
"niqe": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/niqe_modelparameters.mat",
"ilniqe": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/ILNIQE_templateModel.mat",
}
def compute_feature(
block: torch.Tensor,
ilniqe: bool = False,
) -> torch.Tensor:
"""Compute features.
Args:
block (Tensor): Image block in shape (b, c, h, w).
Returns:
list: Features with length of 18.
"""
bsz = block.shape[0]
aggd_block = block[:, [0]]
alpha, beta_l, beta_r = estimate_aggd_param(aggd_block)
feat = [alpha, (beta_l + beta_r) / 2]
# distortions disturb the fairly regular structure of natural images.
# This deviation can be captured by analyzing the sample distribution of
# the products of pairs of adjacent coefficients computed along
# horizontal, vertical and diagonal orientations.
shifts = [[0, 1], [1, 0], [1, 1], [1, -1]]
for i in range(len(shifts)):
shifted_block = torch.roll(aggd_block, shifts[i], dims=(2, 3))
alpha, beta_l, beta_r = estimate_aggd_param(aggd_block * shifted_block)
# Eq. 8
mean = (beta_r - beta_l) * (
torch.lgamma(2 / alpha) - torch.lgamma(1 / alpha)
).exp()
feat.extend((alpha, mean, beta_l, beta_r))
feat = [x.reshape(bsz, 1) for x in feat]
if ilniqe:
tmp_block = block[:, 1:4]
channels = 4 - 1
shape_scale = fitweibull(tmp_block.reshape(bsz * channels, -1))
scale_shape = shape_scale[:, [1, 0]].reshape(bsz, -1)
feat.append(scale_shape)
mu = torch.mean(block[:, 4:7], dim=(2, 3))
sigmaSquare = torch.var(block[:, 4:7], dim=(2, 3))
mu_sigma = torch.stack((mu, sigmaSquare), dim=-1).reshape(bsz, -1)
feat.append(mu_sigma)
channels = 85 - 7
tmp_block = block[:, 7:85].reshape(bsz * channels, 1, *block.shape[2:])
alpha_data, beta_l_data, beta_r_data = estimate_aggd_param(tmp_block)
alpha_data = alpha_data.reshape(bsz, channels)
beta_l_data = beta_l_data.reshape(bsz, channels)
beta_r_data = beta_r_data.reshape(bsz, channels)
alpha_beta = torch.stack(
[alpha_data, (beta_l_data + beta_r_data) / 2], dim=-1
).reshape(bsz, -1)
feat.append(alpha_beta)
tmp_block = block[:, 85:109]
channels = 109 - 85
shape_scale = fitweibull(tmp_block.reshape(bsz * channels, -1))
scale_shape = shape_scale[:, [1, 0]].reshape(bsz, -1)
feat.append(scale_shape)
feat = torch.cat(feat, dim=-1)
return feat
def niqe(
img: torch.Tensor,
mu_pris_param: torch.Tensor,
cov_pris_param: torch.Tensor,
block_size_h: int = 96,
block_size_w: int = 96,
) -> torch.Tensor:
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Args:
img (Tensor): Input image.
mu_pris_param (Tensor): Mean of a pre-defined multivariate Gaussian
model calculated on the pristine dataset.
cov_pris_param (Tensor): Covariance of a pre-defined multivariate
Gaussian model calculated on the pristine dataset.
gaussian_window (Tensor): A 7x7 Gaussian window used for smoothing the image.
        block_size_h (int): Height of the blocks into which the image is divided.
            Default: 96 (the official recommended value).
        block_size_w (int): Width of the blocks into which the image is divided.
            Default: 96 (the official recommended value).
"""
assert (
img.ndim == 4
), "Input image must be a gray or Y (of YCbCr) image with shape (b, c, h, w)."
# crop image
b, c, h, w = img.shape
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
img = img[..., 0 : num_block_h * block_size_h, 0 : num_block_w * block_size_w]
distparam = [] # dist param is actually the multiscale features
for scale in (1, 2): # perform on two scales (1, 2)
img_normalized = normalize_img_with_guass(img, padding="replicate")
distparam.append(
blockproc(
img_normalized,
[block_size_h // scale, block_size_w // scale],
fun=compute_feature,
)
)
if scale == 1:
img = imresize(img / 255.0, scale=0.5, antialiasing=True)
img = img * 255.0
distparam = torch.cat(distparam, -1)
# fit a MVG (multivariate Gaussian) model to distorted patch features
mu_distparam = nanmean(distparam, dim=1)
cov_distparam = nancov(distparam)
# compute niqe quality, Eq. 10 in the paper
invcov_param = torch.linalg.pinv((cov_pris_param + cov_distparam) / 2)
diff = (mu_pris_param - mu_distparam).unsqueeze(1)
quality = torch.bmm(torch.bmm(diff, invcov_param), diff.transpose(1, 2)).squeeze()
quality = torch.sqrt(quality)
return quality
def calculate_niqe(
img: torch.Tensor,
crop_border: int = 0,
test_y_channel: bool = True,
pretrained_model_path: str = None,
color_space: str = "yiq",
**kwargs,
) -> torch.Tensor:
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Args:
img (Tensor): Input image whose quality needs to be computed.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
        test_y_channel (Bool): Whether to convert to the 'y' channel (of MATLAB YCbCr) or 'gray'.
pretrained_model_path (str): The pretrained model path.
Returns:
Tensor: NIQE result.
"""
params = scipy.io.loadmat(pretrained_model_path)
mu_pris_param = np.ravel(params["mu_prisparam"])
cov_pris_param = params["cov_prisparam"]
mu_pris_param = torch.from_numpy(mu_pris_param).to(img)
cov_pris_param = torch.from_numpy(cov_pris_param).to(img)
mu_pris_param = mu_pris_param.repeat(img.size(0), 1)
cov_pris_param = cov_pris_param.repeat(img.size(0), 1, 1)
if test_y_channel and img.shape[1] == 3:
print(img.shape)
img = to_y_channel(img, 255, color_space)
img = diff_round(img)
img = img.to(torch.float64)
if crop_border != 0:
img = img[..., crop_border:-crop_border, crop_border:-crop_border]
niqe_result = niqe(img, mu_pris_param, cov_pris_param)
return niqe_result
def gauDerivative(sigma, in_ch=1, out_ch=1, device=None):
halfLength = math.ceil(3 * sigma)
x, y = np.meshgrid(
np.linspace(-halfLength, halfLength, 2 * halfLength + 1),
np.linspace(-halfLength, halfLength, 2 * halfLength + 1),
)
gauDerX = x * np.exp(-(x ** 2 + y ** 2) / 2 / sigma / sigma)
gauDerY = y * np.exp(-(x ** 2 + y ** 2) / 2 / sigma / sigma)
dx = torch.from_numpy(gauDerX).to(device)
dy = torch.from_numpy(gauDerY).to(device)
dx = dx.repeat(out_ch, in_ch, 1, 1)
dy = dy.repeat(out_ch, in_ch, 1, 1)
return dx, dy
def ilniqe(
img: torch.Tensor,
mu_pris_param: torch.Tensor,
cov_pris_param: torch.Tensor,
principleVectors: torch.Tensor,
meanOfSampleData: torch.Tensor,
resize: bool = True,
block_size_h: int = 84,
block_size_w: int = 84,
) -> torch.Tensor:
"""Calculate IL-NIQE (Integrated Local Natural Image Quality Evaluator) metric.
Args:
img (Tensor): Input image.
mu_pris_param (Tensor): Mean of a pre-defined multivariate Gaussian
model calculated on the pristine dataset.
cov_pris_param (Tensor): Covariance of a pre-defined multivariate
Gaussian model calculated on the pristine dataset.
principleVectors (Tensor): Features from official .mat file.
meanOfSampleData (Tensor): Features from official .mat file.
        resize (Boolean): Whether to resize the input image. Default: True.
        block_size_h (int): Height of the blocks into which the image is divided.
            Default: 84 (the official recommended value).
        block_size_w (int): Width of the blocks into which the image is divided.
            Default: 84 (the official recommended value).
"""
assert (
img.ndim == 4
), "Input image must be a gray or Y (of YCbCr) image with shape (b, c, h, w)."
sigmaForGauDerivative = 1.66
KforLog = 0.00001
normalizedWidth = 524
minWaveLength = 2.4
sigmaOnf = 0.55
mult = 1.31
dThetaOnSigma = 1.10
scaleFactorForLoG = 0.87
scaleFactorForGaussianDer = 0.28
sigmaForDownsample = 0.9
EPS = 1e-8
scales = 3
orientations = 4
infConst = 10000
nanConst = 2000
if resize:
img = imresize(img, sizes=(normalizedWidth, normalizedWidth))
img = img.clamp(0.0, 255.0)
# crop image
b, c, h, w = img.shape
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
img = img[..., 0 : num_block_h * block_size_h, 0 : num_block_w * block_size_w]
ospace_weight = torch.tensor(
[
[0.3, 0.04, -0.35],
[0.34, -0.6, 0.17],
[0.06, 0.63, 0.27],
]
).to(img)
O_img = img.permute(0, 2, 3, 1) @ ospace_weight.T
O_img = O_img.permute(0, 3, 1, 2)
distparam = [] # dist param is actually the multiscale features
for scale in (1, 2): # perform on two scales (1, 2)
struct_dis = normalize_img_with_guass(
O_img[:, [2]], kernel_size=5, sigma=5.0 / 6, padding="replicate"
)
dx, dy = gauDerivative(
sigmaForGauDerivative / (scale ** scaleFactorForGaussianDer), device=img
)
Ix = conv2d(O_img, dx.repeat(3, 1, 1, 1), groups=3)
Iy = conv2d(O_img, dy.repeat(3, 1, 1, 1), groups=3)
GM = torch.sqrt(Ix ** 2 + Iy ** 2 + EPS)
Ixy = torch.stack((Ix, Iy), dim=2).reshape(
Ix.shape[0], Ix.shape[1] * 2, *Ix.shape[2:]
) # reshape to (IxO1, IxO1, IxO2, IyO2, IxO3, IyO3)
logRGB = torch.log(img + KforLog)
logRGBMS = logRGB - logRGB.mean(dim=(2, 3), keepdim=True)
Intensity = logRGBMS.sum(dim=1, keepdim=True) / np.sqrt(3)
BY = (logRGBMS[:, [0]] + logRGBMS[:, [1]] - 2 * logRGBMS[:, [2]]) / np.sqrt(6)
RG = (logRGBMS[:, [0]] - logRGBMS[:, [1]]) / np.sqrt(2)
compositeMat = torch.cat([struct_dis, GM, Intensity, BY, RG, Ixy], dim=1)
O3 = O_img[:, [2]]
# gabor filter in shape (b, ori * scale, h, w)
LGFilters = _construct_filters(
O3,
scales=scales,
orientations=orientations,
min_length=minWaveLength / (scale ** scaleFactorForLoG),
sigma_f=sigmaOnf,
mult=mult,
delta_theta=dThetaOnSigma,
use_lowpass_filter=False,
)
# reformat to scale * ori
b, _, h, w = LGFilters.shape
LGFilters = (
LGFilters.reshape(b, orientations, scales, h, w)
.transpose(1, 2)
.reshape(b, -1, h, w)
)
# TODO: current filters needs to be transposed to get same results as matlab, find the bug
LGFilters = LGFilters.transpose(-1, -2)
fftIm = torch.fft.fft2(O3)
logResponse = []
partialDer = []
GM = []
for index in range(LGFilters.shape[1]):
filter = LGFilters[:, [index]]
response = torch.fft.ifft2(filter * fftIm)
realRes = torch.real(response)
imagRes = torch.imag(response)
partialXReal = conv2d(realRes, dx)
partialYReal = conv2d(realRes, dy)
realGM = torch.sqrt(partialXReal ** 2 + partialYReal ** 2 + EPS)
partialXImag = conv2d(imagRes, dx)
partialYImag = conv2d(imagRes, dy)
imagGM = torch.sqrt(partialXImag ** 2 + partialYImag ** 2 + EPS)
logResponse.append(realRes)
logResponse.append(imagRes)
partialDer.append(partialXReal)
partialDer.append(partialYReal)
partialDer.append(partialXImag)
partialDer.append(partialYImag)
GM.append(realGM)
GM.append(imagGM)
logResponse = torch.cat(logResponse, dim=1)
partialDer = torch.cat(partialDer, dim=1)
GM = torch.cat(GM, dim=1)
compositeMat = torch.cat((compositeMat, logResponse, partialDer, GM), dim=1)
distparam.append(
blockproc(
compositeMat,
[block_size_h // scale, block_size_w // scale],
fun=compute_feature,
ilniqe=True,
)
)
gauForDS = fspecial(math.ceil(6 * sigmaForDownsample), sigmaForDownsample).to(
img
)
filterResult = imfilter(
O_img, gauForDS.repeat(3, 1, 1, 1), padding="replicate", groups=3
)
O_img = filterResult[..., ::2, ::2]
filterResult = imfilter(
img, gauForDS.repeat(3, 1, 1, 1), padding="replicate", groups=3
)
img = filterResult[..., ::2, ::2]
distparam = torch.cat(distparam, dim=-1) # b, block_num, feature_num
distparam[distparam > infConst] = infConst
# fit a MVG (multivariate Gaussian) model to distorted patch features
coefficientsViaPCA = torch.bmm(
principleVectors.transpose(1, 2),
(distparam - meanOfSampleData.unsqueeze(1)).transpose(1, 2),
)
final_features = coefficientsViaPCA.transpose(1, 2)
b, blk_num, feat_num = final_features.shape
# remove block features with nan and compute nonan cov
cov_distparam = nancov(final_features)
# replace nan in final features with mu
mu_final_features = nanmean(final_features, dim=1, keepdim=True)
final_features_withmu = torch.where(
torch.isnan(final_features), mu_final_features, final_features
)
# compute ilniqe quality
invcov_param = torch.linalg.pinv((cov_pris_param + cov_distparam) / 2)
diff = final_features_withmu - mu_pris_param.unsqueeze(1)
quality = (torch.bmm(diff, invcov_param) * diff).sum(dim=-1)
quality = torch.sqrt(quality).mean(dim=1)
return quality
def calculate_ilniqe(
img: torch.Tensor, crop_border: int = 0, pretrained_model_path: str = None, **kwargs
) -> torch.Tensor:
"""Calculate IL-NIQE metric.
Args:
img (Tensor): Input image whose quality needs to be computed.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (str): The pretrained model path.
Returns:
Tensor: IL-NIQE result.
"""
params = scipy.io.loadmat(pretrained_model_path)
img = img * 255.0
img = diff_round(img)
# float64 precision is critical to be consistent with matlab codes
img = img.to(torch.float64)
mu_pris_param = np.ravel(params["templateModel"][0][0])
cov_pris_param = params["templateModel"][0][1]
meanOfSampleData = np.ravel(params["templateModel"][0][2])
principleVectors = params["templateModel"][0][3]
mu_pris_param = torch.from_numpy(mu_pris_param).to(img)
cov_pris_param = torch.from_numpy(cov_pris_param).to(img)
meanOfSampleData = torch.from_numpy(meanOfSampleData).to(img)
principleVectors = torch.from_numpy(principleVectors).to(img)
mu_pris_param = mu_pris_param.repeat(img.size(0), 1)
cov_pris_param = cov_pris_param.repeat(img.size(0), 1, 1)
meanOfSampleData = meanOfSampleData.repeat(img.size(0), 1)
principleVectors = principleVectors.repeat(img.size(0), 1, 1)
if crop_border != 0:
img = img[..., crop_border:-crop_border, crop_border:-crop_border]
ilniqe_result = ilniqe(
img, mu_pris_param, cov_pris_param, principleVectors, meanOfSampleData
)
return ilniqe_result
@ARCH_REGISTRY.register()
class NIQE(torch.nn.Module):
r"""Args:
channels (int): Number of processed channel.
test_y_channel (bool): whether to use y channel on ycbcr.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (str): The pretrained model path.
References:
Mittal, Anish, Rajiv Soundararajan, and Alan C. Bovik.
"Making a “completely blind” image quality analyzer."
IEEE Signal Processing Letters (SPL) 20.3 (2012): 209-212.
"""
def __init__(
self,
channels: int = 1,
test_y_channel: bool = True,
color_space: str = "yiq",
crop_border: int = 0,
pretrained_model_path: str = None,
) -> None:
super(NIQE, self).__init__()
self.channels = channels
self.test_y_channel = test_y_channel
self.color_space = color_space
self.crop_border = crop_border
if pretrained_model_path is not None:
self.pretrained_model_path = pretrained_model_path
else:
self.pretrained_model_path = load_file_from_url(default_model_urls["url"])
def forward(self, X: torch.Tensor) -> torch.Tensor:
r"""Computation of NIQE metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Returns:
            Value of NIQE metric (lower indicates better quality).
"""
score = calculate_niqe(
X,
self.crop_border,
self.test_y_channel,
self.pretrained_model_path,
self.color_space,
)
return score
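# Usage sketch (hedged): NIQE is no-reference, so only the test image is needed. The
# pristine-model parameters (.mat) are downloaded automatically when no local path is
# supplied; the input is assumed large enough to contain at least one 96x96 block.
#   >>> import torch
#   >>> metric = NIQE()
#   >>> img = torch.rand(1, 3, 384, 512)
#   >>> score = metric(img)   # lower NIQE generally indicates better quality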
@ARCH_REGISTRY.register()
class ILNIQE(torch.nn.Module):
r"""Args:
channels (int): Number of processed channel.
test_y_channel (bool): whether to use y channel on ycbcr.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (str): The pretrained model path.
References:
Zhang, Lin, Lei Zhang, and Alan C. Bovik. "A feature-enriched
completely blind image quality evaluator." IEEE Transactions
on Image Processing 24.8 (2015): 2579-2591.
"""
def __init__(
self, channels: int = 3, crop_border: int = 0, pretrained_model_path: str = None
) -> None:
super(ILNIQE, self).__init__()
self.channels = channels
self.crop_border = crop_border
if pretrained_model_path is not None:
self.pretrained_model_path = pretrained_model_path
else:
self.pretrained_model_path = load_file_from_url(
default_model_urls["ilniqe"]
)
def forward(self, X: torch.Tensor) -> torch.Tensor:
r"""Computation of NIQE metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of niqe metric in [0, 1] range.
"""
score = calculate_ilniqe(X, self.crop_border, self.pretrained_model_path)
return score
| 20,124 | 35.196043 | 120 | py |
BVQI | BVQI-master/pyiqa/losses/losses.py | import math
import torch
from torch import autograd as autograd
from torch import nn as nn
from torch.nn import functional as F
from pyiqa.utils.registry import LOSS_REGISTRY
from .loss_util import weighted_loss
_reduction_modes = ["none", "mean", "sum"]
@weighted_loss
def l1_loss(pred, target):
return F.l1_loss(pred, target, reduction="none")
@weighted_loss
def mse_loss(pred, target):
return F.mse_loss(pred, target, reduction="none")
@weighted_loss
def cross_entropy(pred, target):
return F.cross_entropy(pred, target, reduction="none")
@weighted_loss
def nll_loss(pred, target):
return F.nll_loss(pred, target, reduction="none")
@weighted_loss
def charbonnier_loss(pred, target, eps=1e-12):
return torch.sqrt((pred - target) ** 2 + eps)
@LOSS_REGISTRY.register()
class L1Loss(nn.Module):
"""L1 (mean absolute error, MAE) loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
"""
def __init__(self, loss_weight=1.0, reduction="mean"):
super(L1Loss, self).__init__()
if reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}"
)
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self, pred, target, weight=None, **kwargs):
"""
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
"""
return self.loss_weight * l1_loss(
pred, target, weight, reduction=self.reduction
)
@LOSS_REGISTRY.register()
class MSELoss(nn.Module):
"""MSE (L2) loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
"""
def __init__(self, loss_weight=1.0, reduction="mean"):
super(MSELoss, self).__init__()
if reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}"
)
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self, pred, target, weight=None, **kwargs):
"""
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
"""
return self.loss_weight * mse_loss(
pred, target, weight, reduction=self.reduction
)
@LOSS_REGISTRY.register()
class CrossEntropyLoss(nn.Module):
"""MSE (L2) loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
"""
def __init__(self, loss_weight=1.0, reduction="mean"):
super().__init__()
if reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}"
)
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self, pred, target, weight=None, **kwargs):
"""
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
"""
return self.loss_weight * cross_entropy(
pred, target, weight, reduction=self.reduction
)
@LOSS_REGISTRY.register()
class NLLLoss(nn.Module):
"""MSE (L2) loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
"""
def __init__(self, loss_weight=1.0, reduction="mean"):
super().__init__()
if reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}"
)
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self, pred, target, weight=None, **kwargs):
"""
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
"""
return self.loss_weight * nll_loss(
pred, target, weight, reduction=self.reduction
)
@LOSS_REGISTRY.register()
class CharbonnierLoss(nn.Module):
"""Charbonnier loss (one variant of Robust L1Loss, a differentiable
variant of L1Loss).
Described in "Deep Laplacian Pyramid Networks for Fast and Accurate
Super-Resolution".
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
eps (float): A value used to control the curvature near zero. Default: 1e-12.
"""
def __init__(self, loss_weight=1.0, reduction="mean", eps=1e-12):
super(CharbonnierLoss, self).__init__()
if reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}"
)
self.loss_weight = loss_weight
self.reduction = reduction
self.eps = eps
def forward(self, pred, target, weight=None, **kwargs):
"""
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
"""
return self.loss_weight * charbonnier_loss(
pred, target, weight, eps=self.eps, reduction=self.reduction
)
@LOSS_REGISTRY.register()
class WeightedTVLoss(L1Loss):
"""Weighted TV loss.
Args:
loss_weight (float): Loss weight. Default: 1.0.
"""
def __init__(self, loss_weight=1.0, reduction="mean"):
if reduction not in ["mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: mean | sum"
)
super(WeightedTVLoss, self).__init__(
loss_weight=loss_weight, reduction=reduction
)
def forward(self, pred, weight=None):
if weight is None:
y_weight = None
x_weight = None
else:
y_weight = weight[:, :, :-1, :]
x_weight = weight[:, :, :, :-1]
y_diff = super().forward(pred[:, :, :-1, :], pred[:, :, 1:, :], weight=y_weight)
x_diff = super().forward(pred[:, :, :, :-1], pred[:, :, :, 1:], weight=x_weight)
loss = x_diff + y_diff
return loss
| 7,764 | 31.763713 | 98 | py |
BVQI | BVQI-master/pyiqa/losses/loss_util.py | import functools
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are 'none', 'mean' and 'sum'.
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction="mean"):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
'none', 'mean' and 'sum'. Default: 'mean'.
Returns:
Tensor: Loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
# if weight is not specified or reduction is sum, just reduce the loss
if weight is None or reduction == "sum":
loss = reduce_loss(loss, reduction)
# if reduction is mean, then compute mean over weight region
elif reduction == "mean":
if weight.size(1) > 1:
weight = weight.sum()
else:
weight = weight.sum() * loss.size(1)
loss = loss.sum() / weight
return loss
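# Small illustration of the weighting rule above: with a binary mask and
# reduction='mean', the loss is averaged only over the weighted region.
#   >>> import torch
#   >>> loss = torch.tensor([[[[1., 2.], [3., 4.]]]])     # shape (1, 1, 2, 2)
#   >>> weight = torch.tensor([[[[1., 0.], [1., 0.]]]])   # keep the left column
#   >>> weight_reduce_loss(loss, weight, 'mean')          # (1 + 3) / 2 = tensor(2.)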
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
**kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction="mean", **kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction)
return loss
return wrapper
| 2,904 | 28.948454 | 78 | py |
BVQI | BVQI-master/pyiqa/losses/__init__.py | from copy import deepcopy
from pyiqa.utils import get_root_logger
from pyiqa.utils.registry import LOSS_REGISTRY
from .iqa_losses import EMDLoss, NiNLoss, PLCCLoss
from .losses import CharbonnierLoss, L1Loss, MSELoss, WeightedTVLoss
__all__ = [
"L1Loss",
"MSELoss",
"CharbonnierLoss",
"WeightedTVLoss",
"EMDLoss",
"PLCCLoss",
"NiNLoss",
]
def build_loss(opt):
"""Build loss from options.
Args:
opt (dict): Configuration. It must contain:
type (str): Model type.
"""
opt = deepcopy(opt)
loss_type = opt.pop("type")
loss = LOSS_REGISTRY.get(loss_type)(**opt)
logger = get_root_logger()
logger.info(f"Loss [{loss.__class__.__name__}] is created.")
return loss
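# Hedged example of the options dict this factory expects: 'type' selects the
# registered loss class and the remaining keys are forwarded to its constructor.
#   >>> opt = {'type': 'PLCCLoss', 'loss_weight': 1.0}
#   >>> criterion = build_loss(opt)   # logs: Loss [PLCCLoss] is created.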
| 747 | 21.666667 | 68 | py |
BVQI | BVQI-master/pyiqa/losses/iqa_losses.py | import numpy as np
import torch
from cv2 import reduce
from torch import nn as nn
from torch.nn import functional as F
from pyiqa.utils.registry import LOSS_REGISTRY
from .loss_util import weighted_loss
_reduction_modes = ["none", "mean", "sum"]
@weighted_loss
def emd_loss(pred, target, r=2):
"""
Args:
pred (Tensor): of shape (N, C). Predicted tensor.
target (Tensor): of shape (N, C). Ground truth tensor.
r (float): norm level, default l2 norm.
"""
loss = torch.abs(torch.cumsum(pred, dim=-1) - torch.cumsum(target, dim=-1)) ** r
loss = loss.mean(dim=-1) ** (1.0 / r)
return loss
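# Worked sketch (illustrative values): EMD compares cumulative distributions, so both
# inputs are assumed to be probability vectors over the same ordered score bins.
#   >>> import torch
#   >>> pred = torch.tensor([[0.1, 0.2, 0.4, 0.2, 0.1]])
#   >>> target = torch.tensor([[0.0, 0.1, 0.5, 0.3, 0.1]])
#   >>> emd_loss(pred, target, r=2)   # small positive scalar (~0.11 here)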
@LOSS_REGISTRY.register()
class EMDLoss(nn.Module):
"""EMD (earth mover distance) loss."""
def __init__(self, loss_weight=1.0, r=2, reduction="mean"):
super(EMDLoss, self).__init__()
if reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}"
)
self.loss_weight = loss_weight
self.r = r
self.reduction = reduction
def forward(self, pred, target, weight=None, **kwargs):
return self.loss_weight * emd_loss(
pred, target, r=self.r, weight=weight, reduction=self.reduction
)
def plcc_loss(pred, target):
"""
Args:
pred (Tensor): of shape (N, 1). Predicted tensor.
target (Tensor): of shape (N, 1). Ground truth tensor.
"""
batch_size = pred.shape[0]
if batch_size > 1:
vx = pred - pred.mean()
vy = target - target.mean()
loss = F.normalize(vx, p=2, dim=0) * F.normalize(vy, p=2, dim=0)
loss = (1 - loss.sum()) / 2 # normalize to [0, 1]
else:
loss = F.l1_loss(pred, target)
return loss.mean()
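# Sanity-check sketch: any positive linear rescaling of the targets gives a loss
# near 0, while anti-correlated predictions give a loss near 1.
#   >>> import torch
#   >>> target = torch.arange(8, dtype=torch.float32).reshape(-1, 1)
#   >>> plcc_loss(2 * target + 1, target)   # ~0
#   >>> plcc_loss(-target, target)          # ~1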
@LOSS_REGISTRY.register()
class PLCCLoss(nn.Module):
"""PLCC loss, induced from Pearson’s Linear Correlation Coefficient."""
def __init__(self, loss_weight=1.0):
super(PLCCLoss, self).__init__()
self.loss_weight = loss_weight
def forward(self, pred, target):
return self.loss_weight * plcc_loss(pred, target)
@LOSS_REGISTRY.register()
class RankLoss(nn.Module):
"""Monotonicity regularization loss, will be zero when rankings of pred and target are the same.
Reference:
- https://github.com/lidq92/LinearityIQA/blob/master/IQAloss.py
"""
def __init__(self, detach=False, loss_weight=1.0):
super(RankLoss, self).__init__()
self.loss_weight = loss_weight
def forward(self, pred, target):
if pred.size(0) > 1: #
ranking_loss = F.relu((pred - pred.t()) * torch.sign((target.t() - target)))
scale = 1 + torch.max(ranking_loss.detach())
loss = ranking_loss.mean() / scale
else:
loss = F.l1_loss(pred, target.detach()) # 0 for batch with single sample.
return self.loss_weight * loss
def norm_loss_with_normalization(pred, target, p, q):
"""
Args:
pred (Tensor): of shape (N, 1). Predicted tensor.
target (Tensor): of shape (N, 1). Ground truth tensor.
"""
batch_size = pred.shape[0]
if batch_size > 1:
vx = pred - pred.mean()
vy = target - target.mean()
scale = np.power(2, p) * np.power(batch_size, max(0, 1 - p / q)) # p, q>0
norm_pred = F.normalize(vx, p=q, dim=0)
norm_target = F.normalize(vy, p=q, dim=0)
loss = torch.norm(norm_pred - norm_target, p=p) / scale
else:
loss = F.l1_loss(pred, target)
return loss.mean()
@LOSS_REGISTRY.register()
class NiNLoss(nn.Module):
"""NiN (Norm in Norm) loss
Reference:
- Dingquan Li, Tingting Jiang, and Ming Jiang. Norm-in-Norm Loss with Faster Convergence and Better
Performance for Image Quality Assessment. ACMM2020.
- https://arxiv.org/abs/2008.03889
- https://github.com/lidq92/LinearityIQA
This loss can be simply described as: l1_norm(normalize(pred - pred_mean), normalize(target - target_mean))
"""
def __init__(self, loss_weight=1.0, p=1, q=2):
super(NiNLoss, self).__init__()
self.loss_weight = loss_weight
self.p = p
self.q = q
def forward(self, pred, target):
return self.loss_weight * norm_loss_with_normalization(
pred, target, self.p, self.q
)
| 4,414 | 29.874126 | 111 | py |
BVQI | BVQI-master/V1_extraction/extract_multi_scale_v1_features.py | import os
import cv2
import torch
import numpy as np
import pandas as pd
from time import time
from sklearn import decomposition
from torchvision.transforms import transforms
from gabor_filter import GaborFilters
# resolution: 960*540, 480*270, 240*135, 120*67
# downsample rate: 1.0, 0.5, 0.25, 0.125
if __name__ == '__main__':
data_name = 'konvid1k'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
if data_name == 'konvid1k':
data_path = '/mnt/lustre/lliao/Dataset/KoNViD_1k/KoNViD_1k_videos/'
elif data_name == 'livevqc':
data_path = '/mnt/lustre/lliao/Dataset/LIVE-VQC/Video'
else:
raise NotImplementedError
feat_path = './features'
save_path = os.path.join(feat_path, data_name + 'multi_scale')
if not os.path.exists(save_path): os.makedirs(save_path)
meta_data = pd.read_csv(
os.path.join(feat_path, data_name + '_metadata.csv'))
video_num = len(meta_data)
width_list = [960, 480, 240, 120]
height_list = [540, 270, 135, 67]
#downsample_rate_list = [1.0, 0.5, 0.25, 0.125]
scale = 5
orientations = 8
kernel_size = 19
row_downsample = 4
column_downsample = 4
pca_d = 10
gb = GaborFilters(scale,
orientations, (kernel_size - 1) // 2,
row_downsample,
column_downsample,
device=device)
for vn in range(video_num):
start_time = time()
if data_name == 'konvid1k':
video_name = os.path.join(data_path,
'{}.mp4'.format(meta_data.flickr_id[vn]))
elif data_name == 'livevqc':
video_name = os.path.join(data_path, meta_data.File[vn])
video_capture = cv2.VideoCapture(video_name)
frame_num = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
video_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
video_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
v1_features = []
transform_list = []
for i in range(len(width_list)):
width = width_list[i]
height = height_list[i]
v1_features.append(
torch.zeros(
frame_num,
(scale * orientations * round(width / column_downsample) *
round(height / row_downsample))))
transform_list.append(
transforms.Compose([
transforms.ToTensor(),
transforms.Resize((height, width))
]))
# for i in range(len(downsample_rate_list)):
# width = int(video_width * downsample_rate_list[i])
# height = int(video_height * downsample_rate_list[i])
# v1_features.append(
# torch.zeros(
# frame_num,
# (scale * orientations * round(width / column_downsample) *
# round(height / row_downsample))))
# transform_list.append(
# transforms.Compose([
# transforms.ToTensor(),
# transforms.Resize((height, width))
# ]))
count = 0
while True:
success, frame = video_capture.read()
if not success:
break
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
for i in range(len(width_list)):# + len(downsample_rate_list)):
frame = transform_list[i](frame_gray)
frame_imag = torch.zeros(frame.size())
frame = torch.stack((frame, frame_imag), 3)
frame = torch.view_as_complex(frame)
frame = frame[None, :, :, :]
frame = frame.to(device)
v1_features[i][count, :] = gb(frame).detach().cpu()
count += 1
for i in range(len(width_list)):# + len(downsample_rate_list)):
v1_features[i] = torch.nan_to_num(v1_features[i])
v1_features[i] = v1_features[i].numpy()
pca = decomposition.PCA(pca_d)
v1_features[i] = pca.fit_transform(v1_features[i])
np.save(
os.path.join(
save_path,
'{}_{}.npy'.format(i, os.path.split(video_name)[-1])),
v1_features[i])
end_time = time()
print('Video {}, {}s elapsed running in {}'.format(
vn, end_time - start_time, device))
| 4,553 | 34.578125 | 80 | py |
BVQI | BVQI-master/V1_extraction/utilities.py | import numpy as np
from sklearn import linear_model
import scipy
from scipy.optimize import curve_fit
from sklearn.metrics import mean_squared_error
import cv2
from scipy import signal
from sklearn.preprocessing import StandardScaler
def compute_v1_curvature(features):
len = features.shape[0]
curvatures = np.zeros((len-2, 1))
theta = np.zeros((len-2, 1))
distance = np.zeros((len-2, 1))
for fn in range(1, len - 1):
prev = features[fn - 1, :]
cur = features[fn, :]
next = features[fn + 1, :]
numerator = np.dot((next - cur).T, (cur-prev)).squeeze()
denominator = np.linalg.norm(next - cur) * np.linalg.norm(cur - prev)
if denominator<0.0001 or np.abs(numerator)<0.0001:
theta = 3.141592/2
else:
theta = np.arccos(numerator / (0.000001+denominator))
cos_alpha = theta*np.power(np.linalg.norm(next - prev), 1)
#cos_alpha = np.arccos(numerator / (0.000001+denominator))*np.power(np.linalg.norm(next - prev), 0.5)
curvatures[fn - 1] = cos_alpha
# theta[fn - 1] = np.arccos(numerator / (0.000001+denominator))
# distance[fn - 1] = np.linalg.norm(next - prev)
#
# if np.isnan(theta[fn - 1])| np.isposinf(theta[fn - 1]) | np.isneginf(theta[fn - 1]):
# theta = 0
# mu = np.mean(distance)
# sigma = np.std(distance)
#
# mu_niqe = np.mean(theta)
# sigma_niqe = np.std(theta)
#
# theta = (theta-mu_niqe)/sigma_niqe*sigma+mu
# for fn in range(1, len - 1):
# curvatures[fn - 1] = (theta[fn - 1]-np.min(theta))*distance[fn - 1]
# for fn in range(1, len - 1):
# curvatures[fn - 1] = theta[fn - 1]*distance[fn - 1]/np.mean(distance)
return curvatures
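def _v1_curvature_usage_sketch():
    # Minimal usage sketch with synthetic data (not a dataset result): curvature of
    # a toy feature trajectory, mirroring how the STEM scripts call this helper on
    # PCA-reduced V1 features of shape (n_frames, d).
    rng = np.random.default_rng(0)
    toy_features = np.cumsum(rng.standard_normal((12, 5)), axis=0)  # fake trajectory
    toy_curv = compute_v1_curvature(toy_features)  # shape (n_frames - 2, 1)
    return float(np.mean(toy_curv))  # the per-video score is the (log-)mean downstream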
def compute_discrete_v1_curvature(features):
len = features.shape[0]
curvatures = np.zeros((len//3, 1))
theta = np.zeros((len//3, 1))
distance = np.zeros((len//3, 1))
for fn in range(0, len//3):
prev = features[fn*3]
cur = features[fn*3+1]
next = features[fn*3+2]
numerator = np.dot((next - cur).T, (cur-prev)).squeeze()
denominator = np.linalg.norm(next - cur) * np.linalg.norm(cur - prev)
if denominator<0.0001 or np.abs(numerator)<0.0001:
theta = 3.141592/2
else:
theta = np.arccos(numerator / (0.000001+denominator))
cos_alpha = theta*np.power(np.linalg.norm(next - prev), 1)
#cos_alpha = np.arccos(numerator / (0.000001+denominator))*np.power(np.linalg.norm(next - prev), 0.5)
        curvatures[fn] = cos_alpha
# theta[fn - 1] = np.arccos(numerator / (0.000001+denominator))
# distance[fn - 1] = np.linalg.norm(next - prev)
#
# if np.isnan(theta[fn - 1])| np.isposinf(theta[fn - 1]) | np.isneginf(theta[fn - 1]):
# theta = 0
# mu = np.mean(distance)
# sigma = np.std(distance)
#
# mu_niqe = np.mean(theta)
# sigma_niqe = np.std(theta)
#
# theta = (theta-mu_niqe)/sigma_niqe*sigma+mu
# for fn in range(1, len - 1):
# curvatures[fn - 1] = (theta[fn - 1]-np.min(theta))*distance[fn - 1]
# for fn in range(1, len - 1):
# curvatures[fn - 1] = theta[fn - 1]*distance[fn - 1]/np.mean(distance)
return curvatures
def compute_lgn_curvature(features):
len = features.shape[0]
curvatures = np.zeros((len-2, 1))
theta = np.zeros((len-2, 1))
distance = np.zeros((len-2, 1))
for fn in range(1, len - 1):
prev = features[fn - 1, :]
cur = features[fn, :]
next = features[fn + 1, :]
numerator = np.dot((next - cur).T, (cur-prev)).squeeze()
denominator = np.linalg.norm(next - cur) * np.linalg.norm(cur - prev)
if denominator<0.0001 or np.abs(numerator)<0.0001:
theta = 3.141592/2
else:
theta = np.arccos(numerator / (0.000001+denominator))
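        # Same turning-angle curvature as compute_v1_curvature, except the chord
        # length ||next - prev|| is compressed with a 0.25 exponent for the LGN
        # trajectory.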
cos_alpha = np.power(theta*np.power(np.linalg.norm(next - prev), 0.25), 1)
#cos_alpha = np.arccos(numerator / (0.000001+denominator))*np.power(np.linalg.norm(next - prev), 0.5)
curvatures[fn - 1] = cos_alpha
return curvatures
def linear_pred(features, k):
len = features.shape[0]
linear_error = np.zeros((len - k, 1))
for fn in range(1, len - k):
prev = features[fn:fn+k, :].T
cur = features[fn+k, :]
lr = linear_model.LinearRegression()
lr.fit(prev, cur)
pred = lr.predict(prev)
linear_error[fn] = np.linalg.norm(cur-pred, 1)
return linear_error
def extract_img_features(video_name, rate):
vid_cap = cv2.VideoCapture(video_name)
img_features = []
while 1:
ret, img = vid_cap.read()
if not ret:
break
width = int(img.shape[1] * rate)
height = int(img.shape[0] * rate)
img = cv2.resize(img, dsize=(width,height))
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_gray_norm = cv2.normalize(img_gray,
None,
alpha=0,
beta=1,
norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F)
img_features.append(
img_gray_norm.reshape(
img_gray_norm.shape[0] * img_gray_norm.shape[1], 1))
img_features = np.array(img_features).squeeze()
return img_features
def geometric_mean2(data):
total = 1
for i in data:
total*=i
return pow(total, 1/len(data))
def logistic_func(X, bayta1, bayta2, bayta3, bayta4):
# 4-parameter logistic function
logisticPart = 1 + np.exp(
np.negative(np.divide(X - bayta3, np.abs(bayta4))))
yhat = bayta2 + np.divide(bayta1 - bayta2, logisticPart)
return yhat
def compute_metrics(y_pred, y, haveFit=False):
'''
compute metrics btw predictions & labels
'''
# compute SRCC & KRCC
SRCC = scipy.stats.spearmanr(y, y_pred)[0]
try:
KRCC = scipy.stats.kendalltau(y, y_pred)[0]
except:
KRCC = scipy.stats.kendalltau(y, y_pred, method='asymptotic')[0]
if not haveFit:
# logistic regression btw y_pred & y
beta_init = [np.max(y), np.min(y), np.mean(y_pred), 0.5]
popt, _ = curve_fit(logistic_func,
y_pred,
y,
p0=beta_init,
maxfev=int(1e8))
y_pred_logistic = logistic_func(y_pred, *popt)
else:
y_pred_logistic = y_pred
# compute PLCC RMSE
PLCC = scipy.stats.pearsonr(y, y_pred_logistic)[0]
RMSE = np.sqrt(mean_squared_error(y, y_pred_logistic))
return [SRCC, PLCC, KRCC, RMSE]
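def _compute_metrics_usage_sketch():
    # Minimal usage sketch with synthetic scores (no real MOS data): returns
    # [SRCC, PLCC, KRCC, RMSE]; the 4-parameter logistic mapping is fitted
    # internally because haveFit=False.
    rng = np.random.default_rng(0)
    mos = rng.uniform(1.0, 5.0, size=50)
    pred = mos + 0.3 * rng.standard_normal(50)  # noisy fake predictions
    return compute_metrics(pred, mos, haveFit=False)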
def plot_scatter(input_x,
input_y,
save_path,
xlabel='MOS',
ylabel='Curvatures',
haveFit=False):
    # scatter-plot visualization of predictions vs. labels
p = np.polyfit(input_x, input_y, 1).squeeze()
min_val = np.min(input_x)
max_val = np.max(input_x)
x = np.linspace(min_val, max_val, 1000)
f = np.poly1d(p)
y = f(x)
srcc, plcc, krcc, rmse = compute_metrics(input_x.squeeze(),
input_y.squeeze(),
haveFit=haveFit)
plt.rcParams['figure.figsize'] = (8, 6)
plt.scatter(input_x, input_y, s=7.5, c='b', marker='D')
plt.plot(x, y, c='r')
plt.xlabel(xlabel, fontsize=20)
plt.ylabel(ylabel, fontsize=20)
plt.title('SRCC: {} | PLCC: {} | RMSE: {}'.format(
round(srcc, 3), round(plcc, 3), round(rmse, 3)), fontsize=20)
    # plt.xlim(2.37, 3.78) # set the x-axis range
# plt.ylim(2.37, 3.78)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
    plt.savefig(save_path)
    plt.show()
plt.clf()
def clear_data(data):
data[np.isnan(data) | np.isposinf(data)
| np.isneginf(data)] = 0
return data
def clear_mos(data, mos):
data = data.squeeze()
mos = mos[~np.isnan(data)]
data = data[~np.isnan(data)]
mos = mos[~np.isposinf(data)]
data = data[~np.isposinf(data)]
mos = mos[~np.isneginf(data)]
data = data[~np.isneginf(data)]
return data, mos
def fit_curve(x, y):
"""fit x to y"""
# logistic regression
beta_init = [np.max(y), np.min(y), np.mean(x), 0.5]
popt, _ = curve_fit(logistic_func, x, y, p0=beta_init, maxfev=int(1e8))
y_logistic = logistic_func(x, *popt)
return y_logistic
def hysteresis_pooling(chunk):
'''parameters'''
tau = 8 # 2-sec * 30 fps
comb_alpha = 0.2 # weighting
''' function body '''
chunk = np.asarray(chunk, dtype=np.float64)
chunk_length = len(chunk)
l = np.zeros(chunk_length)
m = np.zeros(chunk_length)
q = np.zeros(chunk_length)
for t in range(chunk_length):
''' calculate l[t] - the memory component '''
if t == 0: # corner case
l[t] = chunk[t]
else:
# get previous frame indices
idx_prev = slice(max(0, t-tau), max(0, t-1)+1)
# print(idx_prev)
# calculate min scores
l[t] = min(chunk[idx_prev])
# print("l[t]:", l[t])
''' compute m[t] - the current component '''
if t == chunk_length - 1: # corner case
m[t] = chunk[t]
else:
# get next frame indices
idx_next = slice(t, min(t + tau, chunk_length))
# print(idx_next)
# sort ascend order
v = np.sort(chunk[idx_next])
# generated Gaussian weight
win_len = len(v) * 2.0 - 1.0
win_sigma = win_len / 6.0
# print(win_len, win_sigma)
            gaussian_win = signal.windows.gaussian(win_len, win_sigma)
gaussian_half_win = gaussian_win[len(v)-1:]
# normalize gaussian descend kernel
gaussian_half_win = np.divide(gaussian_half_win, np.sum(gaussian_half_win))
# print(gaussian_half_win)
m[t] = sum([x * y for x, y in zip(v, gaussian_half_win)])
# print("m[t]:", m[t])
''' combine l[t] and m[t] into one q[t] '''
q = comb_alpha * l + (1.0 - comb_alpha) * m
# print(q)
# print(np.mean(q))
return q, np.mean(q) | 10,552 | 32.715655 | 109 | py |
BVQI | BVQI-master/V1_extraction/extract_v1_features_480.py | import os
import cv2
import torch
import numpy as np
import pandas as pd
from time import time
from sklearn import decomposition
from torchvision.transforms import transforms
from gabor_filter import GaborFilters
if __name__ == '__main__':
data_name = 'livevqc'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
if data_name == 'konvid1k':
data_path = '/mnt/lustre/lliao/Dataset/KoNViD_1k/KoNViD_1k_videos/'
elif data_name == 'livevqc':
data_path = '/mnt/lustre/lliao/Dataset/LIVE-VQC/Video'
else:
raise NotImplementedError
width = 480
height = 270
feat_path = './features'
save_path = os.path.join(feat_path, data_name + str(width))
if not os.path.exists(save_path): os.makedirs(save_path)
meta_data = pd.read_csv(
os.path.join(feat_path, data_name + '_metadata.csv'))
video_num = len(meta_data)
    scale = 6
    orientations = 8
    kernel_size = 39
    row_downsample = 4
    column_downsample = 4
    trasform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Resize((height, width))])
    # Gabor filter bank applied to every frame below (same call pattern as
    # extract_v1_features.py); gb is required inside the frame loop.
    gb = GaborFilters(scale,
                      orientations, (kernel_size - 1) // 2,
                      row_downsample,
                      column_downsample,
                      device=device)
for vn in range(video_num):
if data_name == 'konvid1k':
video_name = os.path.join(data_path,
'{}.mp4'.format(meta_data.flickr_id[vn]))
elif data_name == 'livevqc':
video_name = os.path.join(data_path, meta_data.File[vn])
video_capture = cv2.VideoCapture(video_name)
frame_num = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
v1_features = torch.zeros(
frame_num,
(scale * orientations * round(width / column_downsample) *
round(height / row_downsample)))
start_time = time()
count = 0
while True:
success, frame = video_capture.read()
if not success:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = trasform(frame)
frame_imag = torch.zeros(frame.size())
frame = torch.stack((frame, frame_imag), 3)
frame = torch.view_as_complex(frame)
frame = frame[None, :, :, :]
frame = frame.to(device)
v1_features[count, :] = gb(frame).detach().cpu()
count += 1
v1_features = torch.nan_to_num(v1_features)
v1_features = v1_features.numpy()
pca = decomposition.PCA()
v1_features = pca.fit_transform(v1_features)
end_time = time()
print('Video {}, {}s elapsed running in {}'.format(
vn, end_time - start_time, device))
np.save(
os.path.join(save_path,
os.path.split(video_name)[-1] + '.npy'), v1_features)
| 2,798 | 30.1 | 79 | py |
BVQI | BVQI-master/V1_extraction/STEM_pca.py | import math
import scipy.io
import numpy as np
import warnings
import os
import pandas as pd
warnings.filterwarnings("ignore")
from sklearn.decomposition import PCA
import time
from utilities import *
import pickle
time_cost = 0
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
data_name = 'KoNViD'
if data_name == 'KoNViD':
meta_data = pd.read_csv('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/konvid1k_metadata.csv')
flickr_ids = meta_data.flickr_id
mos = meta_data.mos.to_numpy()
elif data_name == 'LiveVQC':
meta_data = pd.read_csv('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/livevqc_metadata.csv')
flickr_ids = meta_data.File
mos = meta_data.MOS.to_numpy()
data_length = mos.shape[0]
pca_d = [2, 3, 5, 10, 30, 50]
k = 6
tem_quality = np.zeros((data_length, 1))
fused_quality = np.zeros((data_length, 1))
fused_quality2 = np.zeros((data_length, 1))
lgn_quality = np.zeros((data_length, 1))
V1_quality = np.zeros((data_length, 1))
niqe_quality = np.zeros((data_length, 1))
for id in range(len(pca_d)):
print(pca_d[id])
for v in range(data_length):
time_start = time.time()
if data_name == 'KoNViD':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/KoNViD/' + str(flickr_ids[v]) + '.mp4.mat')
lgn_feature = lgn_feature_mat['LGN_features_level6']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/konvid1k480/' + str(flickr_ids[v]) + '.mp4.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/KoNViD/'+str(flickr_ids[v])+'.mat')
niqe_score = niqe_score_mat['features_norm22']
elif data_name == 'LiveVQC':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/LiveVQC/' + flickr_ids[v] + '.mat')
lgn_feature = lgn_feature_mat['LGN_features']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/livevqc480/' + flickr_ids[v] + '.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/LiveVQC/'+ flickr_ids[v] + '.mat')
niqe_score = niqe_score_mat['features_norm22']
            lgn_feature = np.asarray(lgn_feature, dtype=float)
            lgn_feature = clear_data(lgn_feature)
            V1_feature = np.asarray(V1_feature, dtype=float)
V1_feature = clear_data(V1_feature)
pca = PCA(n_components=pca_d[id])
pca.fit(lgn_feature)
lgn_PCA = pca.transform(lgn_feature)
pca = PCA(n_components=pca_d[id])
pca.fit(V1_feature)
V1_PCA = pca.transform(V1_feature)
lgn_score = compute_lgn_curvature(lgn_PCA)
v1_score = compute_v1_curvature(V1_PCA)
lgn_quality[v] = math.log(np.mean(lgn_score))
V1_quality[v] = math.log(np.mean(v1_score))
niqe_quality[v] = np.mean(niqe_score)
time_end = time.time()
#print('Video {}, overall {} seconds elapsed...'.format(
# v, time_end - time_start))
temporal_quality = V1_quality + lgn_quality
data = temporal_quality.squeeze()
data = data[~np.isnan(data)]
data = data[~np.isposinf(data)]
temporal_quality = data[~np.isneginf(data)]
mu = np.mean(temporal_quality)
sigma = np.std(temporal_quality)
mu_niqe = np.mean(niqe_quality)
sigma_niqe = np.std(niqe_quality)
niqe_quality = (niqe_quality-mu_niqe)/sigma_niqe*sigma+mu
print(mu_niqe, sigma_niqe, sigma, mu, len(temporal_quality))
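    # STEM-style fusion: the temporal term (sum of the log-mean LGN and V1
    # curvature energies) is multiplied by NIQE scores that were rescaled above to
    # the temporal term's mean and standard deviation, putting both branches on a
    # comparable scale before the product.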
fused_quality = (V1_quality + lgn_quality) * niqe_quality
fused_quality2 = (V1_quality) * niqe_quality
curvage_mos = fused_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('overall:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
curvage_mos = (V1_quality + lgn_quality)
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('temporal:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = lgn_quality
fused_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), fused_mos)
print('lgn_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = V1_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('V1_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
| 5,155 | 37.766917 | 161 | py |
BVQI | BVQI-master/V1_extraction/extract_v1_features.py | import os
import cv2
import torch
import numpy as np
import pandas as pd
from time import time
from sklearn import decomposition
from torchvision.transforms import transforms
from gabor_filter import GaborFilters
if __name__ == '__main__':
data_name = 'livevqc'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
if data_name == 'konvid1k':
data_path = '/mnt/data/xkm/datasets/KoNViD_1k_videos/KoNViD_1k_videos/'
elif data_name == 'livevqc':
data_path = '/mnt/data/xkm/datasets/LIVE_VQC/Video'
else:
raise NotImplementedError
feat_path = './features'
save_path = os.path.join(feat_path, data_name)
if not os.path.exists(save_path): os.makedirs(save_path)
meta_data = pd.read_csv(
os.path.join(feat_path, data_name + '_metadata.csv'))
video_num = len(meta_data)
width = 480
height = 270
scale = 5
orientations = 8
kernel_size = 19
row_downsample = 4
column_downsample = 4
trasform = transforms.Compose(
[transforms.ToTensor(),
transforms.Resize((height, width))])
gb = GaborFilters(scale,
orientations, (kernel_size - 1) // 2,
row_downsample,
column_downsample,
device=device)
for vn in range(video_num):
if data_name == 'konvid1k':
video_name = os.path.join(data_path,
'{}.mp4'.format(meta_data.flickr_id[vn]))
elif data_name == 'livevqc':
video_name = os.path.join(data_path, meta_data.File[vn])
video_capture = cv2.VideoCapture(video_name)
frame_num = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
v1_features = torch.zeros(
frame_num,
(scale * orientations * round(width / column_downsample) *
round(height / row_downsample)))
start_time = time()
count = 0
while True:
success, frame = video_capture.read()
if not success:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = trasform(frame)
frame_imag = torch.zeros(frame.size())
frame = torch.stack((frame, frame_imag), 3)
frame = torch.view_as_complex(frame)
frame = frame[None, :, :, :]
frame = frame.to(device)
v1_features[count, :] = gb(frame).detach().cpu()
count += 1
v1_features = torch.nan_to_num(v1_features)
v1_features = v1_features.numpy()
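        # PCA with no n_components keeps all min(n_frames, n_features) components;
        # the STEM scripts later either slice the leading columns of these saved
        # features or fit a further PCA on them.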
pca = decomposition.PCA()
v1_features = pca.fit_transform(v1_features)
end_time = time()
print('Video {}, {}s elapsed running in {}'.format(
vn, end_time - start_time, device))
np.save(
os.path.join(save_path,
os.path.split(video_name)[-1] + '.npy'), v1_features)
| 2,986 | 30.776596 | 79 | py |
BVQI | BVQI-master/V1_extraction/gabor_filter.py | import math
import cmath
import torch
import torch.nn as nn
class GaborFilters(nn.Module):
def __init__(self,
n_scale=5,
n_orientation=8,
kernel_radius=9,
row_downsample=4,
column_downsample=4,
device='cpu'):
super().__init__()
self.kernel_size = kernel_radius * 2 + 1
self.kernel_radius = kernel_radius
self.n_scale = n_scale
self.n_orientation = n_orientation
self.row_downsample = row_downsample
self.column_downsample = column_downsample
self.to(device)
self.gb = self.make_gabor_filters().to(device)
def make_gabor_filters(self):
kernel_size = self.kernel_size
n_scale = self.n_scale
n_orientation = self.n_orientation
gb = torch.zeros((n_scale * n_orientation, kernel_size, kernel_size),
dtype=torch.cfloat)
fmax = 0.25
gama = math.sqrt(2)
eta = math.sqrt(2)
for i in range(n_scale):
fu = fmax / (math.sqrt(2)**i)
alpha = fu / gama
beta = fu / eta
for j in range(n_orientation):
tetav = (j / n_orientation) * math.pi
g_filter = torch.zeros((kernel_size, kernel_size),
dtype=torch.cfloat)
for x in range(1, kernel_size + 1):
for y in range(1, kernel_size + 1):
xprime = (x - (
(kernel_size + 1) / 2)) * math.cos(tetav) + (y - (
(kernel_size + 1) / 2)) * math.sin(tetav)
yprime = -(x - (
(kernel_size + 1) / 2)) * math.sin(tetav) + (y - (
(kernel_size + 1) / 2)) * math.cos(tetav)
g_filter[x - 1][
y -
1] = (fu**2 / (math.pi * gama * eta)) * math.exp(-(
(alpha**2) * (xprime**2) + (beta**2) *
(yprime**2))) * cmath.exp(
1j * 2 * math.pi * fu * xprime)
gb[i * n_orientation + j] = g_filter
return gb
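    # forward() convolves a single-channel complex input with this complex bank,
    # keeps the response magnitudes, subsamples rows/columns by the configured
    # strides, z-scores each filter map, and flattens everything per frame.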
def forward(self, x):
batch_size = x.size(0)
cn = x.size(1)
sy = x.size(2)
sx = x.size(3)
assert cn == 1
gb = self.gb
gb = gb[:, None, :, :]
res = nn.functional.conv2d(input=x, weight=gb, padding='same')
res = res.view(batch_size, -1, sy, sx)
res = torch.abs(res)
res = res[:, :, ::self.row_downsample, :]
res = res[:, :, :, ::self.column_downsample]
res = res.reshape(batch_size, res.size(1), -1)
res = (res - torch.mean(res, 2, keepdim=True)) / torch.std(
res, 2, keepdim=True)
res = res.view(batch_size, -1)
return res
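def _gabor_output_size_sketch(height=270, width=480):
    # Sketch with a random complex frame (no image file needed), assuming the same
    # complex conv2d support the __main__ demo below relies on: with the default
    # 5 scales x 8 orientations and stride-4 subsampling, the flattened feature
    # length is 5 * 8 * ceil(height / 4) * ceil(width / 4).
    frame = torch.randn(1, 1, height, width, dtype=torch.cfloat)
    bank = GaborFilters()
    return bank(frame).shape  # torch.Size([1, 5 * 8 * 68 * 120]) for 270 x 480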
if __name__ == "__main__":
import time
from PIL import Image
from torchvision.transforms import transforms
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
img = Image.open(
'/mnt/d/datasets/KonIQ-10k/images_512x384/826373.jpg').convert('L')
img = transforms.ToTensor()(img)
img_imag = torch.zeros(img.size())
img = torch.stack((img, img_imag), 3)
img = torch.view_as_complex(img)
img = img[None, :, :, :]
gb = GaborFilters(device=device)
img = img.to(device)
start_time = time.time()
res = gb(img)
end_time = time.time()
print(res.shape)
print('{}s elapsed running in {}'.format(end_time - start_time, device))
| 3,734 | 31.763158 | 79 | py |
BVQI | BVQI-master/V1_extraction/STEM.py | import math
import scipy.io
import numpy as np
import warnings
import os
import pandas as pd
warnings.filterwarnings("ignore")
from sklearn.decomposition import PCA
import time
from utilities import *
import pickle
time_cost = 0
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
data_name = 'KoNViD'
if data_name == 'KoNViD':
meta_data = pd.read_csv('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/konvid1k_metadata.csv')
flickr_ids = meta_data.flickr_id
mos = meta_data.mos.to_numpy()
elif data_name == 'LiveVQC':
meta_data = pd.read_csv('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/livevqc_metadata.csv')
flickr_ids = meta_data.File
mos = meta_data.MOS.to_numpy()
data_length = mos.shape[0]
k = 6
pca_d = 5
kernel_size = [5, 11, 17, 19, 23, 29, 35, 41, 47, 53]
tem_quality = np.zeros((data_length, 1))
fused_quality = np.zeros((data_length, 1))
fused_quality2 = np.zeros((data_length, 1))
lgn_quality = np.zeros((data_length, 1))
V1_quality = np.zeros((data_length, 1))
niqe_quality = np.zeros((data_length, 1))
for id in range(len(kernel_size)):
print(kernel_size[id])
for v in range(data_length):
time_start = time.time()
if data_name == 'KoNViD':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/KoNViD/' + str(flickr_ids[v]) + '.mp4.mat')
lgn_feature = lgn_feature_mat['LGN_features_level6']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/konvid1k4'+str(kernel_size[id])+'/' + str(flickr_ids[v]) + '.mp4.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/KoNViD/'+str(flickr_ids[v])+'.mat')
niqe_score = niqe_score_mat['features_norm22']
elif data_name == 'LiveVQC':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/LiveVQC/' + flickr_ids[v] + '.mat')
lgn_feature = lgn_feature_mat['LGN_features']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/livevqc4'+str(kernel_size[id])+'/' + flickr_ids[v] + '.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/LiveVQC/'+ flickr_ids[v] + '.mat')
niqe_score = niqe_score_mat['features_norm22']
            lgn_feature = np.asarray(lgn_feature, dtype=float)
            lgn_feature = clear_data(lgn_feature)
            V1_feature = np.asarray(V1_feature, dtype=float)
V1_feature = clear_data(V1_feature)
pca = PCA(n_components=pca_d)
pca.fit(lgn_feature)
lgn_PCA = pca.transform(lgn_feature)
# pca = PCA(n_components=pca_d)
# pca.fit(V1_feature)
# V1_PCA = pca.transform(V1_feature)
#lgn_PCA = lgn_feature[:, :pca_d]
V1_PCA = V1_feature[:, :pca_d]
lgn_score = compute_lgn_curvature(lgn_PCA)
v1_score = compute_v1_curvature(V1_PCA)
lgn_quality[v] = math.log(np.mean(lgn_score))
V1_quality[v] = math.log(np.mean(v1_score))
niqe_quality[v] = np.mean(niqe_score)
time_end = time.time()
#print('Video {}, overall {} seconds elapsed...'.format(
# v, time_end - time_start))
temporal_quality = V1_quality + lgn_quality
data = temporal_quality.squeeze()
data = data[~np.isnan(data)]
data = data[~np.isposinf(data)]
temporal_quality = data[~np.isneginf(data)]
mu = np.mean(temporal_quality)
sigma = np.std(temporal_quality)
mu_niqe = np.mean(niqe_quality)
sigma_niqe = np.std(niqe_quality)
niqe_quality = (niqe_quality-mu_niqe)/sigma_niqe*sigma+mu
print(mu_niqe, sigma_niqe, sigma, mu, len(temporal_quality))
fused_quality = (V1_quality + lgn_quality) * niqe_quality
fused_quality2 = (V1_quality) * niqe_quality
curvage_mos = fused_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('overall:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
curvage_mos = (V1_quality + lgn_quality)
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('temporal:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = lgn_quality
fused_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), fused_mos)
print('lgn_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = V1_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('V1_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
| 5,318 | 38.110294 | 170 | py |
BVQI | BVQI-master/V1_extraction/STEM_resolution.py | import math
import scipy.io
import numpy as np
import warnings
import os
import pandas as pd
warnings.filterwarnings("ignore")
from sklearn.decomposition import PCA
import time
from utilities import *
import pickle
time_cost = 0
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
data_name = 'KoNViD'
if data_name == 'KoNViD':
meta_data = pd.read_csv('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/konvid1k_metadata.csv')
flickr_ids = meta_data.flickr_id
mos = meta_data.mos.to_numpy()
elif data_name == 'LiveVQC':
meta_data = pd.read_csv('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/livevqc_metadata.csv')
flickr_ids = meta_data.File
mos = meta_data.MOS.to_numpy()
data_length = mos.shape[0]
pca_d = 10
k = 6
tem_quality = np.zeros((data_length, 1))
fused_quality = np.zeros((data_length, 1))
fused_quality2 = np.zeros((data_length, 1))
lgn_quality = np.zeros((data_length, 1))
V1_quality = np.zeros((data_length, 1))
niqe_quality = np.zeros((data_length, 1))
for v in range(data_length):
time_start = time.time()
if data_name == 'KoNViD':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/KoNViD/' + str(flickr_ids[v]) + '.mp4.mat')
lgn_feature = lgn_feature_mat['LGN_features_level6']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/konvid1k120/' + str(flickr_ids[v]) + '.mp4.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/KoNViD/'+str(flickr_ids[v])+'.mat')
niqe_score = niqe_score_mat['features_norm22']
elif data_name == 'LiveVQC':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/LiveVQC/' + flickr_ids[v] + '.mat')
lgn_feature = lgn_feature_mat['LGN_features']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/livevqc120/' + flickr_ids[v] + '.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/LiveVQC/'+ flickr_ids[v] + '.mat')
niqe_score = niqe_score_mat['features_norm22']
        lgn_feature = np.asarray(lgn_feature, dtype=float)
        lgn_feature = clear_data(lgn_feature)
        V1_feature = np.asarray(V1_feature, dtype=float)
V1_feature = clear_data(V1_feature)
pca = PCA(n_components=pca_d)
pca.fit(lgn_feature)
lgn_PCA = pca.transform(lgn_feature)
pca = PCA(n_components=pca_d)
pca.fit(V1_feature)
V1_PCA = pca.transform(V1_feature)
lgn_score = compute_lgn_curvature(lgn_PCA)
v1_score = compute_v1_curvature(V1_PCA)
lgn_quality[v] = math.log(np.mean(lgn_score))
V1_quality[v] = math.log(np.mean(v1_score))
niqe_quality[v] = np.mean(niqe_score)
time_end = time.time()
#print('Video {}, overall {} seconds elapsed...'.format(
# v, time_end - time_start))
print(120)
temporal_quality = V1_quality + lgn_quality
data = temporal_quality.squeeze()
data = data[~np.isnan(data)]
data = data[~np.isposinf(data)]
temporal_quality = data[~np.isneginf(data)]
mu = np.mean(temporal_quality)
sigma = np.std(temporal_quality)
mu_niqe = np.mean(niqe_quality)
sigma_niqe = np.std(niqe_quality)
niqe_quality = (niqe_quality-mu_niqe)/sigma_niqe*sigma+mu
print(mu_niqe, sigma_niqe, sigma, mu, len(temporal_quality))
fused_quality = (V1_quality + lgn_quality) * niqe_quality
fused_quality2 = (V1_quality) * niqe_quality
curvage_mos = fused_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('overall:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
curvage_mos = (V1_quality + lgn_quality)
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('temporal:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = lgn_quality
fused_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), fused_mos)
print('lgn_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = V1_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('V1_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
for v in range(data_length):
time_start = time.time()
if data_name == 'KoNViD':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/KoNViD/' + str(flickr_ids[v]) + '.mp4.mat')
lgn_feature = lgn_feature_mat['LGN_features_level6']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/konvid1k240/' + str(flickr_ids[v]) + '.mp4.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/KoNViD/'+str(flickr_ids[v])+'.mat')
niqe_score = niqe_score_mat['features_norm22']
elif data_name == 'LiveVQC':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/LiveVQC/' + flickr_ids[v] + '.mat')
lgn_feature = lgn_feature_mat['LGN_features']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/livevqc240/' + flickr_ids[v] + '.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/LiveVQC/'+ flickr_ids[v] + '.mat')
niqe_score = niqe_score_mat['features_norm22']
        lgn_feature = np.asarray(lgn_feature, dtype=float)
        lgn_feature = clear_data(lgn_feature)
        V1_feature = np.asarray(V1_feature, dtype=float)
V1_feature = clear_data(V1_feature)
pca = PCA(n_components=pca_d)
pca.fit(lgn_feature)
lgn_PCA = pca.transform(lgn_feature)
pca = PCA(n_components=pca_d)
pca.fit(V1_feature)
V1_PCA = pca.transform(V1_feature)
lgn_score = compute_lgn_curvature(lgn_PCA)
v1_score = compute_v1_curvature(V1_PCA)
lgn_quality[v] = math.log(np.mean(lgn_score))
V1_quality[v] = math.log(np.mean(v1_score))
niqe_quality[v] = np.mean(niqe_score)
time_end = time.time()
#print('Video {}, overall {} seconds elapsed...'.format(
# v, time_end - time_start))
print(240)
temporal_quality = V1_quality + lgn_quality
data = temporal_quality.squeeze()
data = data[~np.isnan(data)]
data = data[~np.isposinf(data)]
temporal_quality = data[~np.isneginf(data)]
mu = np.mean(temporal_quality)
sigma = np.std(temporal_quality)
mu_niqe = np.mean(niqe_quality)
sigma_niqe = np.std(niqe_quality)
niqe_quality = (niqe_quality-mu_niqe)/sigma_niqe*sigma+mu
print(mu_niqe, sigma_niqe, sigma, mu, len(temporal_quality))
fused_quality = (V1_quality + lgn_quality) * niqe_quality
fused_quality2 = (V1_quality) * niqe_quality
curvage_mos = fused_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('overall:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
curvage_mos = (V1_quality + lgn_quality)
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('temporal:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = lgn_quality
fused_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), fused_mos)
print('lgn_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = V1_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('V1_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
for v in range(data_length):
time_start = time.time()
if data_name == 'KoNViD':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/KoNViD/' + str(flickr_ids[v]) + '.mp4.mat')
lgn_feature = lgn_feature_mat['LGN_features_level6']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/konvid1k480/' + str(flickr_ids[v]) + '.mp4.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/KoNViD/'+str(flickr_ids[v])+'.mat')
niqe_score = niqe_score_mat['features_norm22']
elif data_name == 'LiveVQC':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/LiveVQC/' + flickr_ids[v] + '.mat')
lgn_feature = lgn_feature_mat['LGN_features']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/livevqc480/' + flickr_ids[v] + '.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/LiveVQC/'+ flickr_ids[v] + '.mat')
niqe_score = niqe_score_mat['features_norm22']
        lgn_feature = np.asarray(lgn_feature, dtype=float)
        lgn_feature = clear_data(lgn_feature)
        V1_feature = np.asarray(V1_feature, dtype=float)
V1_feature = clear_data(V1_feature)
pca = PCA(n_components=pca_d)
pca.fit(lgn_feature)
lgn_PCA = pca.transform(lgn_feature)
pca = PCA(n_components=pca_d)
pca.fit(V1_feature)
V1_PCA = pca.transform(V1_feature)
lgn_score = compute_lgn_curvature(lgn_PCA)
v1_score = compute_v1_curvature(V1_PCA)
lgn_quality[v] = math.log(np.mean(lgn_score))
V1_quality[v] = math.log(np.mean(v1_score))
niqe_quality[v] = np.mean(niqe_score)
time_end = time.time()
#print('Video {}, overall {} seconds elapsed...'.format(
# v, time_end - time_start))
temporal_quality = V1_quality + lgn_quality
data = temporal_quality.squeeze()
data = data[~np.isnan(data)]
data = data[~np.isposinf(data)]
temporal_quality = data[~np.isneginf(data)]
mu = np.mean(temporal_quality)
sigma = np.std(temporal_quality)
mu_niqe = np.mean(niqe_quality)
sigma_niqe = np.std(niqe_quality)
niqe_quality = (niqe_quality-mu_niqe)/sigma_niqe*sigma+mu
print(mu_niqe, sigma_niqe, sigma, mu, len(temporal_quality))
fused_quality = (V1_quality + lgn_quality) * niqe_quality
fused_quality2 = (V1_quality) * niqe_quality
print(480)
curvage_mos = fused_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('overall:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
curvage_mos = (V1_quality + lgn_quality)
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('temporal:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = lgn_quality
fused_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), fused_mos)
print('lgn_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = V1_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('V1_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
for v in range(data_length):
time_start = time.time()
if data_name == 'KoNViD':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/KoNViD/' + str(flickr_ids[v]) + '.mp4.mat')
lgn_feature = lgn_feature_mat['LGN_features_level6']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/konvid1k960/' + str(flickr_ids[v]) + '.mp4.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/KoNViD/'+str(flickr_ids[v])+'.mat')
niqe_score = niqe_score_mat['features_norm22']
elif data_name == 'LiveVQC':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/LiveVQC/' + flickr_ids[v] + '.mat')
lgn_feature = lgn_feature_mat['LGN_features']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/livevqc960/' + flickr_ids[v] + '.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/LiveVQC/'+ flickr_ids[v] + '.mat')
niqe_score = niqe_score_mat['features_norm22']
        lgn_feature = np.asarray(lgn_feature, dtype=float)
        lgn_feature = clear_data(lgn_feature)
        V1_feature = np.asarray(V1_feature, dtype=float)
V1_feature = clear_data(V1_feature)
pca = PCA(n_components=pca_d)
pca.fit(lgn_feature)
lgn_PCA = pca.transform(lgn_feature)
pca = PCA(n_components=pca_d)
pca.fit(V1_feature)
V1_PCA = pca.transform(V1_feature)
lgn_score = compute_lgn_curvature(lgn_PCA)
v1_score = compute_v1_curvature(V1_PCA)
lgn_quality[v] = math.log(np.mean(lgn_score))
V1_quality[v] = math.log(np.mean(v1_score))
niqe_quality[v] = np.mean(niqe_score)
time_end = time.time()
#print('Video {}, overall {} seconds elapsed...'.format(
# v, time_end - time_start))
temporal_quality = V1_quality + lgn_quality
data = temporal_quality.squeeze()
data = data[~np.isnan(data)]
data = data[~np.isposinf(data)]
temporal_quality = data[~np.isneginf(data)]
mu = np.mean(temporal_quality)
sigma = np.std(temporal_quality)
mu_niqe = np.mean(niqe_quality)
sigma_niqe = np.std(niqe_quality)
niqe_quality = (niqe_quality-mu_niqe)/sigma_niqe*sigma+mu
print(mu_niqe, sigma_niqe, sigma, mu, len(temporal_quality))
fused_quality = (V1_quality + lgn_quality) * niqe_quality
fused_quality2 = (V1_quality) * niqe_quality
print(960)
curvage_mos = fused_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('overall:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
curvage_mos = (V1_quality + lgn_quality)
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('temporal:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = lgn_quality
fused_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), fused_mos)
print('lgn_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = V1_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('V1_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True)) | 15,842 | 37.641463 | 157 | py |
BVQI | BVQI-master/V1_extraction/.ipynb_checkpoints/gabor_filter-checkpoint.py | import math
import cmath
import torch
import torch.nn as nn
class GaborFilters(nn.Module):
def __init__(self,
n_scale=5,
n_orientation=8,
kernel_radius=9,
row_downsample=4,
column_downsample=4,
device='cpu'):
super().__init__()
self.kernel_size = kernel_radius * 2 + 1
self.kernel_radius = kernel_radius
self.n_scale = n_scale
self.n_orientation = n_orientation
self.row_downsample = row_downsample
self.column_downsample = column_downsample
self.to(device)
self.gb = self.make_gabor_filters().to(device)
def make_gabor_filters(self):
kernel_size = self.kernel_size
n_scale = self.n_scale
n_orientation = self.n_orientation
gb = torch.zeros((n_scale * n_orientation, kernel_size, kernel_size),
dtype=torch.cfloat)
fmax = 0.25
gama = math.sqrt(2)
eta = math.sqrt(2)
for i in range(n_scale):
fu = fmax / (math.sqrt(2)**i)
alpha = fu / gama
beta = fu / eta
for j in range(n_orientation):
tetav = (j / n_orientation) * math.pi
g_filter = torch.zeros((kernel_size, kernel_size),
dtype=torch.cfloat)
for x in range(1, kernel_size + 1):
for y in range(1, kernel_size + 1):
xprime = (x - (
(kernel_size + 1) / 2)) * math.cos(tetav) + (y - (
(kernel_size + 1) / 2)) * math.sin(tetav)
yprime = -(x - (
(kernel_size + 1) / 2)) * math.sin(tetav) + (y - (
(kernel_size + 1) / 2)) * math.cos(tetav)
g_filter[x - 1][
y -
1] = (fu**2 / (math.pi * gama * eta)) * math.exp(-(
(alpha**2) * (xprime**2) + (beta**2) *
(yprime**2))) * cmath.exp(
1j * 2 * math.pi * fu * xprime)
gb[i * n_orientation + j] = g_filter
return gb
def forward(self, x):
batch_size = x.size(0)
cn = x.size(1)
sy = x.size(2)
sx = x.size(3)
assert cn == 1
gb = self.gb
gb = gb[:, None, :, :]
res = nn.functional.conv2d(input=x, weight=gb, padding='same')
res = res.view(batch_size, -1, sy, sx)
res = torch.abs(res)
res = res[:, :, ::self.row_downsample, :]
res = res[:, :, :, ::self.column_downsample]
res = res.reshape(batch_size, res.size(1), -1)
res = (res - torch.mean(res, 2, keepdim=True)) / torch.std(
res, 2, keepdim=True)
res = res.view(batch_size, -1)
return res
if __name__ == "__main__":
import time
from PIL import Image
from torchvision.transforms import transforms
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
img = Image.open(
'/mnt/d/datasets/KonIQ-10k/images_512x384/826373.jpg').convert('L')
img = transforms.ToTensor()(img)
img_imag = torch.zeros(img.size())
img = torch.stack((img, img_imag), 3)
img = torch.view_as_complex(img)
img = img[None, :, :, :]
gb = GaborFilters(device=device)
img = img.to(device)
start_time = time.time()
res = gb(img)
end_time = time.time()
print(res.shape)
print('{}s elapsed running in {}'.format(end_time - start_time, device))
| 3,734 | 31.763158 | 79 | py |
BVQI | BVQI-master/V1_extraction/.ipynb_checkpoints/STEM_pca-checkpoint.py | import math
import scipy.io
import numpy as np
import warnings
import os
import pandas as pd
warnings.filterwarnings("ignore")
from sklearn.decomposition import PCA
import time
from utilities import *
import pickle
time_cost = 0
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
data_name = 'KoNViD'
if data_name == 'KoNViD':
meta_data = pd.read_csv('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/konvid1k_metadata.csv')
flickr_ids = meta_data.flickr_id
mos = meta_data.mos.to_numpy()
elif data_name == 'LiveVQC':
meta_data = pd.read_csv('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/livevqc_metadata.csv')
flickr_ids = meta_data.File
mos = meta_data.MOS.to_numpy()
data_length = mos.shape[0]
pca_d = [2, 3, 5, 10, 30, 50]
k = 6
tem_quality = np.zeros((data_length, 1))
fused_quality = np.zeros((data_length, 1))
fused_quality2 = np.zeros((data_length, 1))
lgn_quality = np.zeros((data_length, 1))
V1_quality = np.zeros((data_length, 1))
niqe_quality = np.zeros((data_length, 1))
for id in range(len(pca_d)):
print(pca_d[id])
for v in range(data_length):
time_start = time.time()
if data_name == 'KoNViD':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/KoNViD/' + str(flickr_ids[v]) + '.mp4.mat')
lgn_feature = lgn_feature_mat['LGN_features_level6']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/konvid1k480/' + str(flickr_ids[v]) + '.mp4.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/KoNViD/'+str(flickr_ids[v])+'.mat')
niqe_score = niqe_score_mat['features_norm22']
elif data_name == 'LiveVQC':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/LiveVQC/' + flickr_ids[v] + '.mat')
lgn_feature = lgn_feature_mat['LGN_features']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/livevqc480/' + flickr_ids[v] + '.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/LiveVQC/'+ flickr_ids[v] + '.mat')
niqe_score = niqe_score_mat['features_norm22']
            lgn_feature = np.asarray(lgn_feature, dtype=float)
            lgn_feature = clear_data(lgn_feature)
            V1_feature = np.asarray(V1_feature, dtype=float)
V1_feature = clear_data(V1_feature)
pca = PCA(n_components=pca_d[id])
pca.fit(lgn_feature)
lgn_PCA = pca.transform(lgn_feature)
pca = PCA(n_components=pca_d[id])
pca.fit(V1_feature)
V1_PCA = pca.transform(V1_feature)
lgn_score = compute_lgn_curvature(lgn_PCA)
v1_score = compute_v1_curvature(V1_PCA)
lgn_quality[v] = math.log(np.mean(lgn_score))
V1_quality[v] = math.log(np.mean(v1_score))
niqe_quality[v] = np.mean(niqe_score)
time_end = time.time()
#print('Video {}, overall {} seconds elapsed...'.format(
# v, time_end - time_start))
temporal_quality = V1_quality + lgn_quality
data = temporal_quality.squeeze()
data = data[~np.isnan(data)]
data = data[~np.isposinf(data)]
temporal_quality = data[~np.isneginf(data)]
mu = np.mean(temporal_quality)
sigma = np.std(temporal_quality)
mu_niqe = np.mean(niqe_quality)
sigma_niqe = np.std(niqe_quality)
niqe_quality = (niqe_quality-mu_niqe)/sigma_niqe*sigma+mu
print(mu_niqe, sigma_niqe, sigma, mu, len(temporal_quality))
fused_quality = (V1_quality + lgn_quality) * niqe_quality
fused_quality2 = (V1_quality) * niqe_quality
curvage_mos = fused_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('overall:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
curvage_mos = (V1_quality + lgn_quality)
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('temporal:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = lgn_quality
fused_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), fused_mos)
print('lgn_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = V1_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('V1_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
| 5,155 | 37.766917 | 161 | py |
BVQI | BVQI-master/V1_extraction/.ipynb_checkpoints/STEM-checkpoint.py | import math
import scipy.io
import numpy as np
import warnings
import os
import pandas as pd
warnings.filterwarnings("ignore")
from sklearn.decomposition import PCA
import time
from utilities import *
import pickle
time_cost = 0
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
data_name = 'KoNViD'
if data_name == 'KoNViD':
meta_data = pd.read_csv('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/konvid1k_metadata.csv')
flickr_ids = meta_data.flickr_id
mos = meta_data.mos.to_numpy()
elif data_name == 'LiveVQC':
meta_data = pd.read_csv('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/livevqc_metadata.csv')
flickr_ids = meta_data.File
mos = meta_data.MOS.to_numpy()
data_length = mos.shape[0]
k = 6
pca_d = 5
kernel_size = [5, 11, 17, 19, 23, 29, 35, 41, 47, 53]
tem_quality = np.zeros((data_length, 1))
fused_quality = np.zeros((data_length, 1))
fused_quality2 = np.zeros((data_length, 1))
lgn_quality = np.zeros((data_length, 1))
V1_quality = np.zeros((data_length, 1))
niqe_quality = np.zeros((data_length, 1))
for id in range(len(kernel_size)):
print(kernel_size[id])
for v in range(data_length):
time_start = time.time()
if data_name == 'KoNViD':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/KoNViD/' + str(flickr_ids[v]) + '.mp4.mat')
lgn_feature = lgn_feature_mat['LGN_features_level6']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/konvid1k4'+str(kernel_size[id])+'/' + str(flickr_ids[v]) + '.mp4.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/KoNViD/'+str(flickr_ids[v])+'.mat')
niqe_score = niqe_score_mat['features_norm22']
elif data_name == 'LiveVQC':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/LiveVQC/' + flickr_ids[v] + '.mat')
lgn_feature = lgn_feature_mat['LGN_features']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/livevqc4'+str(kernel_size[id])+'/' + flickr_ids[v] + '.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/LiveVQC/'+ flickr_ids[v] + '.mat')
niqe_score = niqe_score_mat['features_norm22']
            lgn_feature = np.asarray(lgn_feature, dtype=float)
            lgn_feature = clear_data(lgn_feature)
            V1_feature = np.asarray(V1_feature, dtype=float)
V1_feature = clear_data(V1_feature)
pca = PCA(n_components=pca_d)
pca.fit(lgn_feature)
lgn_PCA = pca.transform(lgn_feature)
# pca = PCA(n_components=pca_d)
# pca.fit(V1_feature)
# V1_PCA = pca.transform(V1_feature)
#lgn_PCA = lgn_feature[:, :pca_d]
V1_PCA = V1_feature[:, :pca_d]
lgn_score = compute_lgn_curvature(lgn_PCA)
v1_score = compute_v1_curvature(V1_PCA)
lgn_quality[v] = math.log(np.mean(lgn_score))
V1_quality[v] = math.log(np.mean(v1_score))
niqe_quality[v] = np.mean(niqe_score)
time_end = time.time()
#print('Video {}, overall {} seconds elapsed...'.format(
# v, time_end - time_start))
temporal_quality = V1_quality + lgn_quality
data = temporal_quality.squeeze()
data = data[~np.isnan(data)]
data = data[~np.isposinf(data)]
temporal_quality = data[~np.isneginf(data)]
mu = np.mean(temporal_quality)
sigma = np.std(temporal_quality)
mu_niqe = np.mean(niqe_quality)
sigma_niqe = np.std(niqe_quality)
niqe_quality = (niqe_quality-mu_niqe)/sigma_niqe*sigma+mu
print(mu_niqe, sigma_niqe, sigma, mu, len(temporal_quality))
fused_quality = (V1_quality + lgn_quality) * niqe_quality
fused_quality2 = (V1_quality) * niqe_quality
curvage_mos = fused_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('overall:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
curvage_mos = (V1_quality + lgn_quality)
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('temporal:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = lgn_quality
fused_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), fused_mos)
print('lgn_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = V1_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('V1_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
| 5,318 | 38.110294 | 170 | py |
BVQI | BVQI-master/V1_extraction/.ipynb_checkpoints/extract_v1_features-checkpoint.py | import os
import cv2
import torch
import numpy as np
import pandas as pd
from time import time
from sklearn import decomposition
from torchvision.transforms import transforms
from gabor_filter import GaborFilters
if __name__ == '__main__':
data_name = 'livevqc'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
if data_name == 'konvid1k':
data_path = '/mnt/data/xkm/datasets/KoNViD_1k_videos/KoNViD_1k_videos/'
elif data_name == 'livevqc':
data_path = '/mnt/data/xkm/datasets/LIVE_VQC/Video'
else:
raise NotImplementedError
feat_path = './features'
save_path = os.path.join(feat_path, data_name)
if not os.path.exists(save_path): os.makedirs(save_path)
meta_data = pd.read_csv(
os.path.join(feat_path, data_name + '_metadata.csv'))
video_num = len(meta_data)
width = 480
height = 270
scale = 5
orientations = 8
kernel_size = 19
row_downsample = 4
column_downsample = 4
trasform = transforms.Compose(
[transforms.ToTensor(),
transforms.Resize((height, width))])
gb = GaborFilters(scale,
orientations, (kernel_size - 1) // 2,
row_downsample,
column_downsample,
device=device)
for vn in range(video_num):
if data_name == 'konvid1k':
video_name = os.path.join(data_path,
'{}.mp4'.format(meta_data.flickr_id[vn]))
elif data_name == 'livevqc':
video_name = os.path.join(data_path, meta_data.File[vn])
video_capture = cv2.VideoCapture(video_name)
frame_num = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
v1_features = torch.zeros(
frame_num,
(scale * orientations * round(width / column_downsample) *
round(height / row_downsample)))
start_time = time()
count = 0
while True:
success, frame = video_capture.read()
if not success:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = trasform(frame)
frame_imag = torch.zeros(frame.size())
frame = torch.stack((frame, frame_imag), 3)
frame = torch.view_as_complex(frame)
frame = frame[None, :, :, :]
frame = frame.to(device)
v1_features[count, :] = gb(frame).detach().cpu()
count += 1
v1_features = torch.nan_to_num(v1_features)
v1_features = v1_features.numpy()
pca = decomposition.PCA()
v1_features = pca.fit_transform(v1_features)
end_time = time()
print('Video {}, {}s elapsed running in {}'.format(
vn, end_time - start_time, device))
np.save(
os.path.join(save_path,
os.path.split(video_name)[-1] + '.npy'), v1_features)
| 2,986 | 30.776596 | 79 | py |
BVQI | BVQI-master/V1_extraction/.ipynb_checkpoints/STEM_resolution-checkpoint.py | import math
import scipy.io
import numpy as np
import warnings
import os
import pandas as pd
warnings.filterwarnings("ignore")
from sklearn.decomposition import PCA
import time
from utilities import *
import pickle
time_cost = 0
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
data_name = 'KoNViD'
if data_name == 'KoNViD':
meta_data = pd.read_csv('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/konvid1k_metadata.csv')
flickr_ids = meta_data.flickr_id
mos = meta_data.mos.to_numpy()
elif data_name == 'LiveVQC':
meta_data = pd.read_csv('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/livevqc_metadata.csv')
flickr_ids = meta_data.File
mos = meta_data.MOS.to_numpy()
data_length = mos.shape[0]
pca_d = 10
k = 6
tem_quality = np.zeros((data_length, 1))
fused_quality = np.zeros((data_length, 1))
fused_quality2 = np.zeros((data_length, 1))
lgn_quality = np.zeros((data_length, 1))
V1_quality = np.zeros((data_length, 1))
niqe_quality = np.zeros((data_length, 1))
for v in range(data_length):
time_start = time.time()
if data_name == 'KoNViD':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/KoNViD/' + str(flickr_ids[v]) + '.mp4.mat')
lgn_feature = lgn_feature_mat['LGN_features_level6']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/konvid1k120/' + str(flickr_ids[v]) + '.mp4.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/KoNViD/'+str(flickr_ids[v])+'.mat')
niqe_score = niqe_score_mat['features_norm22']
elif data_name == 'LiveVQC':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/LiveVQC/' + flickr_ids[v] + '.mat')
lgn_feature = lgn_feature_mat['LGN_features']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/livevqc120/' + flickr_ids[v] + '.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/LiveVQC/'+ flickr_ids[v] + '.mat')
niqe_score = niqe_score_mat['features_norm22']
        lgn_feature = np.asarray(lgn_feature, dtype=float)
        lgn_feature = clear_data(lgn_feature)
        V1_feature = np.asarray(V1_feature, dtype=float)
V1_feature = clear_data(V1_feature)
pca = PCA(n_components=pca_d)
pca.fit(lgn_feature)
lgn_PCA = pca.transform(lgn_feature)
pca = PCA(n_components=pca_d)
pca.fit(V1_feature)
V1_PCA = pca.transform(V1_feature)
lgn_score = compute_lgn_curvature(lgn_PCA)
v1_score = compute_v1_curvature(V1_PCA)
lgn_quality[v] = math.log(np.mean(lgn_score))
V1_quality[v] = math.log(np.mean(v1_score))
niqe_quality[v] = np.mean(niqe_score)
time_end = time.time()
#print('Video {}, overall {} seconds elapsed...'.format(
# v, time_end - time_start))
print(120)
temporal_quality = V1_quality + lgn_quality
data = temporal_quality.squeeze()
data = data[~np.isnan(data)]
data = data[~np.isposinf(data)]
temporal_quality = data[~np.isneginf(data)]
mu = np.mean(temporal_quality)
sigma = np.std(temporal_quality)
mu_niqe = np.mean(niqe_quality)
sigma_niqe = np.std(niqe_quality)
niqe_quality = (niqe_quality-mu_niqe)/sigma_niqe*sigma+mu
print(mu_niqe, sigma_niqe, sigma, mu, len(temporal_quality))
fused_quality = (V1_quality + lgn_quality) * niqe_quality
fused_quality2 = (V1_quality) * niqe_quality
curvage_mos = fused_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('overall:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
curvage_mos = (V1_quality + lgn_quality)
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('temporal:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = lgn_quality
fused_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), fused_mos)
print('lgn_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = V1_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('V1_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
for v in range(data_length):
time_start = time.time()
if data_name == 'KoNViD':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/KoNViD/' + str(flickr_ids[v]) + '.mp4.mat')
lgn_feature = lgn_feature_mat['LGN_features_level6']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/konvid1k240/' + str(flickr_ids[v]) + '.mp4.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/KoNViD/'+str(flickr_ids[v])+'.mat')
niqe_score = niqe_score_mat['features_norm22']
elif data_name == 'LiveVQC':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/LiveVQC/' + flickr_ids[v] + '.mat')
lgn_feature = lgn_feature_mat['LGN_features']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/livevqc240/' + flickr_ids[v] + '.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/LiveVQC/'+ flickr_ids[v] + '.mat')
niqe_score = niqe_score_mat['features_norm22']
        lgn_feature = np.asarray(lgn_feature, dtype=float)
        lgn_feature = clear_data(lgn_feature)
        V1_feature = np.asarray(V1_feature, dtype=float)
V1_feature = clear_data(V1_feature)
pca = PCA(n_components=pca_d)
pca.fit(lgn_feature)
lgn_PCA = pca.transform(lgn_feature)
pca = PCA(n_components=pca_d)
pca.fit(V1_feature)
V1_PCA = pca.transform(V1_feature)
lgn_score = compute_lgn_curvature(lgn_PCA)
v1_score = compute_v1_curvature(V1_PCA)
lgn_quality[v] = math.log(np.mean(lgn_score))
V1_quality[v] = math.log(np.mean(v1_score))
niqe_quality[v] = np.mean(niqe_score)
time_end = time.time()
#print('Video {}, overall {} seconds elapsed...'.format(
# v, time_end - time_start))
print(240)
temporal_quality = V1_quality + lgn_quality
data = temporal_quality.squeeze()
data = data[~np.isnan(data)]
data = data[~np.isposinf(data)]
temporal_quality = data[~np.isneginf(data)]
mu = np.mean(temporal_quality)
sigma = np.std(temporal_quality)
mu_niqe = np.mean(niqe_quality)
sigma_niqe = np.std(niqe_quality)
niqe_quality = (niqe_quality-mu_niqe)/sigma_niqe*sigma+mu
print(mu_niqe, sigma_niqe, sigma, mu, len(temporal_quality))
fused_quality = (V1_quality + lgn_quality) * niqe_quality
fused_quality2 = (V1_quality) * niqe_quality
curvage_mos = fused_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('overall:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
curvage_mos = (V1_quality + lgn_quality)
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('temporal:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = lgn_quality
fused_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), fused_mos)
print('lgn_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = V1_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('V1_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
for v in range(data_length):
time_start = time.time()
if data_name == 'KoNViD':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/KoNViD/' + str(flickr_ids[v]) + '.mp4.mat')
lgn_feature = lgn_feature_mat['LGN_features_level6']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/konvid1k480/' + str(flickr_ids[v]) + '.mp4.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/KoNViD/'+str(flickr_ids[v])+'.mat')
niqe_score = niqe_score_mat['features_norm22']
elif data_name == 'LiveVQC':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/LiveVQC/' + flickr_ids[v] + '.mat')
lgn_feature = lgn_feature_mat['LGN_features']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/livevqc480/' + flickr_ids[v] + '.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/LiveVQC/'+ flickr_ids[v] + '.mat')
niqe_score = niqe_score_mat['features_norm22']
        lgn_feature = np.asarray(lgn_feature, dtype=float)
        lgn_feature = clear_data(lgn_feature)
        V1_feature = np.asarray(V1_feature, dtype=float)
V1_feature = clear_data(V1_feature)
pca = PCA(n_components=pca_d)
pca.fit(lgn_feature)
lgn_PCA = pca.transform(lgn_feature)
pca = PCA(n_components=pca_d)
pca.fit(V1_feature)
V1_PCA = pca.transform(V1_feature)
lgn_score = compute_lgn_curvature(lgn_PCA)
v1_score = compute_v1_curvature(V1_PCA)
lgn_quality[v] = math.log(np.mean(lgn_score))
V1_quality[v] = math.log(np.mean(v1_score))
niqe_quality[v] = np.mean(niqe_score)
time_end = time.time()
#print('Video {}, overall {} seconds elapsed...'.format(
# v, time_end - time_start))
temporal_quality = V1_quality + lgn_quality
data = temporal_quality.squeeze()
data = data[~np.isnan(data)]
data = data[~np.isposinf(data)]
temporal_quality = data[~np.isneginf(data)]
mu = np.mean(temporal_quality)
sigma = np.std(temporal_quality)
mu_niqe = np.mean(niqe_quality)
sigma_niqe = np.std(niqe_quality)
niqe_quality = (niqe_quality-mu_niqe)/sigma_niqe*sigma+mu
print(mu_niqe, sigma_niqe, sigma, mu, len(temporal_quality))
fused_quality = (V1_quality + lgn_quality) * niqe_quality
fused_quality2 = (V1_quality) * niqe_quality
print(480)
curvage_mos = fused_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('overall:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
curvage_mos = (V1_quality + lgn_quality)
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('temporal:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = lgn_quality
fused_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), fused_mos)
print('lgn_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = V1_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('V1_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
for v in range(data_length):
time_start = time.time()
if data_name == 'KoNViD':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/KoNViD/' + str(flickr_ids[v]) + '.mp4.mat')
lgn_feature = lgn_feature_mat['LGN_features_level6']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/konvid1k960/' + str(flickr_ids[v]) + '.mp4.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/KoNViD/'+str(flickr_ids[v])+'.mat')
niqe_score = niqe_score_mat['features_norm22']
elif data_name == 'LiveVQC':
lgn_feature_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/lgn/LiveVQC/' + flickr_ids[v] + '.mat')
lgn_feature = lgn_feature_mat['LGN_features']
V1_feature = np.load('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/resolution/livevqc960/' + flickr_ids[v] + '.npy')
niqe_score_mat = scipy.io.loadmat('/mnt/lustre/lliao/Sourcecode/TPAMI-VQA/V1_extraction/features/NIQE/LiveVQC/'+ flickr_ids[v] + '.mat')
niqe_score = niqe_score_mat['features_norm22']
        lgn_feature = np.asarray(lgn_feature, dtype=float)
        lgn_feature = clear_data(lgn_feature)
        V1_feature = np.asarray(V1_feature, dtype=float)
V1_feature = clear_data(V1_feature)
pca = PCA(n_components=pca_d)
pca.fit(lgn_feature)
lgn_PCA = pca.transform(lgn_feature)
pca = PCA(n_components=pca_d)
pca.fit(V1_feature)
V1_PCA = pca.transform(V1_feature)
lgn_score = compute_lgn_curvature(lgn_PCA)
v1_score = compute_v1_curvature(V1_PCA)
lgn_quality[v] = math.log(np.mean(lgn_score))
V1_quality[v] = math.log(np.mean(v1_score))
niqe_quality[v] = np.mean(niqe_score)
time_end = time.time()
#print('Video {}, overall {} seconds elapsed...'.format(
# v, time_end - time_start))
temporal_quality = V1_quality + lgn_quality
data = temporal_quality.squeeze()
data = data[~np.isnan(data)]
data = data[~np.isposinf(data)]
temporal_quality = data[~np.isneginf(data)]
mu = np.mean(temporal_quality)
sigma = np.std(temporal_quality)
mu_niqe = np.mean(niqe_quality)
sigma_niqe = np.std(niqe_quality)
niqe_quality = (niqe_quality-mu_niqe)/sigma_niqe*sigma+mu
print(mu_niqe, sigma_niqe, sigma, mu, len(temporal_quality))
fused_quality = (V1_quality + lgn_quality) * niqe_quality
fused_quality2 = (V1_quality) * niqe_quality
print(960)
curvage_mos = fused_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('overall:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
curvage_mos = (V1_quality + lgn_quality)
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('temporal:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = lgn_quality
fused_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), fused_mos)
print('lgn_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True))
#
curvage_mos = V1_quality
tem_mos = mos
curvage_mos = fit_curve(curvage_mos.squeeze(), tem_mos)
print('V1_quality:', compute_metrics(tem_mos.squeeze(), curvage_mos.squeeze(), haveFit=True)) | 15,842 | 37.641463 | 157 | py |
BVQI | BVQI-master/V1_extraction/.ipynb_checkpoints/utilities-checkpoint.py | import numpy as np
from sklearn import linear_model
import scipy
from scipy.optimize import curve_fit
from sklearn.metrics import mean_squared_error
import cv2
from scipy import signal
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt  # required by plot_scatter below
def compute_v1_curvature(features):
len = features.shape[0]
curvatures = np.zeros((len-2, 1))
theta = np.zeros((len-2, 1))
distance = np.zeros((len-2, 1))
for fn in range(1, len - 1):
prev = features[fn - 1, :]
cur = features[fn, :]
next = features[fn + 1, :]
numerator = np.dot((next - cur).T, (cur-prev)).squeeze()
denominator = np.linalg.norm(next - cur) * np.linalg.norm(cur - prev)
if denominator<0.0001 or np.abs(numerator)<0.0001:
theta = 3.141592/2
else:
theta = np.arccos(numerator / (0.000001+denominator))
cos_alpha = theta*np.power(np.linalg.norm(next - prev), 1)
#cos_alpha = np.arccos(numerator / (0.000001+denominator))*np.power(np.linalg.norm(next - prev), 0.5)
curvatures[fn - 1] = cos_alpha
# theta[fn - 1] = np.arccos(numerator / (0.000001+denominator))
# distance[fn - 1] = np.linalg.norm(next - prev)
#
# if np.isnan(theta[fn - 1])| np.isposinf(theta[fn - 1]) | np.isneginf(theta[fn - 1]):
# theta = 0
# mu = np.mean(distance)
# sigma = np.std(distance)
#
# mu_niqe = np.mean(theta)
# sigma_niqe = np.std(theta)
#
# theta = (theta-mu_niqe)/sigma_niqe*sigma+mu
# for fn in range(1, len - 1):
# curvatures[fn - 1] = (theta[fn - 1]-np.min(theta))*distance[fn - 1]
# for fn in range(1, len - 1):
# curvatures[fn - 1] = theta[fn - 1]*distance[fn - 1]/np.mean(distance)
return curvatures
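# Illustration (not part of the original pipeline): each frame triplet contributes
# angle(next - cur, cur - prev) scaled by ||next - prev||, so a straight trajectory
# in feature space scores near zero, e.g.
#   feats = np.outer(np.arange(5), np.ones(3))   # collinear points
#   compute_v1_curvature(feats)                  # -> values close to 0
# while an oscillating trajectory yields large angles and hence larger curvature.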
def compute_discrete_v1_curvature(features):
len = features.shape[0]
curvatures = np.zeros((len//3, 1))
theta = np.zeros((len//3, 1))
distance = np.zeros((len//3, 1))
for fn in range(0, len//3):
prev = features[fn*3]
cur = features[fn*3+1]
next = features[fn*3+2]
numerator = np.dot((next - cur).T, (cur-prev)).squeeze()
denominator = np.linalg.norm(next - cur) * np.linalg.norm(cur - prev)
if denominator<0.0001 or np.abs(numerator)<0.0001:
theta = 3.141592/2
else:
theta = np.arccos(numerator / (0.000001+denominator))
cos_alpha = theta*np.power(np.linalg.norm(next - prev), 1)
#cos_alpha = np.arccos(numerator / (0.000001+denominator))*np.power(np.linalg.norm(next - prev), 0.5)
        curvatures[fn] = cos_alpha
# theta[fn - 1] = np.arccos(numerator / (0.000001+denominator))
# distance[fn - 1] = np.linalg.norm(next - prev)
#
# if np.isnan(theta[fn - 1])| np.isposinf(theta[fn - 1]) | np.isneginf(theta[fn - 1]):
# theta = 0
# mu = np.mean(distance)
# sigma = np.std(distance)
#
# mu_niqe = np.mean(theta)
# sigma_niqe = np.std(theta)
#
# theta = (theta-mu_niqe)/sigma_niqe*sigma+mu
# for fn in range(1, len - 1):
# curvatures[fn - 1] = (theta[fn - 1]-np.min(theta))*distance[fn - 1]
# for fn in range(1, len - 1):
# curvatures[fn - 1] = theta[fn - 1]*distance[fn - 1]/np.mean(distance)
return curvatures
def compute_lgn_curvature(features):
len = features.shape[0]
curvatures = np.zeros((len-2, 1))
theta = np.zeros((len-2, 1))
distance = np.zeros((len-2, 1))
for fn in range(1, len - 1):
prev = features[fn - 1, :]
cur = features[fn, :]
next = features[fn + 1, :]
numerator = np.dot((next - cur).T, (cur-prev)).squeeze()
denominator = np.linalg.norm(next - cur) * np.linalg.norm(cur - prev)
if denominator<0.0001 or np.abs(numerator)<0.0001:
theta = 3.141592/2
else:
theta = np.arccos(numerator / (0.000001+denominator))
cos_alpha = np.power(theta*np.power(np.linalg.norm(next - prev), 0.25), 1)
#cos_alpha = np.arccos(numerator / (0.000001+denominator))*np.power(np.linalg.norm(next - prev), 0.5)
curvatures[fn - 1] = cos_alpha
return curvatures
def linear_pred(features, k):
len = features.shape[0]
linear_error = np.zeros((len - k, 1))
for fn in range(1, len - k):
prev = features[fn:fn+k, :].T
cur = features[fn+k, :]
lr = linear_model.LinearRegression()
lr.fit(prev, cur)
pred = lr.predict(prev)
linear_error[fn] = np.linalg.norm(cur-pred, 1)
return linear_error
def extract_img_features(video_name, rate):
vid_cap = cv2.VideoCapture(video_name)
img_features = []
while 1:
ret, img = vid_cap.read()
if not ret:
break
width = int(img.shape[1] * rate)
height = int(img.shape[0] * rate)
img = cv2.resize(img, dsize=(width,height))
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_gray_norm = cv2.normalize(img_gray,
None,
alpha=0,
beta=1,
norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F)
img_features.append(
img_gray_norm.reshape(
img_gray_norm.shape[0] * img_gray_norm.shape[1], 1))
img_features = np.array(img_features).squeeze()
return img_features
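# Usage sketch (file name is illustrative): each row of the returned array is one
# resized, min-max normalised grayscale frame, flattened to length H*W:
#   frames = extract_img_features('some_video.mp4', rate=0.25)   # shape (T, H*W)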
def geometric_mean2(data):
total = 1
for i in data:
total*=i
return pow(total, 1/len(data))
def logistic_func(X, bayta1, bayta2, bayta3, bayta4):
# 4-parameter logistic function
logisticPart = 1 + np.exp(
np.negative(np.divide(X - bayta3, np.abs(bayta4))))
yhat = bayta2 + np.divide(bayta1 - bayta2, logisticPart)
return yhat
def compute_metrics(y_pred, y, haveFit=False):
'''
compute metrics btw predictions & labels
'''
# compute SRCC & KRCC
SRCC = scipy.stats.spearmanr(y, y_pred)[0]
try:
KRCC = scipy.stats.kendalltau(y, y_pred)[0]
except:
KRCC = scipy.stats.kendalltau(y, y_pred, method='asymptotic')[0]
if not haveFit:
# logistic regression btw y_pred & y
beta_init = [np.max(y), np.min(y), np.mean(y_pred), 0.5]
popt, _ = curve_fit(logistic_func,
y_pred,
y,
p0=beta_init,
maxfev=int(1e8))
y_pred_logistic = logistic_func(y_pred, *popt)
else:
y_pred_logistic = y_pred
# compute PLCC RMSE
PLCC = scipy.stats.pearsonr(y, y_pred_logistic)[0]
RMSE = np.sqrt(mean_squared_error(y, y_pred_logistic))
return [SRCC, PLCC, KRCC, RMSE]
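# Usage sketch (toy numbers, illustration only): a monotonically related pair of
# score vectors gives an SRCC of 1 and, after the logistic fit, a high PLCC:
#   y_pred = np.array([0.1, 0.4, 0.35, 0.8])
#   y_true = np.array([1.0, 3.0, 2.5, 4.0])
#   srcc, plcc, krcc, rmse = compute_metrics(y_pred, y_true)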
def logistic_func(X, bayta1, bayta2, bayta3, bayta4):
# 4-parameter logistic function
logisticPart = 1 + np.exp(
np.negative(np.divide(X - bayta3, np.abs(bayta4))))
yhat = bayta2 + np.divide(bayta1 - bayta2, logisticPart)
return yhat
def plot_scatter(input_x,
input_y,
save_path,
xlabel='MOS',
ylabel='Curvatures',
haveFit=False):
    # Visualization: scatter predictions against labels with a linear trend line.
p = np.polyfit(input_x, input_y, 1).squeeze()
min_val = np.min(input_x)
max_val = np.max(input_x)
x = np.linspace(min_val, max_val, 1000)
f = np.poly1d(p)
y = f(x)
srcc, plcc, krcc, rmse = compute_metrics(input_x.squeeze(),
input_y.squeeze(),
haveFit=haveFit)
plt.rcParams['figure.figsize'] = (8, 6)
plt.scatter(input_x, input_y, s=7.5, c='b', marker='D')
plt.plot(x, y, c='r')
plt.xlabel(xlabel, fontsize=20)
plt.ylabel(ylabel, fontsize=20)
plt.title('SRCC: {} | PLCC: {} | RMSE: {}'.format(
round(srcc, 3), round(plcc, 3), round(rmse, 3)), fontsize=20)
    # plt.xlim(2.37, 3.78) # set the x-axis range
# plt.ylim(2.37, 3.78)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
    # Save before showing; calling savefig() after show() can write an empty figure.
    plt.savefig(save_path)
    plt.show()
plt.clf()
def clear_data(data):
data[np.isnan(data) | np.isposinf(data)
| np.isneginf(data)] = 0
return data
def clear_mos(data, mos):
data = data.squeeze()
mos = mos[~np.isnan(data)]
data = data[~np.isnan(data)]
mos = mos[~np.isposinf(data)]
data = data[~np.isposinf(data)]
mos = mos[~np.isneginf(data)]
data = data[~np.isneginf(data)]
return data, mos
def fit_curve(x, y):
"""fit x to y"""
# logistic regression
beta_init = [np.max(y), np.min(y), np.mean(x), 0.5]
popt, _ = curve_fit(logistic_func, x, y, p0=beta_init, maxfev=int(1e8))
y_logistic = logistic_func(x, *popt)
return y_logistic
def hysteresis_pooling(chunk):
'''parameters'''
tau = 8 # 2-sec * 30 fps
comb_alpha = 0.2 # weighting
''' function body '''
chunk = np.asarray(chunk, dtype=np.float64)
chunk_length = len(chunk)
l = np.zeros(chunk_length)
m = np.zeros(chunk_length)
q = np.zeros(chunk_length)
for t in range(chunk_length):
''' calculate l[t] - the memory component '''
if t == 0: # corner case
l[t] = chunk[t]
else:
# get previous frame indices
idx_prev = slice(max(0, t-tau), max(0, t-1)+1)
# print(idx_prev)
# calculate min scores
l[t] = min(chunk[idx_prev])
# print("l[t]:", l[t])
''' compute m[t] - the current component '''
if t == chunk_length - 1: # corner case
m[t] = chunk[t]
else:
# get next frame indices
idx_next = slice(t, min(t + tau, chunk_length))
# print(idx_next)
# sort ascend order
v = np.sort(chunk[idx_next])
# generated Gaussian weight
win_len = len(v) * 2.0 - 1.0
win_sigma = win_len / 6.0
# print(win_len, win_sigma)
gaussian_win = signal.gaussian(win_len, win_sigma)
gaussian_half_win = gaussian_win[len(v)-1:]
# normalize gaussian descend kernel
gaussian_half_win = np.divide(gaussian_half_win, np.sum(gaussian_half_win))
# print(gaussian_half_win)
m[t] = sum([x * y for x, y in zip(v, gaussian_half_win)])
# print("m[t]:", m[t])
''' combine l[t] and m[t] into one q[t] '''
q = comb_alpha * l + (1.0 - comb_alpha) * m
# print(q)
# print(np.mean(q))
return q, np.mean(q) | 10,552 | 32.715655 | 109 | py |
BVQI | BVQI-master/V1_extraction/.ipynb_checkpoints/extract_v1_features_480-checkpoint.py | import os
import cv2
import torch
import numpy as np
import pandas as pd
from time import time
from sklearn import decomposition
from torchvision.transforms import transforms
from gabor_filter import GaborFilters
if __name__ == '__main__':
data_name = ''
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
if data_name == 'konvid1k':
data_path = '/mnt/lustre/lliao/Dataset/KoNViD_1k/KoNViD_1k_videos/'
elif data_name == 'livevqc':
data_path = '/mnt/lustre/lliao/Dataset/LIVE-VQC/Video'
else:
raise NotImplementedError
width = 480
height = 270
feat_path = './features'
save_path = os.path.join(feat_path, data_name + str(width))
if not os.path.exists(save_path): os.makedirs(save_path)
meta_data = pd.read_csv(
os.path.join(feat_path, data_name + '_metadata.csv'))
video_num = len(meta_data)
scale = 6
orientations = 8
kernel_size = 39
row_downsample = 4
column_downsample = 4
    transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Resize((height, width))])
gb = GaborFilters(scale,
orientations, (kernel_size - 1) // 2,
row_downsample,
column_downsample,
device=device)
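    # For reference, each frame then yields a Gabor feature vector of length
    # scale * orientations * round(width / column_downsample) * round(height / row_downsample)
    # = 6 * 8 * 120 * 68 = 391,680 values per frame (reduced later by the per-video PCA).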
for vn in range(video_num):
if data_name == 'konvid1k':
video_name = os.path.join(data_path,
'{}.mp4'.format(meta_data.flickr_id[vn]))
elif data_name == 'livevqc':
video_name = os.path.join(data_path, meta_data.File[vn])
video_capture = cv2.VideoCapture(video_name)
frame_num = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
v1_features = torch.zeros(
frame_num,
(scale * orientations * round(width / column_downsample) *
round(height / row_downsample)))
start_time = time()
count = 0
while True:
success, frame = video_capture.read()
if not success:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            frame = transform(frame)
frame_imag = torch.zeros(frame.size())
frame = torch.stack((frame, frame_imag), 3)
frame = torch.view_as_complex(frame)
frame = frame[None, :, :, :]
frame = frame.to(device)
v1_features[count, :] = gb(frame).detach().cpu()
count += 1
v1_features = torch.nan_to_num(v1_features)
v1_features = v1_features.numpy()
pca = decomposition.PCA()
v1_features = pca.fit_transform(v1_features)
end_time = time()
print('Video {}, {}s elapsed running in {}'.format(
vn, end_time - start_time, device))
np.save(
os.path.join(save_path,
os.path.split(video_name)[-1] + '.npy'), v1_features)
| 2,991 | 30.829787 | 79 | py |
BVQI | BVQI-master/buona_vista/version.py | __version__ = "0.2.0"
def parse_version_info(version_str):
version_info = []
for x in version_str.split("."):
if x.isdigit():
version_info.append(int(x))
elif x.find("rc") != -1:
patch_version = x.split("rc")
version_info.append(int(patch_version[0]))
version_info.append(f"rc{patch_version[1]}")
return tuple(version_info)
version_info = parse_version_info(__version__)
| 451 | 25.588235 | 56 | py |
BVQI | BVQI-master/buona_vista/__init__.py | from .datasets import *
| 24 | 11.5 | 23 | py |
BVQI | BVQI-master/buona_vista/datasets/fusion_datasets.py | import copy
import glob
import os
import os.path as osp
import random
from functools import lru_cache
import cv2
import decord
import numpy as np
import skvideo.io
import torch
import torchvision
from decord import VideoReader, cpu, gpu
from tqdm import tqdm
random.seed(42)
decord.bridge.set_bridge("torch")
def get_spatial_fragments(
video,
fragments_h=7,
fragments_w=7,
fsize_h=32,
fsize_w=32,
aligned=32,
nfrags=1,
random=False,
random_upsample=False,
fallback_type="upsample",
**kwargs,
):
size_h = fragments_h * fsize_h
size_w = fragments_w * fsize_w
## video: [C,T,H,W]
## situation for images
if video.shape[1] == 1:
aligned = 1
dur_t, res_h, res_w = video.shape[-3:]
ratio = min(res_h / size_h, res_w / size_w)
if fallback_type == "upsample" and ratio < 1:
ovideo = video
video = torch.nn.functional.interpolate(
video / 255.0, scale_factor=1 / ratio, mode="bilinear"
)
video = (video * 255.0).type_as(ovideo)
if random_upsample:
        # `random` is the boolean flag above (it shadows the random module here),
        # so draw the upsampling ratio from numpy's RNG instead.
        ovideo = video
        randratio = np.random.random() * 0.5 + 1
video = torch.nn.functional.interpolate(
video / 255.0, scale_factor=randratio, mode="bilinear"
)
video = (video * 255.0).type_as(ovideo)
assert dur_t % aligned == 0, "Please provide match vclip and align index"
size = size_h, size_w
## make sure that sampling will not run out of the picture
hgrids = torch.LongTensor(
[min(res_h // fragments_h * i, res_h - fsize_h) for i in range(fragments_h)]
)
wgrids = torch.LongTensor(
[min(res_w // fragments_w * i, res_w - fsize_w) for i in range(fragments_w)]
)
hlength, wlength = res_h // fragments_h, res_w // fragments_w
if random:
print("This part is deprecated. Please remind that.")
if res_h > fsize_h:
rnd_h = torch.randint(
res_h - fsize_h, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_h = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
if res_w > fsize_w:
rnd_w = torch.randint(
res_w - fsize_w, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_w = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
else:
if hlength > fsize_h:
rnd_h = torch.randint(
hlength - fsize_h, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_h = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
if wlength > fsize_w:
rnd_w = torch.randint(
wlength - fsize_w, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_w = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
target_video = torch.zeros(video.shape[:-2] + size).to(video.device)
# target_videos = []
for i, hs in enumerate(hgrids):
for j, ws in enumerate(wgrids):
for t in range(dur_t // aligned):
t_s, t_e = t * aligned, (t + 1) * aligned
h_s, h_e = i * fsize_h, (i + 1) * fsize_h
w_s, w_e = j * fsize_w, (j + 1) * fsize_w
if random:
h_so, h_eo = rnd_h[i][j][t], rnd_h[i][j][t] + fsize_h
w_so, w_eo = rnd_w[i][j][t], rnd_w[i][j][t] + fsize_w
else:
h_so, h_eo = hs + rnd_h[i][j][t], hs + rnd_h[i][j][t] + fsize_h
w_so, w_eo = ws + rnd_w[i][j][t], ws + rnd_w[i][j][t] + fsize_w
target_video[:, t_s:t_e, h_s:h_e, w_s:w_e] = video[
:, t_s:t_e, h_so:h_eo, w_so:w_eo
]
# target_videos.append(video[:,t_s:t_e,h_so:h_eo,w_so:w_eo])
# target_video = torch.stack(target_videos, 0).reshape((dur_t // aligned, fragments, fragments,) + target_videos[0].shape).permute(3,0,4,1,5,2,6)
# target_video = target_video.reshape((-1, dur_t,) + size) ## Splicing Fragments
return target_video
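# Usage sketch (shapes are illustrative): with the defaults (a 7x7 grid of 32x32
# patches, aligned over 32-frame chunks) a [C, T, H, W] clip is re-assembled into
# a [C, T, 224, 224] fragment mosaic, e.g.
#   clip = torch.rand(3, 32, 360, 640) * 255
#   frags = get_spatial_fragments(clip)      # -> torch.Size([3, 32, 224, 224])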
@lru_cache
def get_resize_function(size_h, size_w, target_ratio=1, random_crop=False):
if random_crop:
return torchvision.transforms.RandomResizedCrop(
(size_h, size_w), scale=(0.40, 1.0)
)
if target_ratio > 1:
size_h = int(target_ratio * size_w)
assert size_h > size_w
elif target_ratio < 1:
size_w = int(size_h / target_ratio)
assert size_w > size_h
return torchvision.transforms.Resize((size_h, size_w))
def get_resized_video(
video, size_h=224, size_w=224, random_crop=False, arp=False, **kwargs,
):
video = video.permute(1, 0, 2, 3)
resize_opt = get_resize_function(
size_h, size_w, video.shape[-2] / video.shape[-1] if arp else 1, random_crop
)
video = resize_opt(video).permute(1, 0, 2, 3)
return video
def get_arp_resized_video(
video, short_edge=224, train=False, **kwargs,
):
if train: ## if during training, will random crop into square and then resize
res_h, res_w = video.shape[-2:]
ori_short_edge = min(video.shape[-2:])
if res_h > ori_short_edge:
rnd_h = random.randrange(res_h - ori_short_edge)
video = video[..., rnd_h : rnd_h + ori_short_edge, :]
elif res_w > ori_short_edge:
rnd_w = random.randrange(res_w - ori_short_edge)
            video = video[..., :, rnd_w : rnd_w + ori_short_edge]
ori_short_edge = min(video.shape[-2:])
scale_factor = short_edge / ori_short_edge
ovideo = video
video = torch.nn.functional.interpolate(
        video / 255.0, scale_factor=scale_factor, mode="bilinear"
)
video = (video * 255.0).type_as(ovideo)
return video
def get_arp_fragment_video(
video, short_fragments=7, fsize=32, train=False, **kwargs,
):
if (
train
): ## if during training, will random crop into square and then get fragments
res_h, res_w = video.shape[-2:]
ori_short_edge = min(video.shape[-2:])
if res_h > ori_short_edge:
rnd_h = random.randrange(res_h - ori_short_edge)
video = video[..., rnd_h : rnd_h + ori_short_edge, :]
elif res_w > ori_short_edge:
rnd_w = random.randrange(res_w - ori_short_edge)
            video = video[..., :, rnd_w : rnd_w + ori_short_edge]
kwargs["fsize_h"], kwargs["fsize_w"] = fsize, fsize
res_h, res_w = video.shape[-2:]
if res_h > res_w:
kwargs["fragments_w"] = short_fragments
kwargs["fragments_h"] = int(short_fragments * res_h / res_w)
else:
kwargs["fragments_h"] = short_fragments
kwargs["fragments_w"] = int(short_fragments * res_w / res_h)
return get_spatial_fragments(video, **kwargs)
def get_cropped_video(
video, size_h=224, size_w=224, **kwargs,
):
kwargs["fragments_h"], kwargs["fragments_w"] = 1, 1
kwargs["fsize_h"], kwargs["fsize_w"] = size_h, size_w
return get_spatial_fragments(video, **kwargs)
def get_single_view(
video, sample_type="aesthetic", **kwargs,
):
if sample_type.startswith("aesthetic"):
video = get_resized_video(video, **kwargs)
elif sample_type.startswith("technical"):
video = get_spatial_fragments(video, **kwargs)
elif sample_type == "original":
return video
return video
def spatial_temporal_view_decomposition(
video_path, sample_types, samplers, is_train=False, augment=False,
):
video = {}
if video_path.endswith(".yuv"):
print("This part will be deprecated due to large memory cost.")
## This is only an adaptation to LIVE-Qualcomm
ovideo = skvideo.io.vread(
video_path, 1080, 1920, inputdict={"-pix_fmt": "yuvj420p"}
)
for stype in samplers:
frame_inds = samplers[stype](ovideo.shape[0], is_train)
imgs = [torch.from_numpy(ovideo[idx]) for idx in frame_inds]
video[stype] = torch.stack(imgs, 0).permute(3, 0, 1, 2)
del ovideo
else:
vreader = VideoReader(video_path)
### Avoid duplicated video decoding!!! Important!!!!
all_frame_inds = []
frame_inds = {}
for stype in samplers:
frame_inds[stype] = samplers[stype](len(vreader), is_train)
all_frame_inds.append(frame_inds[stype])
### Each frame is only decoded one time!!!
all_frame_inds = np.concatenate(all_frame_inds, 0)
frame_dict = {idx: vreader[idx] for idx in np.unique(all_frame_inds)}
for stype in samplers:
imgs = [frame_dict[idx] for idx in frame_inds[stype]]
video[stype] = torch.stack(imgs, 0).permute(3, 0, 1, 2)
sampled_video = {}
for stype, sopt in sample_types.items():
sampled_video[stype] = get_single_view(video[stype], stype, **sopt)
return sampled_video, frame_inds
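# Usage sketch (path and options are illustrative, mirroring how
# ViewDecompositionDataset below builds its samplers): `sample_types` maps a branch
# name to spatial-sampling options, and `samplers` maps the same name to a frame
# sampler, e.g.
#   samplers = {"technical": UnifiedFrameSampler(32 // 8, 8, 2, 3)}
#   views, inds = spatial_temporal_view_decomposition(
#       "some_video.mp4", {"technical": {"fragments_h": 7, "fragments_w": 7}}, samplers
#   )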
import random
import numpy as np
class UnifiedFrameSampler:
def __init__(
self, fsize_t, fragments_t, frame_interval=1, num_clips=1, drop_rate=0.0,
):
self.fragments_t = fragments_t
self.fsize_t = fsize_t
self.size_t = fragments_t * fsize_t
self.frame_interval = frame_interval
self.num_clips = num_clips
self.drop_rate = drop_rate
def get_frame_indices(self, num_frames, train=False):
tgrids = np.array(
[num_frames // self.fragments_t * i for i in range(self.fragments_t)],
dtype=np.int32,
)
tlength = num_frames // self.fragments_t
if tlength > self.fsize_t * self.frame_interval:
rnd_t = np.random.randint(
0, tlength - self.fsize_t * self.frame_interval, size=len(tgrids)
)
else:
rnd_t = np.zeros(len(tgrids), dtype=np.int32)
ranges_t = (
np.arange(self.fsize_t)[None, :] * self.frame_interval
+ rnd_t[:, None]
+ tgrids[:, None]
)
drop = random.sample(
list(range(self.fragments_t)), int(self.fragments_t * self.drop_rate)
)
dropped_ranges_t = []
for i, rt in enumerate(ranges_t):
if i not in drop:
dropped_ranges_t.append(rt)
return np.concatenate(dropped_ranges_t)
def __call__(self, total_frames, train=False, start_index=0):
frame_inds = []
if self.fsize_t < 0:
return np.arange(total_frames)
for i in range(self.num_clips):
frame_inds += [self.get_frame_indices(total_frames)]
frame_inds = np.concatenate(frame_inds)
frame_inds = np.mod(frame_inds + start_index, total_frames)
return frame_inds.astype(np.int32)
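# Usage sketch: the sampler above returns fsize_t indices from each of fragments_t
# temporal chunks, per clip, e.g.
#   sampler = UnifiedFrameSampler(fsize_t=4, fragments_t=8, frame_interval=2)
#   inds = sampler(240)    # 8 chunks x 4 frames = 32 indices into a 240-frame video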
class ViewDecompositionDataset(torch.utils.data.Dataset):
def __init__(self, opt):
## opt is a dictionary that includes options for video sampling
super().__init__()
self.weight = opt.get("weight", 0.5)
self.video_infos = []
self.ann_file = opt["anno_file"]
self.data_prefix = opt["data_prefix"]
self.opt = opt
self.sample_types = opt["sample_types"]
self.data_backend = opt.get("data_backend", "disk")
self.augment = opt.get("augment", False)
if self.data_backend == "petrel":
from petrel_client import client
self.client = client.Client(enable_mc=True)
self.phase = opt["phase"]
self.crop = opt.get("random_crop", False)
self.mean = torch.FloatTensor([123.675, 116.28, 103.53])
self.std = torch.FloatTensor([58.395, 57.12, 57.375])
self.samplers = {}
for stype, sopt in opt["sample_types"].items():
if "t_frag" not in sopt:
# resized temporal sampling for TQE in DOVER
self.samplers[stype] = UnifiedFrameSampler(
sopt["clip_len"], sopt["num_clips"], sopt["frame_interval"]
)
else:
# temporal sampling for AQE in DOVER
self.samplers[stype] = UnifiedFrameSampler(
sopt["clip_len"] // sopt["t_frag"],
sopt["t_frag"],
sopt["frame_interval"],
sopt["num_clips"],
)
print(
stype + " branch sampled frames:",
self.samplers[stype](240, self.phase == "train"),
)
if isinstance(self.ann_file, list):
self.video_infos = self.ann_file
else:
try:
with open(self.ann_file, "r") as fin:
for line in fin:
line_split = line.strip().split(",")
filename, _, _, label = line_split
label = float(label)
filename = osp.join(self.data_prefix, filename)
self.video_infos.append(dict(filename=filename, label=label))
except:
#### No Label Testing
video_filenames = []
for (root, dirs, files) in os.walk(self.data_prefix, topdown=True):
for file in files:
if file.endswith(".mp4"):
video_filenames += [os.path.join(root, file)]
print(len(video_filenames))
for filename in video_filenames:
self.video_infos.append(dict(filename=filename, label=-1))
def __getitem__(self, index):
video_info = self.video_infos[index]
filename = video_info["filename"]
label = video_info["label"]
try:
## Read Original Frames
## Process Frames
data, frame_inds = spatial_temporal_view_decomposition(
filename,
self.sample_types,
self.samplers,
self.phase == "train",
self.augment and (self.phase == "train"),
)
for k, v in data.items():
data[k] = ((v.permute(1, 2, 3, 0) - self.mean) / self.std).permute(
3, 0, 1, 2
)
data["num_clips"] = {}
for stype, sopt in self.sample_types.items():
data["num_clips"][stype] = sopt["num_clips"]
data["frame_inds"] = frame_inds
data["gt_label"] = label
data["name"] = filename # osp.basename(video_info["filename"])
except:
# exception flow
return {"name": filename}
return data
def __len__(self):
return len(self.video_infos)
| 14,666 | 34.257212 | 149 | py |
BVQI | BVQI-master/buona_vista/datasets/__init__.py | ## API for DOVER and its variants
from .basic_datasets import *
from .fusion_datasets import *
| 95 | 23 | 33 | py |
BVQI | BVQI-master/buona_vista/datasets/basic_datasets.py | import os.path as osp
import random
import cv2
import decord
import numpy as np
import skvideo.io
import torch
import torchvision
from decord import VideoReader, cpu, gpu
from tqdm import tqdm
random.seed(42)
decord.bridge.set_bridge("torch")
def get_spatial_fragments(
video,
fragments_h=7,
fragments_w=7,
fsize_h=32,
fsize_w=32,
aligned=32,
nfrags=1,
random=False,
fallback_type="upsample",
):
size_h = fragments_h * fsize_h
size_w = fragments_w * fsize_w
## situation for images
if video.shape[1] == 1:
aligned = 1
dur_t, res_h, res_w = video.shape[-3:]
ratio = min(res_h / size_h, res_w / size_w)
if fallback_type == "upsample" and ratio < 1:
ovideo = video
video = torch.nn.functional.interpolate(
video / 255.0, scale_factor=1 / ratio, mode="bilinear"
)
video = (video * 255.0).type_as(ovideo)
assert dur_t % aligned == 0, "Please provide match vclip and align index"
size = size_h, size_w
## make sure that sampling will not run out of the picture
hgrids = torch.LongTensor(
[min(res_h // fragments_h * i, res_h - fsize_h) for i in range(fragments_h)]
)
wgrids = torch.LongTensor(
[min(res_w // fragments_w * i, res_w - fsize_w) for i in range(fragments_w)]
)
hlength, wlength = res_h // fragments_h, res_w // fragments_w
if random:
print("This part is deprecated. Please remind that.")
if res_h > fsize_h:
rnd_h = torch.randint(
res_h - fsize_h, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_h = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
if res_w > fsize_w:
rnd_w = torch.randint(
res_w - fsize_w, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_w = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
else:
if hlength > fsize_h:
rnd_h = torch.randint(
hlength - fsize_h, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_h = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
if wlength > fsize_w:
rnd_w = torch.randint(
wlength - fsize_w, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_w = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
target_video = torch.zeros(video.shape[:-2] + size).to(video.device)
# target_videos = []
for i, hs in enumerate(hgrids):
for j, ws in enumerate(wgrids):
for t in range(dur_t // aligned):
t_s, t_e = t * aligned, (t + 1) * aligned
h_s, h_e = i * fsize_h, (i + 1) * fsize_h
w_s, w_e = j * fsize_w, (j + 1) * fsize_w
if random:
h_so, h_eo = rnd_h[i][j][t], rnd_h[i][j][t] + fsize_h
w_so, w_eo = rnd_w[i][j][t], rnd_w[i][j][t] + fsize_w
else:
h_so, h_eo = hs + rnd_h[i][j][t], hs + rnd_h[i][j][t] + fsize_h
w_so, w_eo = ws + rnd_w[i][j][t], ws + rnd_w[i][j][t] + fsize_w
target_video[:, t_s:t_e, h_s:h_e, w_s:w_e] = video[
:, t_s:t_e, h_so:h_eo, w_so:w_eo
]
# target_videos.append(video[:,t_s:t_e,h_so:h_eo,w_so:w_eo])
# target_video = torch.stack(target_videos, 0).reshape((dur_t // aligned, fragments, fragments,) + target_videos[0].shape).permute(3,0,4,1,5,2,6)
# target_video = target_video.reshape((-1, dur_t,) + size) ## Splicing Fragments
return target_video
class FragmentSampleFrames:
def __init__(self, fsize_t, fragments_t, frame_interval=1, num_clips=1):
self.fragments_t = fragments_t
self.fsize_t = fsize_t
self.size_t = fragments_t * fsize_t
self.frame_interval = frame_interval
self.num_clips = num_clips
def get_frame_indices(self, num_frames):
tgrids = np.array(
[num_frames // self.fragments_t * i for i in range(self.fragments_t)],
dtype=np.int32,
)
tlength = num_frames // self.fragments_t
if tlength > self.fsize_t * self.frame_interval:
rnd_t = np.random.randint(
0, tlength - self.fsize_t * self.frame_interval, size=len(tgrids)
)
else:
rnd_t = np.zeros(len(tgrids), dtype=np.int32)
ranges_t = (
np.arange(self.fsize_t)[None, :] * self.frame_interval
+ rnd_t[:, None]
+ tgrids[:, None]
)
return np.concatenate(ranges_t)
def __call__(self, total_frames, train=False, start_index=0):
frame_inds = []
for i in range(self.num_clips):
frame_inds += [self.get_frame_indices(total_frames)]
frame_inds = np.concatenate(frame_inds)
frame_inds = np.mod(frame_inds + start_index, total_frames)
return frame_inds
class SampleFrames:
def __init__(self, clip_len, frame_interval=1, num_clips=1):
self.clip_len = clip_len
self.frame_interval = frame_interval
self.num_clips = num_clips
def _get_train_clips(self, num_frames):
"""Get clip offsets in train mode.
It will calculate the average interval for selected frames,
and randomly shift them within offsets between [0, avg_interval].
If the total number of frames is smaller than clips num or origin
frames length, it will return all zero indices.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) // self.num_clips
if avg_interval > 0:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = base_offsets + np.random.randint(
avg_interval, size=self.num_clips
)
elif num_frames > max(self.num_clips, ori_clip_len):
clip_offsets = np.sort(
np.random.randint(num_frames - ori_clip_len + 1, size=self.num_clips)
)
elif avg_interval == 0:
ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips
clip_offsets = np.around(np.arange(self.num_clips) * ratio)
else:
            clip_offsets = np.zeros((self.num_clips,), dtype=np.int32)
return clip_offsets
def _get_test_clips(self, num_frames, start_index=0):
"""Get clip offsets in test mode.
Calculate the average interval for selected frames, and shift them
fixedly by avg_interval/2.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) / float(self.num_clips)
if num_frames > ori_clip_len - 1:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = (base_offsets + avg_interval / 2.0).astype(np.int32)
else:
clip_offsets = np.zeros((self.num_clips,), dtype=np.int32)
return clip_offsets
def __call__(self, total_frames, train=False, start_index=0):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if train:
clip_offsets = self._get_train_clips(total_frames)
else:
clip_offsets = self._get_test_clips(total_frames)
frame_inds = (
clip_offsets[:, None]
+ np.arange(self.clip_len)[None, :] * self.frame_interval
)
frame_inds = np.concatenate(frame_inds)
frame_inds = frame_inds.reshape((-1, self.clip_len))
frame_inds = np.mod(frame_inds, total_frames)
frame_inds = np.concatenate(frame_inds) + start_index
return frame_inds.astype(np.int32)
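# Usage sketch: SampleFrames draws num_clips evenly spaced clips of clip_len frames
# (strided by frame_interval), e.g.
#   sampler = SampleFrames(clip_len=32, frame_interval=2, num_clips=4)
#   inds = sampler(300)    # 4 clips x 32 frames = 128 indices into a 300-frame video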
class FastVQAPlusPlusDataset(torch.utils.data.Dataset):
def __init__(
self,
ann_file,
data_prefix,
frame_interval=2,
aligned=32,
fragments=(8, 8, 8),
fsize=(4, 32, 32),
num_clips=1,
nfrags=1,
cache_in_memory=False,
phase="test",
fallback_type="oversample",
):
"""
Fragments.
args:
fragments: G_f as in the paper.
fsize: S_f as in the paper.
nfrags: number of samples (spatially) as in the paper.
num_clips: number of samples (temporally) as in the paper.
"""
self.ann_file = ann_file
self.data_prefix = data_prefix
self.frame_interval = frame_interval
self.num_clips = num_clips
self.fragments = fragments
self.fsize = fsize
self.nfrags = nfrags
self.clip_len = fragments[0] * fsize[0]
self.aligned = aligned
self.fallback_type = fallback_type
self.sampler = FragmentSampleFrames(
fsize[0], fragments[0], frame_interval, num_clips
)
self.video_infos = []
self.phase = phase
self.mean = torch.FloatTensor([123.675, 116.28, 103.53])
self.std = torch.FloatTensor([58.395, 57.12, 57.375])
if isinstance(self.ann_file, list):
self.video_infos = self.ann_file
else:
with open(self.ann_file, "r") as fin:
for line in fin:
line_split = line.strip().split(",")
filename, _, _, label = line_split
label = float(label)
filename = osp.join(self.data_prefix, filename)
self.video_infos.append(dict(filename=filename, label=label))
if cache_in_memory:
self.cache = {}
for i in tqdm(range(len(self)), desc="Caching fragments"):
self.cache[i] = self.__getitem__(i, tocache=True)
else:
self.cache = None
def __getitem__(
self, index, tocache=False, need_original_frames=False,
):
if tocache or self.cache is None:
fx, fy = self.fragments[1:]
fsx, fsy = self.fsize[1:]
video_info = self.video_infos[index]
filename = video_info["filename"]
label = video_info["label"]
if filename.endswith(".yuv"):
video = skvideo.io.vread(
filename, 1080, 1920, inputdict={"-pix_fmt": "yuvj420p"}
)
frame_inds = self.sampler(video.shape[0], self.phase == "train")
imgs = [torch.from_numpy(video[idx]) for idx in frame_inds]
else:
vreader = VideoReader(filename)
frame_inds = self.sampler(len(vreader), self.phase == "train")
frame_dict = {idx: vreader[idx] for idx in np.unique(frame_inds)}
imgs = [frame_dict[idx] for idx in frame_inds]
img_shape = imgs[0].shape
video = torch.stack(imgs, 0)
video = video.permute(3, 0, 1, 2)
if self.nfrags == 1:
vfrag = get_spatial_fragments(
video,
fx,
fy,
fsx,
fsy,
aligned=self.aligned,
fallback_type=self.fallback_type,
)
else:
vfrag = get_spatial_fragments(
video,
fx,
fy,
fsx,
fsy,
aligned=self.aligned,
fallback_type=self.fallback_type,
)
for i in range(1, self.nfrags):
vfrag = torch.cat(
(
vfrag,
get_spatial_fragments(
video,
fx,
fy,
fsx,
fsy,
aligned=self.aligned,
fallback_type=self.fallback_type,
),
),
1,
)
if tocache:
return (vfrag, frame_inds, label, img_shape)
else:
vfrag, frame_inds, label, img_shape = self.cache[index]
vfrag = ((vfrag.permute(1, 2, 3, 0) - self.mean) / self.std).permute(3, 0, 1, 2)
data = {
"video": vfrag.reshape(
(-1, self.nfrags * self.num_clips, self.clip_len) + vfrag.shape[2:]
).transpose(
0, 1
), # B, V, T, C, H, W
"frame_inds": frame_inds,
"gt_label": label,
"original_shape": img_shape,
}
if need_original_frames:
data["original_video"] = video.reshape(
(-1, self.nfrags * self.num_clips, self.clip_len) + video.shape[2:]
).transpose(0, 1)
return data
def __len__(self):
return len(self.video_infos)
class FragmentVideoDataset(torch.utils.data.Dataset):
def __init__(
self,
ann_file,
data_prefix,
clip_len=32,
frame_interval=2,
num_clips=4,
aligned=32,
fragments=7,
fsize=32,
nfrags=1,
cache_in_memory=False,
phase="test",
):
"""
Fragments.
args:
fragments: G_f as in the paper.
fsize: S_f as in the paper.
nfrags: number of samples as in the paper.
"""
self.ann_file = ann_file
self.data_prefix = data_prefix
self.clip_len = clip_len
self.frame_interval = frame_interval
self.num_clips = num_clips
self.fragments = fragments
self.fsize = fsize
self.nfrags = nfrags
self.aligned = aligned
self.sampler = SampleFrames(clip_len, frame_interval, num_clips)
self.video_infos = []
self.phase = phase
self.mean = torch.FloatTensor([123.675, 116.28, 103.53])
self.std = torch.FloatTensor([58.395, 57.12, 57.375])
if isinstance(self.ann_file, list):
self.video_infos = self.ann_file
else:
with open(self.ann_file, "r") as fin:
for line in fin:
line_split = line.strip().split(",")
filename, _, _, label = line_split
label = float(label)
filename = osp.join(self.data_prefix, filename)
self.video_infos.append(dict(filename=filename, label=label))
if cache_in_memory:
self.cache = {}
for i in tqdm(range(len(self)), desc="Caching fragments"):
self.cache[i] = self.__getitem__(i, tocache=True)
else:
self.cache = None
def __getitem__(
self, index, fragments=-1, fsize=-1, tocache=False, need_original_frames=False,
):
if tocache or self.cache is None:
if fragments == -1:
fragments = self.fragments
if fsize == -1:
fsize = self.fsize
video_info = self.video_infos[index]
filename = video_info["filename"]
label = video_info["label"]
if filename.endswith(".yuv"):
video = skvideo.io.vread(
filename, 1080, 1920, inputdict={"-pix_fmt": "yuvj420p"}
)
frame_inds = self.sampler(video.shape[0], self.phase == "train")
imgs = [torch.from_numpy(video[idx]) for idx in frame_inds]
else:
vreader = VideoReader(filename)
frame_inds = self.sampler(len(vreader), self.phase == "train")
frame_dict = {idx: vreader[idx] for idx in np.unique(frame_inds)}
imgs = [frame_dict[idx] for idx in frame_inds]
img_shape = imgs[0].shape
video = torch.stack(imgs, 0)
video = video.permute(3, 0, 1, 2)
if self.nfrags == 1:
vfrag = get_spatial_fragments(
video, fragments, fragments, fsize, fsize, aligned=self.aligned
)
else:
vfrag = get_spatial_fragments(
video, fragments, fragments, fsize, fsize, aligned=self.aligned
)
for i in range(1, self.nfrags):
vfrag = torch.cat(
(
vfrag,
get_spatial_fragments(
video,
fragments,
fragments,
fsize,
fsize,
aligned=self.aligned,
),
),
1,
)
if tocache:
return (vfrag, frame_inds, label, img_shape)
else:
vfrag, frame_inds, label, img_shape = self.cache[index]
vfrag = ((vfrag.permute(1, 2, 3, 0) - self.mean) / self.std).permute(3, 0, 1, 2)
data = {
"video": vfrag.reshape(
(-1, self.nfrags * self.num_clips, self.clip_len) + vfrag.shape[2:]
).transpose(
0, 1
), # B, V, T, C, H, W
"frame_inds": frame_inds,
"gt_label": label,
"original_shape": img_shape,
}
if need_original_frames:
data["original_video"] = video.reshape(
(-1, self.nfrags * self.num_clips, self.clip_len) + video.shape[2:]
).transpose(0, 1)
return data
def __len__(self):
return len(self.video_infos)
class ResizedVideoDataset(torch.utils.data.Dataset):
def __init__(
self,
ann_file,
data_prefix,
clip_len=32,
frame_interval=2,
num_clips=4,
aligned=32,
size=224,
cache_in_memory=False,
phase="test",
):
"""
Using resizing.
"""
self.ann_file = ann_file
self.data_prefix = data_prefix
self.clip_len = clip_len
self.frame_interval = frame_interval
self.num_clips = num_clips
self.size = size
self.aligned = aligned
self.sampler = SampleFrames(clip_len, frame_interval, num_clips)
self.video_infos = []
self.phase = phase
self.mean = torch.FloatTensor([123.675, 116.28, 103.53])
self.std = torch.FloatTensor([58.395, 57.12, 57.375])
if isinstance(self.ann_file, list):
self.video_infos = self.ann_file
else:
with open(self.ann_file, "r") as fin:
for line in fin:
line_split = line.strip().split(",")
filename, _, _, label = line_split
label = float(label)
filename = osp.join(self.data_prefix, filename)
self.video_infos.append(dict(filename=filename, label=label))
if cache_in_memory:
self.cache = {}
for i in tqdm(range(len(self)), desc="Caching resized videos"):
self.cache[i] = self.__getitem__(i, tocache=True)
else:
self.cache = None
def __getitem__(self, index, tocache=False, need_original_frames=False):
if tocache or self.cache is None:
video_info = self.video_infos[index]
filename = video_info["filename"]
label = video_info["label"]
vreader = VideoReader(filename)
frame_inds = self.sampler(len(vreader), self.phase == "train")
frame_dict = {idx: vreader[idx] for idx in np.unique(frame_inds)}
imgs = [frame_dict[idx] for idx in frame_inds]
img_shape = imgs[0].shape
video = torch.stack(imgs, 0)
video = video.permute(3, 0, 1, 2)
            video = torch.nn.functional.interpolate(video, size=(self.size, self.size))
            vfrag = video
            if tocache:
                return (vfrag, frame_inds, label, img_shape)
else:
vfrag, frame_inds, label, img_shape = self.cache[index]
vfrag = ((vfrag.permute(1, 2, 3, 0) - self.mean) / self.std).permute(3, 0, 1, 2)
data = {
"video": vfrag.reshape(
(-1, self.num_clips, self.clip_len) + vfrag.shape[2:]
).transpose(
0, 1
), # B, V, T, C, H, W
"frame_inds": frame_inds,
"gt_label": label,
"original_shape": img_shape,
}
if need_original_frames:
data["original_video"] = video.reshape(
                (-1, self.num_clips, self.clip_len) + video.shape[2:]
).transpose(0, 1)
return data
def __len__(self):
return len(self.video_infos)
class CroppedVideoDataset(FragmentVideoDataset):
def __init__(
self,
ann_file,
data_prefix,
clip_len=32,
frame_interval=2,
num_clips=4,
aligned=32,
size=224,
ncrops=1,
cache_in_memory=False,
phase="test",
):
"""
Regard Cropping as a special case for Fragments in Grid 1*1.
"""
super().__init__(
ann_file,
data_prefix,
clip_len=clip_len,
frame_interval=frame_interval,
num_clips=num_clips,
aligned=aligned,
fragments=1,
fsize=224,
nfrags=ncrops,
cache_in_memory=cache_in_memory,
phase=phase,
)
class FragmentImageDataset(torch.utils.data.Dataset):
def __init__(
self,
ann_file,
data_prefix,
fragments=7,
fsize=32,
nfrags=1,
cache_in_memory=False,
phase="test",
):
self.ann_file = ann_file
self.data_prefix = data_prefix
self.fragments = fragments
self.fsize = fsize
self.nfrags = nfrags
self.image_infos = []
self.phase = phase
self.mean = torch.FloatTensor([123.675, 116.28, 103.53])
self.std = torch.FloatTensor([58.395, 57.12, 57.375])
if isinstance(self.ann_file, list):
self.image_infos = self.ann_file
else:
with open(self.ann_file, "r") as fin:
for line in fin:
line_split = line.strip().split(",")
filename, _, _, label = line_split
label = float(label)
filename = osp.join(self.data_prefix, filename)
self.image_infos.append(dict(filename=filename, label=label))
if cache_in_memory:
self.cache = {}
for i in tqdm(range(len(self)), desc="Caching fragments"):
self.cache[i] = self.__getitem__(i, tocache=True)
else:
self.cache = None
def __getitem__(
self, index, fragments=-1, fsize=-1, tocache=False, need_original_frames=False
):
if tocache or self.cache is None:
if fragments == -1:
fragments = self.fragments
if fsize == -1:
fsize = self.fsize
image_info = self.image_infos[index]
filename = image_info["filename"]
label = image_info["label"]
try:
img = torchvision.io.read_image(filename)
except:
img = cv2.imread(filename)
img = torch.from_numpy(img[:, :, [2, 1, 0]]).permute(2, 0, 1)
img_shape = img.shape[1:]
image = img.unsqueeze(1)
if self.nfrags == 1:
ifrag = get_spatial_fragments(image, fragments, fragments, fsize, fsize)
else:
ifrag = get_spatial_fragments(image, fragments, fragments, fsize, fsize)
for i in range(1, self.nfrags):
ifrag = torch.cat(
(
ifrag,
get_spatial_fragments(
image, fragments, fragments, fsize, fsize
),
),
1,
)
if tocache:
return (ifrag, label, img_shape)
else:
ifrag, label, img_shape = self.cache[index]
if self.nfrags == 1:
ifrag = (
((ifrag.permute(1, 2, 3, 0) - self.mean) / self.std)
.squeeze(0)
.permute(2, 0, 1)
)
else:
### During testing, one image as a batch
ifrag = (
((ifrag.permute(1, 2, 3, 0) - self.mean) / self.std)
.squeeze(0)
.permute(0, 3, 1, 2)
)
data = {
"image": ifrag,
"gt_label": label,
"original_shape": img_shape,
"name": filename,
}
if need_original_frames:
data["original_image"] = image.squeeze(1)
return data
def __len__(self):
return len(self.image_infos)
class ResizedImageDataset(torch.utils.data.Dataset):
def __init__(
self, ann_file, data_prefix, size=224, cache_in_memory=False, phase="test",
):
self.ann_file = ann_file
self.data_prefix = data_prefix
self.size = size
self.image_infos = []
self.phase = phase
self.mean = torch.FloatTensor([123.675, 116.28, 103.53])
self.std = torch.FloatTensor([58.395, 57.12, 57.375])
if isinstance(self.ann_file, list):
self.image_infos = self.ann_file
else:
with open(self.ann_file, "r") as fin:
for line in fin:
line_split = line.strip().split(",")
filename, _, _, label = line_split
label = float(label)
filename = osp.join(self.data_prefix, filename)
self.image_infos.append(dict(filename=filename, label=label))
if cache_in_memory:
self.cache = {}
for i in tqdm(range(len(self)), desc="Caching fragments"):
self.cache[i] = self.__getitem__(i, tocache=True)
else:
self.cache = None
    def __getitem__(self, index, tocache=False, need_original_frames=False):
        if tocache or self.cache is None:
            image_info = self.image_infos[index]
            filename = image_info["filename"]
            label = image_info["label"]
            img = torchvision.io.read_image(filename)
            img_shape = img.shape[1:]
            image = img.unsqueeze(1)
            # Resize (rather than fragment) the image to the target square size.
            ifrag = torch.nn.functional.interpolate(
                image / 255.0, size=(self.size, self.size), mode="bilinear"
            )
            ifrag = (ifrag * 255.0).type_as(image)
            if tocache:
                return (ifrag, label, img_shape)
        else:
            ifrag, label, img_shape = self.cache[index]
ifrag = (
((ifrag.permute(1, 2, 3, 0) - self.mean) / self.std)
.squeeze(0)
.permute(2, 0, 1)
)
data = {
"image": ifrag,
"gt_label": label,
"original_shape": img_shape,
}
if need_original_frames:
data["original_image"] = image.squeeze(1)
return data
def __len__(self):
return len(self.image_infos)
class CroppedImageDataset(FragmentImageDataset):
def __init__(
self,
ann_file,
data_prefix,
size=224,
ncrops=1,
cache_in_memory=False,
phase="test",
):
"""
Regard Cropping as a special case for Fragments in Grid 1*1.
"""
super().__init__(
ann_file,
data_prefix,
fragments=1,
fsize=224,
nfrags=ncrops,
cache_in_memory=cache_in_memory,
phase=phase,
)
| 29,085 | 34.776138 | 149 | py |
BVQI | BVQI-master/buona_vista/.ipynb_checkpoints/__init__-checkpoint.py | from .datasets import *
| 24 | 11.5 | 23 | py |
BVQI | BVQI-master/buona_vista/.ipynb_checkpoints/version-checkpoint.py | __version__ = "0.2.0"
def parse_version_info(version_str):
version_info = []
for x in version_str.split("."):
if x.isdigit():
version_info.append(int(x))
elif x.find("rc") != -1:
patch_version = x.split("rc")
version_info.append(int(patch_version[0]))
version_info.append(f"rc{patch_version[1]}")
return tuple(version_info)
version_info = parse_version_info(__version__)
| 451 | 25.588235 | 56 | py |
ug-dissertation | ug-dissertation-main/sim.py | from gensim import models
import time
start_time = time.perf_counter()
print('\nLoading vectors...\n')
w = models.KeyedVectors.load_word2vec_format('/home/ubuntu/sim/CBOW|skipgram.bin', binary=True)
relations = {'': [''],
'': [''],
'': ['']}
original_verbs = list(relations.keys())
for verb in original_verbs:
print('\n\n')
for paraverb in relations[verb]:
print('{}-{}: {:.5f}'.format(verb, paraverb, w.similarity(''.join([i for i in verb if not i.isdigit()]), paraverb)))
time_taken = time.perf_counter() - start_time
print('\n{}\nDone in {:.5f} seconds.'.format('-' * 25, time_taken))
| 639 | 24.6 | 124 | py |
ug-dissertation | ug-dissertation-main/bnc.py | import nltk.corpus.reader.bnc
import time
import os
import ast
# ==========================================================================================================
"""
Read BNC data. Target specific folders (aca, dem, fic, news) with regex in
fileids parameter.
"""
start_time = time.perf_counter()
BNC_data = nltk.corpus.reader.bnc.BNCCorpusReader(root='/home/ubuntu/ug-d/bncbaby/',
fileids=r'aca/\w*\.xml', # r'aca/\w*\.xml', # r'[a-z]{3}/\w*\.xml')
lazy=False) # found here: https://github.com/nltk/nltk/issues/781 talk about how much more efficient it is
time_taken = time.perf_counter() - start_time
print('\n|| Successfully loaded the British National Corpus in {:.1f}'.format(time_taken), 'seconds. ||\n')
# ==========================================================================================================
def listall():
"""
Prints a list of all files in the dataset and
the number of sentences in each file.
>> python3 -c 'import ubnc; ubnc.listall()'
"""
start_time = time.perf_counter()
sent_counter = 0
file_counter = len(BNC_data.fileids())
for fileid in BNC_data.fileids():
number_of_sents = len(BNC_data.sents(fileid))
sent_counter += number_of_sents
print(fileid, number_of_sents)
time_taken = time.perf_counter() - start_time
print('\n || {} sentences across {} files. ||\n'.format(sent_counter, file_counter), '|| That took: {:.1f}'.format(time_taken), 'seconds. ||\n')
# ==========================================================================================================
def search(verb):
"""
Saves a list of sentences containing the search term 'word'. Scraped data
is output to out/word/cat-$FILE.txt in the form:
{x: [('stem', 'TAG'), (...)], [...]}
A dictionary whose key 'x' is the position of the dict's value in
the original .xml; and whose value is a list of tuples where the
first item of the tuple is the stem form of a word and the second
item is the TAG foundin the British National Corpus.
>> python3 -c 'import ubnc; ubnc.search("verb")'
"""
start_time = time.perf_counter()
sent_counter = 0
print("Calculating total number of sentences...")
total_sents = len(BNC_data.sents())
total_files_counter = 0
num_words = 0
print("Calculating total number of words...\n")
total_words = len(BNC_data.words())
match_files = []
logfile_name = "log.txt"
for fileid in BNC_data.fileids():
sentences_in_file = BNC_data.tagged_sents(fileid, stem=True) # c5=True
total_files_counter += 1
sent_list = []
"""
- The outfile_name is in the form: {cat}_{file}.txt where cat(egory)
can be aca, dem, fic, news and file is the name of the source .xml.
- A counter is created which will later be used to print how many
sentences have been saved per file.
- A summary of the scraped data is written to word/log.txt
"""
outfile_name = "{}.txt".format(fileid)
outfile_name = outfile_name.replace("/", "-")
outfile_name = outfile_name.replace(".xml", "")
counter = 0
# Look through sentences for instances of 'word'.
with open("/home/ubuntu/ug-d/out/{}/{}".format(verb, outfile_name), "a") as outfile:
outfile.write("{")
for position, sentence in enumerate(sentences_in_file[0:len(sentences_in_file)]):
for tup in sentence:
if verb in tup:
# If a match is found, write tagged data to outfile.
with open("/home/ubuntu/ug-d/out/{}/{}".format(verb, outfile_name), "a") as outfile:
outfile.write("{}: {}, ".format(position, sentence))
# Update values and print message for each file.
counter += 1
sent_list.append(position)
num_words += len(sentence)
sent_counter += 1
match_files.append(outfile_name)
statusmsg_filesents = "{} \n| {} sentences saved to out/{}/{}.\n".format(sent_list, len(sent_list), verb, outfile_name)
# Write to logfile and print status for each file. Remove outfile if empty.
with open("/home/ubuntu/ug-d/out/{}/{}".format(verb, outfile_name), "a") as outfile:
outfile.write("}")
with open("/home/ubuntu/ug-d/out/{}/{}".format(verb, logfile_name), "a") as logfile:
logfile.write(statusmsg_filesents)
if len(sent_list) == 0:
os.remove("/home/ubuntu/ug-d/out/{}/{}".format(verb, outfile_name))
print(statusmsg_filesents)
# Closing message printed to console and added to log.
time_taken = time.perf_counter() - start_time
statusmsg_final = "{}\nFound '{}' in {} / {} sentences across {} / {} files.\nScraped {} words out of {}. \n\n|| That took: {:.1f} seconds. ||\n".format("-" * 75, verb, sent_counter, total_sents, len(set(match_files)), total_files_counter, num_words, total_words, time_taken)
with open("/home/ubuntu/ug-d/out/{}/{}".format(verb, logfile_name), "a") as logfile:
logfile.write(statusmsg_final)
print(statusmsg_final)
# ==========================================================================================================
def getVV(verb, cat):
"""
Merges the files created by search(word) and flattentuple(word), saving
only instances of ("word", V), ("?", V).
First load all files corresponding to a single category into /out/{word}/
by running search("{word}") with a suitable regex.
Then run getVV for each of the four categories.
>> python3 -c 'import ubnc; ubnc.getVV("word", "cat")' | "begin", "aca"
"""
start_time = time.perf_counter()
total_files = 0
total_sents = 0
directory = "/home/ubuntu/ug-d/out/{}/{}/".format(verb, cat)
merge_dir = "/home/ubuntu/ug-d/out-merge/{}/".format(verb)
print(directory)
with open("{}{}VV.txt".format(merge_dir, cat), "a") as mergefile:
mergefile.write("{")
for file in os.listdir(directory):
if not file.endswith("log.txt"):
total_files += 1
filetag = file.replace(".txt", "").replace("-", "")
filetag = filetag.replace(cat, "")
with open("{}{}".format(directory, file), "r") as oldfile:
oldfile = ast.literal_eval(oldfile.read())
all_keys = list(oldfile.keys())
with open("{}{}VV.txt".format(merge_dir, cat), "a") as mergefile:
for key in all_keys:
for x, y in zip(oldfile[key], oldfile[key][1:]):
if x[0] == verb and x[1] == "VERB" and y[1] == "VERB":
total_sents += 1
print("\n{} Sentence found in: {} {}".format("-" * 15, file, "-" * 15))
mergefile.write("'{}-{}': {}, ".format(filetag, key, oldfile[key]))
print("{}\n".format(oldfile[key]))
with open("{}{}VV.txt".format(merge_dir, cat), "a") as mergefile:
mergefile.write("}")
time_taken = time.perf_counter() - start_time
print("\n{}\nProcessed {} sentences across {} files.\n|| That took: {:.1f} seconds. ||\n".format("-" * 75, total_sents, total_files, time_taken))
# ==========================================================================================================
def getVNP(verb, cat):
"""
Merges the files created by search(verb) and flattentuple(verb), saving
only instances of ("verb", V), ("the", ART).
First load all files corresponding to a single category into /out/{verb}/
by running search("{verb}") with a suitable regex.
Then run getVNP for each of the four categories.
>> python3 -c 'import ubnc; ubnc.getVNP("verb", "cat")' | "begin", "aca"
"""
start_time = time.perf_counter()
total_files = 0
total_sents = 0
directory = "/home/ubuntu/ug-d/out/{}/{}/".format(verb, cat)
merge_dir = "/home/ubuntu/ug-d/out-merge/{}/".format(verb)
with open("{}{}VNP.txt".format(merge_dir, cat), "a") as mergefile:
mergefile.write("{")
for file in os.listdir(directory):
if not file.endswith("log.txt"):
total_files += 1
filetag = file.replace(".txt", "").replace("-", "")
filetag = filetag.replace(cat, "")
with open("{}{}".format(directory, file), "r") as oldfile:
oldfile = ast.literal_eval(oldfile.read())
all_keys = list(oldfile.keys())
with open("{}{}VNP.txt".format(merge_dir, cat), "a") as mergefile:
for key in all_keys:
for x, y in zip(oldfile[key], oldfile[key][1:]):
if x[0] == verb and x[1] == "VERB" and y[0] == "the" and y[1] == "ART":
total_sents += 1
print("\n{} Sentence found in: {} {}".format("-" * 15, file, "-" * 15))
mergefile.write("'{}-{}': {}, ".format(filetag, key, oldfile[key]))
print("{}\n".format(oldfile[key]))
with open("{}{}VNP.txt".format(merge_dir, cat), "a") as mergefile:
mergefile.write("}")
time_taken = time.perf_counter() - start_time
print("\n{}\nProcessed {} sentences across {} files.\n|| That took: {:.1f} seconds. ||\n".format("-" * 75, total_sents, total_files, time_taken))
# ==========================================================================================================
def targetVNP(verb):
"""
Uses the tuples created by getVV(verb), getVNP(verb) which were
subsequently stored in /out-merge/verb/{CAT}-{VV|VNP}.txt.
Then trims sentences to get target NPs, storing snippets as
a list at /out-merge/{verb}/catVNP-snippets.
e.g. [ [('begin', 'VERB'), ('the', 'ART'), ('research', 'SUBST')], [...] ]
>> python3 -c 'import ubnc; ubnc.targetVNP("verb")' | "begin"
"""
start_time = time.perf_counter()
total_files = 0
total_sents = 0
directory = "/home/ubuntu/ug-d/out-merge/{}/".format(verb)
for file in os.listdir(directory):
if not file.endswith("log.txt") and file.endswith("VNP.txt"):
print("\n{} FILE BEING PROCESSED: {} {}".format("-" * 15, file, "-" * 15))
total_files += 1
with open("{}{}".format(directory, file), "r") as oldfile:
oldfile = ast.literal_eval(oldfile.read())
all_keys = list(oldfile.keys())
num_sentences = len(all_keys)
total_sents += num_sentences
file = file.replace(".txt", "")
with open("{}/{}-snippets.txt".format(directory, file), "a") as newfile:
newfile.write("[")
for key in all_keys:
snippet_index = [x for x, y in enumerate(oldfile[key]) if y[0] == verb]
snippet_index = snippet_index[0] # snippet_index is an integer.
snippet = oldfile[key][snippet_index:]
end_index = [x for x, y in enumerate(snippet) if y[1] == 'SUBST']
end_index = end_index[0] + 1
snippet = snippet[:end_index]
with open("{}/{}-snippets.txt".format(directory, file), "a") as newfile:
newfile.write("{}, ".format(snippet))
with open("{}/{}-snippets.txt".format(directory, file), "a") as newfile:
newfile.write("]")
time_taken = time.perf_counter() - start_time
print("\n{}\nProcessed {} sentences across {} files.\n|| That took: {:.1f} seconds. ||\n".format("-" * 75, total_sents, total_files, time_taken))
# ==========================================================================================================
def retrosearch(verb, cat):
"""
    Retrosearch writes to /out-para/{verb}/{cat}-retroNPs.txt the sentences
    which contain one of the target NPs found to be a collocate
    of the original verb.
>> python3 -c 'import ubnc; ubnc.retrosearch("finish", "dem")'
"""
start_time = time.perf_counter()
sent_counter = 0
print("Calculating total number of sentences...")
total_sents = len(BNC_data.sents())
total_files_counter = 0
num_words = 0
print("Calculating total number of words...\n")
total_words = len(BNC_data.words())
# Generate list of nouns that have appeared alongside 'verb'.
nouns = []
with open("/home/ubuntu/ug-d/out-merge/{}/{}VNP-snippets.txt".format(verb, cat)) as snippet_file:
snippet_list = ast.literal_eval(snippet_file.read())
for snippet in snippet_list:
nouns.append([tup[0] for tup in snippet if tup[1] == 'SUBST'][0])
match_files = []
paraverb_sents = []
for fileid in BNC_data.fileids():
sentences_in_file = BNC_data.tagged_sents(fileid, stem=False) # c5=True
total_files_counter += 1
# Sentences that contain a verb which could potentially
# paraphrase the original verb.
for noun in nouns:
for sentence in sentences_in_file:
for tup in sentence:
if tup[0] == noun and tup[1] == 'SUBST':
sentence = [x[0] for x in sentence]
sentence = " ".join(sentence)
paraverb_sents.append(sentence)
num_words += len(sentence)
sent_counter += 1
match_files.append(fileid)
# Write list of sentences out to a file.
with open("/home/ubuntu/ug-d/out-para/{}/{}-retroNPs.txt".format(verb, cat), "a") as outfile:
outfile.write(str(paraverb_sents))
statusmsg_filesents = "\n| {} sentences saved to out-para/{}\n".format(sent_counter, verb)
print(statusmsg_filesents)
# Closing message printed to console and added to log.
time_taken = time.perf_counter() - start_time
statusmsg_final = "{}\nTarget NPs:{}\n\nFound {} NP-containing sentences (out of a total of {} sentences) across {} / {} files.\nScraped {} words out of {}. \n\n|| That took: {:.1f} seconds. ||\n".format("-" * 75, nouns, sent_counter, total_sents, len(set(match_files)), total_files_counter, num_words, total_words, time_taken)
with open("/home/ubuntu/ug-d/out-para/{}/log.txt".format(verb), "a") as logfile:
logfile.write(statusmsg_final)
print(statusmsg_final)
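# ==========================================================================================================
# Hedged usage sketch (added for illustration; not part of the original pipeline).
# "begin" and "aca" are placeholder arguments, and the out/, out-merge/ and
# out-para/ directories under /home/ubuntu/ug-d/ are assumed to exist already,
# laid out as the docstrings above describe.
if __name__ == '__main__':
    search("begin")            # scrape tagged sentences containing the verb
    getVV("begin", "aca")      # keep verb-verb bigrams for one category
    getVNP("begin", "aca")     # keep verb + "the" (ART) bigrams for one category
    targetVNP("begin")         # trim matches down to verb + NP snippets
    retrosearch("begin", "aca")  # re-scan the corpus for the collocate nouns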
| 14,631 | 37.914894 | 331 | py |
MachineUnlearningPy | MachineUnlearningPy-master/setup.py | from setuptools import setup, find_packages
with open('README.md', 'r') as fh:
readme = fh.read()
setup(
name="lenskit",
version="0.6.1",
author="Michael Ekstrand",
author_email="[email protected]",
description="Run recommender algorithms and experiments",
long_description=readme,
url="https://lkpy.lenskit.org",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
python_requires='>= 3.5',
setup_requires=[
'pytest-runner'
],
install_requires=[
'pandas >= 0.20',
'numpy',
'scipy',
'numba >= 0.38',
'pyarrow',
'cffi'
],
tests_require=[
'pytest >= 3.9',
'pytest-doctestplus'
],
extras_require={
'docs': [
'sphinx >= 1.8',
'sphinx_rtd_theme',
'nbsphinx',
'recommonmark',
'ipython'
],
'hpf': [
'hpfrec'
],
'implicit': [
'implicit'
]
},
packages=find_packages()
)
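# Hedged note (added): the optional dependency groups declared in extras_require
# above can be pulled in with pip's extras syntax when installing from the
# repository root, e.g.
#   pip install -e ".[hpf,implicit]"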
| 1,329 | 22.75 | 68 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tasks.py | import os
from invoke import task
from invoke.exceptions import Failure
from invoke.runners import Result
import shutil
@task
def test(c, cover=False, verbose=True, slow=True, eval=True, match=None, mark=None, debug=False,
forked=False, fail_fast=False, jit=True):
"Run tests"
if not jit:
os.environ['NUMBA_DISABLE_JIT'] = '1'
import pytest
args = ['tests']
if cover:
args.append('--cov=lenskit')
if verbose:
args.append('--verbose')
if fail_fast:
args.append('-x')
if not slow:
args.append('-m')
args.append('not slow')
elif not eval:
args.append('-m')
args.append('not eval')
if match:
args.append('-k')
args.append(match)
if mark:
args.append('-m')
args.append(mark)
if debug:
args.append('--log-cli-level=DEBUG')
if forked:
args.append('--forked')
rc = pytest.main(args)
if rc:
raise Failure(Result(exited=rc), 'tests failed')
@task
def docs(c):
"Build documentation"
c.run('sphinx-build -a doc build/doc')
@task
def clean(c):
    print('removing build')
    shutil.rmtree('build', ignore_errors=True)
    print('removing dist')
    shutil.rmtree('dist', ignore_errors=True)
    print('removing .eggs')
    shutil.rmtree('.eggs', ignore_errors=True)
    print('removing lenskit.egg-info')
shutil.rmtree('lenskit.egg-info', ignore_errors=True)
if __name__ == '__main__':
import invoke.program
program = invoke.program.Program()
program.run()
| 1,562 | 23.046154 | 96 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/check.py | """
Utility functions for precondition checks.
"""
import warnings
def _get_size(m, d):
if d is None:
return len(m)
else:
return m.shape[d]
def check_value(expr, msg, *args, warn=False):
if not expr:
if warn:
warnings.warn(msg.format(*args))
else:
raise ValueError(msg.format(*args))
def check_dimension(m1, m2, msg=None, d1=None, d2=None):
"""
Check the dimensions of a pair of matrices or arrays.
Args:
m1(array-like): the left matrix or array
m2(array-like): the right matrix or array
d1(int):
the left dimension to check. If an integer, then this method will
check ``m1.shape[d1]``; if ``None``, then ``len(m1)``.
d2(int):
the right dimension to check. If an integer, then this method will
check ``m2.shape[d2]``; if ``None``, then ``len(m2)``.
"""
sz1 = _get_size(m1, d1)
sz2 = _get_size(m2, d2)
if sz1 != sz2:
        if msg is None:
            raise ValueError("mismatched dimensions: {} != {}".format(sz1, sz2))
        else:
            raise ValueError("{}: mismatched dimensions: {} != {}".format(msg, sz1, sz2))
| 1,193 | 25.533333 | 79 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/crossfold.py | """
Data set cross-folding.
"""
from collections import namedtuple
import logging
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from . import util
TTPair = namedtuple('TTPair', ['train', 'test'])
TTPair.__doc__ = 'Train-test pair (named tuple).'
TTPair.train.__doc__ = 'Train data for this pair.'
TTPair.test.__doc__ = 'Test data for this pair.'
_logger = logging.getLogger(__name__)
def partition_rows(data, partitions):
"""
    Partition a frame of ratings or other data into train-test partitions. This function does not
care what kind of data is in `data`, so long as it is a Pandas DataFrame (or equivalent).
:param data: a data frame containing ratings or other data you wish to partition.
:type data: :py:class:`pandas.DataFrame` or equivalent
:param partitions: the number of partitions to produce
:type partitions: integer
:rtype: iterator
:returns: an iterator of train-test pairs
"""
_logger.info('partitioning %d ratings into %d partitions', len(data), partitions)
# create an array of indexes
rows = np.arange(len(data))
# shuffle the indices & split into partitions
np.random.shuffle(rows)
test_sets = np.array_split(rows, partitions)
# convert each partition into a split
for i, ts in enumerate(test_sets):
test = data.iloc[ts, :]
trains = test_sets[:i] + test_sets[(i + 1):]
train_idx = np.concatenate(trains)
train = data.iloc[train_idx, :]
yield TTPair(train, test)
def sample_rows(data, partitions, size, disjoint=True):
"""
    Sample a frame of ratings into train-test partitions. This function does not care
what kind of data is in `data`, so long as it is a Pandas DataFrame (or equivalent).
We can loop over a sequence of train-test pairs::
>>> ratings = util.load_ml_ratings()
>>> for train, test in sample_rows(ratings, 5, 1000):
... print(len(test))
1000
1000
1000
1000
1000
Sometimes for testing, it is useful to just get a single pair::
>>> train, test = sample_rows(ratings, None, 1000)
>>> len(test)
1000
>>> len(test) + len(train) - len(ratings)
0
Args:
data(pandas.DataFrame):
Data frame containing ratings or other data to partition.
partitions(int or None):
The number of partitions to produce. If ``None``, produce a _single_ train-test
pair instead of an iterator or list.
size(int):
The size of each sample.
disjoint(bool):
If ``True``, force samples to be disjoint.
Returns:
iterator: An iterator of train-test pairs.
"""
if partitions is None:
test = data.sample(n=size)
tr_mask = pd.Series(True, index=data.index)
tr_mask.loc[test.index] = False
train = data[tr_mask]
return TTPair(train, test)
if disjoint and partitions * size >= len(data):
_logger.warning('wanted %d disjoint splits of %d each, but only have %d rows; partitioning',
partitions, size, len(data))
return partition_rows(data, partitions)
# create an array of indexes
rows = np.arange(len(data))
if disjoint:
_logger.info('creating %d disjoint samples of size %d', partitions, size)
ips = _disjoint_sample(rows, partitions, size)
else:
_logger.info('taking %d samples of size %d', partitions, size)
ips = _n_samples(rows, partitions, size)
return (TTPair(data.iloc[ip.train, :], data.iloc[ip.test, :]) for ip in ips)
def _disjoint_sample(xs, n, size):
# shuffle the indices & split into partitions
np.random.shuffle(xs)
# convert each partition into a split
for i in range(n):
start = i * size
test = xs[start:start + size]
train = np.concatenate((xs[:start], xs[start + size:]))
yield TTPair(train, test)
def _n_samples(xs, n, size):
for i in range(n):
test = np.random.choice(xs, size, False)
train = np.setdiff1d(xs, test, assume_unique=True)
yield TTPair(train, test)
class PartitionMethod(ABC):
"""
Partition methods select test rows for a user or item. Partition methods
are callable; when called with a data frame, they return the test rows.
"""
@abstractmethod
def __call__(self, udf):
"""
Subset a data frame.
:param udf: The input data frame of rows for a user or item.
:paramtype udf: :py:class:`pandas.DataFrame`
:returns: The data frame of test rows, a subset of `udf`.
"""
pass
class SampleN(PartitionMethod):
"""
Randomly select a fixed number of test rows per user/item.
:param n: The number of test items to select.
:paramtype n: integer
"""
def __init__(self, n):
self.n = n
def __call__(self, udf):
return udf.sample(n=self.n)
class SampleFrac(PartitionMethod):
"""
Randomly select a fraction of test rows per user/item.
:param frac: the fraction of items to select for testing.
:paramtype frac: double
"""
def __init__(self, frac):
self.fraction = frac
def __call__(self, udf):
return udf.sample(frac=self.fraction)
class LastN(PartitionMethod):
"""
Select a fixed number of test rows per user/item, based on ordering by a
column.
:param n: The number of test items to select.
:paramtype n: integer
:param col: The column to sort by.
"""
def __init__(self, n, col='timestamp'):
self.n = n
self.column = col
def __call__(self, udf):
return udf.sort_values(self.column).iloc[-self.n:]
class LastFrac(PartitionMethod):
"""
Select a fraction of test rows per user/item.
:param frac: the fraction of items to select for testing.
:paramtype frac: double
:param col: The column to sort by.
"""
def __init__(self, frac, col='timestamp'):
self.fraction = frac
self.column = col
def __call__(self, udf):
n = round(len(udf) * self.fraction)
return udf.sort_values(self.column).iloc[-n:]
def partition_users(data, partitions: int, method: PartitionMethod):
"""
Partition a frame of ratings or other data into train-test partitions user-by-user.
This function does not care what kind of data is in `data`, so long as it is a Pandas DataFrame
(or equivalent) and has a `user` column.
:param data: a data frame containing ratings or other data you wish to partition.
:type data: :py:class:`pandas.DataFrame` or equivalent
:param partitions: the number of partitions to produce
:type partitions: integer
:param method: The method for selecting test rows for each user.
:rtype: iterator
:returns: an iterator of train-test pairs
"""
user_col = data['user']
users = user_col.unique()
_logger.info('partitioning %d rows for %d users into %d partitions',
len(data), len(users), partitions)
# create an array of indexes into user row
rows = np.arange(len(users))
# shuffle the indices & split into partitions
np.random.shuffle(rows)
test_sets = np.array_split(rows, partitions)
# convert each partition into a split
for i, ts in enumerate(test_sets):
# get our users!
test_us = users[ts]
# sample the data frame
ugf = data[data.user.isin(test_us)].groupby('user')
test = ugf.apply(method)
# get rid of the group index
test = test.reset_index(0, drop=True)
# now test is indexed on the data frame! so we can get the rest
rest = data.index.difference(test.index)
train = data.loc[rest]
yield TTPair(train, test)
def sample_users(data, partitions: int, size: int, method: PartitionMethod, disjoint=True):
"""
Create train-test partitions by sampling users.
This function does not care what kind of data is in `data`, so long as it is
a Pandas DataFrame (or equivalent) and has a `user` column.
Args:
data(pandas.DataFrame):
Data frame containing ratings or other data you wish to partition.
partitions(int):
The number of partitions.
size(int):
The sample size.
method(PartitionMethod):
The method for obtaining user test ratings.
Returns:
iterator: An iterator of train-test pairs (as :class:`TTPair` objects).
"""
user_col = data['user']
users = user_col.unique()
if disjoint and partitions * size >= len(users):
_logger.warning('cannot take %d disjoint samples of size %d from %d users',
partitions, size, len(users))
for p in partition_users(data, partitions, method):
yield p
return
_logger.info('sampling %d users into %d partitions (n=%d)',
len(users), partitions, size)
if disjoint:
np.random.shuffle(users)
# generate our samples
for i in range(partitions):
# get our test users!
if disjoint:
test_us = users[i*size:(i+1)*size]
else:
test_us = np.random.choice(users, size, False)
# sample the data frame
test = data[data.user.isin(test_us)].groupby('user').apply(method)
# get rid of the group index
test = test.reset_index(0, drop=True)
# now test is indexed on the data frame! so we can get the rest
rest = data.index.difference(test.index)
train = data.loc[rest]
yield TTPair(train, test)
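# Hedged usage sketch (comments only, since this module is normally imported as
# part of the lenskit package): with a ratings frame such as the one returned by
# util.load_ml_ratings(), five user-based folds that hold out five ratings per
# test user could be drawn like this:
#
#   for train, test in partition_users(ratings, 5, SampleN(5)):
#       ...  # fit an algorithm on `train`, then score it on `test`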
| 9,699 | 30.493506 | 100 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/topn.py | import logging
import warnings
from collections import OrderedDict as od
import numpy as np
import pandas as pd
from .metrics.topn import *
_log = logging.getLogger(__name__)
class RecListAnalysis:
"""
Compute one or more top-N metrics over recommendation lists.
This method groups the recommendations by the specified columns,
and computes the metric over each group. The default set of grouping
columns is all columns *except* the following:
* ``item``
* ``rank``
* ``score``
* ``rating``
The truth frame, ``truth``, is expected to match over (a subset of) the
grouping columns, and contain at least an ``item`` column. If it also
contains a ``rating`` column, that is used as the users' rating for
metrics that require it; otherwise, a rating value of 1 is assumed.
Args:
group_cols(list):
The columns to group by, or ``None`` to use the default.
"""
DEFAULT_SKIP_COLS = ['item', 'rank', 'score', 'rating']
def __init__(self, group_cols=None):
self.group_cols = group_cols
self.metrics = []
def add_metric(self, metric, *, name=None, **kwargs):
"""
Add a metric to the analysis.
        A metric is a function of two arguments: a single group of the recommendation
frame, and the corresponding truth frame. The truth frame will be indexed by
item ID. Many metrics are defined in :mod:`lenskit.metrics.topn`; they are
re-exported from :mod:`lenskit.topn` for convenience.
Args:
metric: The metric to compute.
name: The name to assign the metric. If not provided, the function name is used.
**kwargs: Additional arguments to pass to the metric.
"""
if name is None:
name = metric.__name__
self.metrics.append((metric, name, kwargs))
def compute(self, recs, truth, *, progress=lambda x: x):
"""
Run the analysis. Neither data frame should be meaningfully indexed.
Args:
recs(pandas.DataFrame):
A data frame of recommendations.
truth(pandas.DataFrame):
A data frame of ground truth (test) data.
Returns:
pandas.DataFrame: The results of the analysis.
"""
_log.info('analyzing %d recommendations (%d truth rows)', len(recs), len(truth))
gcols = self.group_cols
if gcols is None:
gcols = [c for c in recs.columns if c not in self.DEFAULT_SKIP_COLS]
_log.info('using group columns %s', gcols)
_log.info('ungrouped columns: %s', [c for c in recs.columns if c not in gcols])
gc_map = dict((c, i) for (i, c) in enumerate(gcols))
ti_cols = [c for c in gcols if c in truth.columns]
ti_cols.append('item')
_log.info('using truth ID columns %s', ti_cols)
truth = truth.set_index(ti_cols)
if not truth.index.is_unique:
warnings.warn('truth frame does not have unique values')
truth.sort_index(inplace=True)
_log.info('preparing analysis result storage')
# we manually use grouping internals
grouped = recs.groupby(gcols)
res = pd.DataFrame(od((k, np.nan) for (f, k, args) in self.metrics),
index=grouped.grouper.result_index)
assert len(res) == len(grouped.groups), \
"result set size {} != group count {}".format(len(res), len(grouped.groups))
assert res.index.nlevels == len(gcols)
        _log.info('computing analysis for %d lists', len(res))
for i, row_key in enumerate(progress(res.index)):
g_rows = grouped.indices[row_key]
g_recs = recs.iloc[g_rows, :]
if len(ti_cols) == len(gcols) + 1:
tr_key = row_key
else:
tr_key = tuple([row_key[gc_map[c]] for c in ti_cols[:-1]])
g_truth = truth.loc[tr_key, :]
for j, (mf, mn, margs) in enumerate(self.metrics):
res.iloc[i, j] = mf(g_recs, g_truth, **margs)
return res
class UnratedCandidates:
"""
Candidate selector that selects unrated items from a training set.
Args:
training(pandas.DataFrame):
the training data; must have ``user`` and ``item`` columns.
"""
def __init__(self, training):
warnings.warn('UnratedCandidates deprecated, use default item selector', DeprecationWarning)
self.training = training.set_index('user').item
self.items = training.item.unique()
def __call__(self, user, *args, **kwargs):
urates = self.training.loc[user]
return np.setdiff1d(self.items, urates)
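# Hedged usage sketch (comments only): `recs` is assumed to be a data frame of
# recommendation lists with user/item/rank columns and `truth` a frame of held-out
# test ratings; ndcg and recall are assumed to be metric functions exported by
# lenskit.metrics.topn, which is star-imported above.
#
#   rla = RecListAnalysis()
#   rla.add_metric(ndcg)
#   rla.add_metric(recall)
#   results = rla.compute(recs, truth)  # one row of metric values per list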
| 4,727 | 34.283582 | 100 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/util.py | """
Miscellaneous utility functions.
"""
import os
import os.path
import time
import pathlib
import warnings
import logging
from copy import deepcopy
from collections.abc import Iterable, Sequence
from numba import jitclass, njit, int32, double
import numpy as np
import pandas as pd
from .algorithms import Algorithm
try:
import fastparquet
except ImportError:
fastparquet = None
_log = logging.getLogger(__name__)
__os_fp = getattr(os, 'fspath', None)
@njit
def _ind_downheap(pos: int, size, keys, values):
min = pos
left = 2*pos + 1
right = 2*pos + 2
if left < size and values[keys[left]] < values[keys[min]]:
min = left
if right < size and values[keys[right]] < values[keys[min]]:
min = right
if min != pos:
kt = keys[min]
keys[min] = keys[pos]
keys[pos] = kt
_ind_downheap(min, size, keys, values)
@jitclass([
('nmax', int32),
('size', int32),
('keys', int32[:]),
('values', double[:])
])
class Accumulator:
def __init__(self, values, nmax):
self.values = values
self.nmax = nmax
self.size = 0
self.keys = np.zeros(nmax + 1, dtype=np.int32)
def __len__(self):
return self.size
def add(self, key):
if key < 0 or key >= self.values.shape[0]:
raise IndexError()
self.keys[self.size] = key
self._upheap(self.size)
if self.size < self.nmax:
self.size = self.size + 1
else:
# we are at capacity, we need to drop the smallest value
self.keys[0] = self.keys[self.size]
_ind_downheap(0, self.size, self.keys, self.values)
def add_all(self, keys):
for i in range(len(keys)):
self.add(keys[i])
def peek(self):
if self.size > 0:
return self.keys[0]
else:
return -1
def remove(self):
if self.size == 0:
return -1
top = self.keys[0]
self.keys[0] = self.keys[self.size - 1]
self.size = self.size - 1
if self.size > 0:
_ind_downheap(0, self.size, self.keys, self.values)
return top
def top_keys(self):
keys = np.empty(self.size, dtype=np.int32)
while self.size > 0:
i = self.size - 1
keys[i] = self.remove()
return keys
def _upheap(self, pos):
keys = self.keys
values = self.values
current = pos
parent = (current - 1) // 2
while current > 0 and values[keys[parent]] > values[keys[current]]:
# swap up
kt = keys[parent]
keys[parent] = keys[current]
keys[current] = kt
current = parent
parent = (current - 1) // 2
class Stopwatch():
start_time = None
stop_time = None
def __init__(self, start=True):
if start:
self.start()
def start(self):
self.start_time = time.perf_counter()
def stop(self):
self.stop_time = time.perf_counter()
def elapsed(self):
stop = self.stop_time
if stop is None:
stop = time.perf_counter()
return stop - self.start_time
def __str__(self):
elapsed = self.elapsed()
if elapsed < 1:
return "{: 0.0f}ms".format(elapsed * 1000)
elif elapsed > 60 * 60:
h, m = divmod(elapsed, 60 * 60)
m, s = divmod(m, 60)
return "{:0.0f}h{:0.0f}m{:0.2f}s".format(h, m, s)
elif elapsed > 60:
m, s = divmod(elapsed, 60)
return "{:0.0f}m{:0.2f}s".format(m, s)
else:
return "{:0.2f}s".format(elapsed)
def clone(algo):
"""
Clone an algorithm, but not its fitted data. This is like
    :py:func:`sklearn.base.clone`, but may not work on arbitrary SciKit estimators.
LensKit algorithms are compatible with SciKit clone, however, so feel free
to use that if you need more general capabilities.
This function is somewhat derived from the SciKit one.
>>> from lenskit.algorithms.basic import Bias
>>> orig = Bias()
>>> copy = clone(orig)
>>> copy is orig
False
>>> copy.damping == orig.damping
True
"""
_log.debug('cloning %s', algo)
if isinstance(algo, Algorithm) or hasattr(algo, 'get_params'):
params = algo.get_params(deep=False)
sps = dict([(k, clone(v)) for (k, v) in params.items()])
return algo.__class__(**sps)
elif isinstance(algo, list) or isinstance(algo, tuple):
return [clone(a) for a in algo]
else:
return deepcopy(algo)
def fspath(path):
"Backport of :py:func:`os.fspath` function for Python 3.5."
if __os_fp:
return __os_fp(path)
else:
return str(path)
def read_df_detect(path):
"""
Read a Pandas data frame, auto-detecting the file format based on filename suffix.
The following file types are supported:
CSV
File has suffix ``.csv``, read with :py:func:`pandas.read_csv`.
Parquet
File has suffix ``.parquet``, ``.parq``, or ``.pq``, read with
:py:func:`pandas.read_parquet`.
"""
if not isinstance(path, pathlib.Path):
path = pathlib.Path(path)
if path.suffix == '.csv':
return pd.read_csv(path)
elif path.suffix in ('.parquet', '.parq', '.pq'):
return pd.read_parquet(path)
def write_parquet(path, frame, append=False):
"""
Write a Parquet file.
Args:
path(pathlib.Path): The path of the Parquet file to write.
frame(pandas.DataFrame): The data to write.
append(bool): Whether to append to the file or overwrite it.
"""
fn = fspath(path)
append = append and os.path.exists(fn)
_log.debug('%s %d rows to Parquet file %s',
'appending' if append else 'writing',
len(frame), fn)
if fastparquet is not None:
fastparquet.write(fn, frame, append=append, compression='snappy')
elif append:
warnings.warn('fastparquet not available, appending is slow')
odf = pd.read_parquet(fn)
pd.concat([odf, frame], ignore_index=True).to_parquet(fn)
else:
frame.to_parquet(fn)
class LastMemo:
def __init__(self, func):
self.function = func
self.memory = None
self.result = None
def __call__(self, arg):
if arg is not self.memory:
self.result = self.function(arg)
self.memory = arg
return self.result
def load_ml_ratings(path='ml-latest-small'):
"""
Load the ratings from a modern MovieLens data set (ML-20M or one of the ‘latest’ data sets).
>>> load_ml_ratings().head()
user item rating timestamp
0 1 31 2.5 1260759144
1 1 1029 3.0 1260759179
2 1 1061 3.0 1260759182
3 1 1129 2.0 1260759185
4 1 1172 4.0 1260759205
Args:
path: The path where the MovieLens data is unpacked.
Returns:
pandas.DataFrame:
The rating data, with user and item columns named properly for LensKit.
"""
path = pathlib.Path(path)
file = path / 'ratings.csv'
ratings = pd.read_csv(fspath(file))
ratings.rename(columns={'movieId': 'item', 'userId': 'user'}, inplace=True)
return ratings
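# Hedged usage sketch (comments only; the numbers are made up): the Accumulator
# above keeps the indices of the `nmax` largest entries of a value array by means
# of the bounded min-heap helpers.
#
#   values = np.array([0.3, 0.9, 0.1, 0.7, 0.5])
#   acc = Accumulator(values, 3)
#   acc.add_all(np.arange(len(values), dtype=np.int32))
#   acc.top_keys()  # indices of the three largest values, best first: [1, 3, 4]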
| 7,342 | 25.897436 | 96 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/__init__.py | """
The LensKit package.
"""
from . import batch
class DataWarning(UserWarning):
"""
Warning raised for detectable problems with input data.
"""
pass
| 169 | 12.076923 | 59 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/_mkl_ops.py | import logging
import cffi
import numpy as np
from .matrix import CSR
_logger = logging.getLogger(__name__)
__mkl_syrk_defs = '''
typedef void* sparse_matrix_t;
struct matrix_descr {
int type;
int mode;
int diag;
};
int mkl_sparse_d_create_csr(sparse_matrix_t *A, int indexing, int rows, int cols,
int *rows_start, int *rows_end, int *col_indx, double *values);
int mkl_sparse_d_export_csr(const sparse_matrix_t source, int *indexing, int *rows, int *cols,
int **rows_start, int **rows_end, int **col_indx, double **values);
int mkl_sparse_order(sparse_matrix_t A);
int mkl_sparse_destroy(sparse_matrix_t A);
int mkl_sparse_syrk (int operation, const sparse_matrix_t A, sparse_matrix_t *C);
int mkl_sparse_d_mv (int operation, double alpha,
const sparse_matrix_t A, struct matrix_descr descr,
const double *x, double beta, double *y);
'''
_logger.debug('initializing CFFI interface')
_mkl_ffi = cffi.FFI()
_mkl_ffi.cdef(__mkl_syrk_defs)
try:
_logger.debug('importing MKL')
_mkl_lib = _mkl_ffi.dlopen('mkl_rt')
_logger.info('Loaded MKL')
except OSError:
_logger.info('Cannot load MKL')
_mkl_lib = None
def _mkl_check_return(rv, call='<unknown>'):
if rv:
raise RuntimeError('MKL call {} failed with code {}'.format(call, rv))
def _mkl_basic_descr():
desc = _mkl_ffi.new('struct matrix_descr*')
desc.type = 20 # general matrix
desc.mode = 0
desc.diag = 0
return desc
class SparseM:
"""
Class encapsulating an MKL sparse matrix handle.
"""
def __init__(self):
self.h_ptr = _mkl_ffi.new('sparse_matrix_t*')
@classmethod
def from_csr(cls, csr):
"""
Create an MKL sparse matrix from a LensKit CSR matrix.
Args:
csr(CSR): the input matrix.
Returns:
SparseM: a sparse matrix handle for the CSR matrix.
"""
sp = np.require(csr.rowptrs, np.intc, 'C')
ep = np.require(csr.rowptrs[1:], np.intc, 'C')
cols = np.require(csr.colinds, np.intc, 'C')
vals = np.require(csr.values, np.float_, 'C')
m = SparseM()
_sp = _mkl_ffi.cast('int*', sp.ctypes.data)
_ep = _mkl_ffi.cast('int*', ep.ctypes.data)
_cols = _mkl_ffi.cast('int*', cols.ctypes.data)
_vals = _mkl_ffi.cast('double*', vals.ctypes.data)
rv = _mkl_lib.mkl_sparse_d_create_csr(m.h_ptr, 0, csr.nrows, csr.ncols,
_sp, _ep, _cols, _vals)
_mkl_check_return(rv, 'mkl_sparse_d_create_csr')
return m
@property
def handle(self):
return self.h_ptr[0]
def __del__(self):
if self.h_ptr[0]:
_logger.debug('destroying MKL sparse matrix')
_mkl_lib.mkl_sparse_destroy(self.handle)
def export(self):
"""
Export an MKL sparse matrix as a LensKit CSR.
Returns:
CSR: the LensKit matrix.
"""
indP = _mkl_ffi.new('int*')
nrP = _mkl_ffi.new('int*')
ncP = _mkl_ffi.new('int*')
rsP = _mkl_ffi.new('int**')
reP = _mkl_ffi.new('int**')
ciP = _mkl_ffi.new('int**')
vsP = _mkl_ffi.new('double**')
rv = _mkl_lib.mkl_sparse_d_export_csr(self.handle, indP, nrP, ncP, rsP, reP, ciP, vsP)
_mkl_check_return(rv, 'mkl_sparse_d_export_csr')
if indP[0] != 0:
raise ValueError('output index is not 0-indexed')
nr = nrP[0]
nc = ncP[0]
reB = _mkl_ffi.buffer(reP[0], nr * _mkl_ffi.sizeof('int'))
re = np.frombuffer(reB, np.intc)
nnz = re[nr-1]
ciB = _mkl_ffi.buffer(ciP[0], nnz * _mkl_ffi.sizeof('int'))
vsB = _mkl_ffi.buffer(vsP[0], nnz * _mkl_ffi.sizeof('double'))
cols = np.frombuffer(ciB, np.intc)[:nnz].copy()
vals = np.frombuffer(vsB, np.float_)[:nnz].copy()
rowptrs = np.zeros(nr + 1, dtype=np.int32)
rowptrs[1:] = re
return CSR(nr, nc, nnz, rowptrs, cols, vals)
def mult_vec(self, alpha, x, beta, y):
"""
Compute :math:`\\alpha A x + \\beta y`, where :math:`A` is this matrix.
"""
desc = _mkl_basic_descr()
x = np.require(x, np.float64, 'C')
yout = np.require(y, np.float64, 'C')
if yout is y:
yout = yout.copy()
_x = _mkl_ffi.cast('double*', x.ctypes.data)
_y = _mkl_ffi.cast('double*', yout.ctypes.data)
rv = _mkl_lib.mkl_sparse_d_mv(10, alpha, self.handle, desc[0], _x, beta, _y)
_mkl_check_return(rv, 'mkl_sparse_d_mv')
return yout
def csr_syrk(csr: CSR):
"""
Interface to the ``mkl_sparse_syrk`` routine, with necessary setup and conversion.
"""
_logger.debug('syrk: processing %dx%d matrix (%d nnz)', csr.nrows, csr.ncols, csr.nnz)
src = SparseM.from_csr(csr)
_logger.debug('syrk: ordering matrix')
rv = _mkl_lib.mkl_sparse_order(src.handle)
_mkl_check_return(rv, 'mkl_sparse_order')
_logger.debug('syrk: multiplying matrix')
mult = SparseM()
rv = _mkl_lib.mkl_sparse_syrk(11, src.handle, mult.h_ptr)
_mkl_check_return(rv, 'mkl_sparse_syrk')
del src # free a little memory
_logger.debug('syrk: exporting matrix')
result = mult.export()
_logger.debug('syrk: received %dx%d matrix (%d nnz)',
result.nrows, result.ncols, result.nnz)
return result
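# Hedged usage sketch (comments only; it is only meaningful when the 'mkl_rt'
# runtime loaded above was found). `csr` stands for any lenskit.matrix.CSR and
# `x` for a dense vector of length csr.ncols; both are placeholders.
#
#   if _mkl_lib is not None:
#       m = SparseM.from_csr(csr)
#       y = m.mult_vec(1.0, x, 0.0, np.zeros(csr.nrows))  # y = A @ x
#       gram = csr_syrk(csr)  # (upper-triangular) product A^T A, as a new CSR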
| 5,484 | 30.164773 | 95 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/matrix.py | """
Utilities for working with rating matrices.
"""
from collections import namedtuple
import logging
import warnings
import pandas as pd
import numpy as np
import scipy.sparse as sps
import numba as n
from numba import njit, jitclass, prange
_logger = logging.getLogger(__name__)
RatingMatrix = namedtuple('RatingMatrix', ['matrix', 'users', 'items'])
RatingMatrix.__doc__ = """
A rating matrix with associated indices.
Attributes:
matrix(CSR or scipy.sparse.csr_matrix):
The rating matrix, with users on rows and items on columns.
users(pandas.Index): mapping from user IDs to row numbers.
items(pandas.Index): mapping from item IDs to column numbers.
"""
def mkl_ops():
"""
Import and return the MKL operations module. This is only for internal use.
"""
try:
from . import _mkl_ops
if _mkl_ops._mkl_lib:
return _mkl_ops
else:
return None
except ImportError:
return None
def _csr_delegate(name):
def func(self):
return getattr(self.N, name)
return property(func)
@jitclass({
'nrows': n.int32,
'ncols': n.int32,
'nnz': n.int32,
'rowptrs': n.int32[:],
'colinds': n.int32[:],
'values': n.optional(n.float64[:])
})
class _CSR:
"""
Internal implementation class for :py:class:`CSR`. If you work with CSRs from Numba,
you will use this.
"""
def __init__(self, nrows, ncols, nnz, ptrs, inds, vals):
self.nrows = nrows
self.ncols = ncols
self.nnz = nnz
self.rowptrs = ptrs
self.colinds = inds
self.values = vals
def row(self, row):
sp = self.rowptrs[row]
ep = self.rowptrs[row + 1]
v = np.zeros(self.ncols)
cols = self.colinds[sp:ep]
if self.values is None:
v[cols] = 1
else:
v[cols] = self.values[sp:ep]
return v
def row_extent(self, row):
sp = self.rowptrs[row]
ep = self.rowptrs[row+1]
return (sp, ep)
def row_cs(self, row):
sp = self.rowptrs[row]
ep = self.rowptrs[row + 1]
return self.colinds[sp:ep]
def row_vs(self, row):
sp = self.rowptrs[row]
ep = self.rowptrs[row + 1]
if self.values is None:
return np.full(ep - sp, 1.0)
else:
return self.values[sp:ep]
class CSR:
"""
Simple compressed sparse row matrix. This is like :py:class:`scipy.sparse.csr_matrix`, with
a couple of useful differences:
* It is backed by a Numba jitclass, so it can be directly used from Numba-optimized functions.
* The value array is optional, for cases in which only the matrix structure is required.
* The value array, if present, is always double-precision.
You generally don't want to create this class yourself with the constructor. Instead, use one
of its class methods.
If you need to pass an instance off to a Numba-compiled function, use :py:attr:`N`::
_some_numba_fun(csr.N)
We use the indirection between this and the Numba jitclass so that the main CSR implementation
can be pickled, and so that we can have class and instance methods that are not compatible with
jitclass but which are useful from interpreted code.
Attributes:
N(_CSR): the Numba jitclass backing (has the same attributes and most methods).
nrows(int): the number of rows.
ncols(int): the number of columns.
nnz(int): the number of entries.
rowptrs(numpy.ndarray): the row pointers.
colinds(numpy.ndarray): the column indices.
values(numpy.ndarray): the values
"""
__slots__ = ['N']
def __init__(self, nrows=None, ncols=None, nnz=None, ptrs=None, inds=None, vals=None, N=None):
if N is not None:
self.N = N
else:
self.N = _CSR(nrows, ncols, nnz, ptrs, inds, vals)
@classmethod
def from_coo(cls, rows, cols, vals, shape=None):
"""
Create a CSR matrix from data in COO format.
Args:
rows(array-like): the row indices.
cols(array-like): the column indices.
vals(array-like): the data values; can be ``None``.
shape(tuple): the array shape, or ``None`` to infer from row & column indices.
"""
if shape is not None:
nrows, ncols = shape
assert np.max(rows) < nrows
assert np.max(cols) < ncols
else:
nrows = np.max(rows) + 1
ncols = np.max(cols) + 1
nnz = len(rows)
assert len(cols) == nnz
assert vals is None or len(vals) == nnz
rowptrs = np.zeros(nrows + 1, dtype=np.int32)
align = np.full(nnz, -1, dtype=np.int32)
_csr_align(rows, nrows, rowptrs, align)
cols = cols[align].copy()
vals = vals[align].copy() if vals is not None else None
return cls(nrows, ncols, nnz, rowptrs, cols, vals)
@classmethod
def from_scipy(cls, mat, copy=True):
"""
Convert a scipy sparse matrix to an internal CSR.
Args:
mat(scipy.sparse.spmatrix): a SciPy sparse matrix.
copy(bool): if ``False``, reuse the SciPy storage if possible.
Returns:
CSR: a CSR matrix.
"""
if not sps.isspmatrix_csr(mat):
mat = mat.tocsr(copy=copy)
rp = np.require(mat.indptr, np.int32, 'C')
if copy and rp is mat.indptr:
rp = rp.copy()
cs = np.require(mat.indices, np.int32, 'C')
if copy and cs is mat.indices:
cs = cs.copy()
vs = mat.data.copy() if copy else mat.data
return cls(mat.shape[0], mat.shape[1], mat.nnz, rp, cs, vs)
def to_scipy(self):
"""
Convert a CSR matrix to a SciPy :py:class:`scipy.sparse.csr_matrix`.
Args:
self(CSR): A CSR matrix.
Returns:
scipy.sparse.csr_matrix:
A SciPy sparse matrix with the same data.
"""
values = self.values
if values is None:
values = np.full(self.nnz, 1.0)
return sps.csr_matrix((values, self.colinds, self.rowptrs), shape=(self.nrows, self.ncols))
nrows = _csr_delegate('nrows')
ncols = _csr_delegate('ncols')
nnz = _csr_delegate('nnz')
rowptrs = _csr_delegate('rowptrs')
colinds = _csr_delegate('colinds')
values = _csr_delegate('values')
@values.setter
def values(self, vs: np.ndarray):
if vs is not None:
if not isinstance(vs, np.ndarray):
raise TypeError('values not an ndarray')
if vs.ndim != 1:
                raise ValueError('values has {} dimensions, expected 1'.format(vs.ndim))
if vs.shape[0] < self.nnz:
s = 'values has only {} entries (expected at least {})'
raise ValueError(s.format(vs.shape[0], self.nnz))
vs = vs[:self.nnz]
vs = np.require(vs, 'f8')
self.N.values = vs
def rowinds(self) -> np.ndarray:
"""
Get the row indices from this array. Combined with :py:attr:`colinds` and
:py:attr:`values`, this can form a COO-format sparse matrix.
.. note:: This method is not available from Numba.
"""
return np.repeat(np.arange(self.nrows, dtype=np.int32), np.diff(self.rowptrs))
def row(self, row):
"""
Return a row of this matrix as a dense ndarray.
Args:
row(int): the row index.
Returns:
numpy.ndarray: the row, with 0s in the place of missing values.
"""
return self.N.row(row)
def row_extent(self, row):
"""
Get the extent of a row in the underlying column index and value arrays.
Args:
row(int): the row index.
Returns:
tuple: ``(s, e)``, where the row occupies positions :math:`[s, e)` in the
CSR data.
"""
return self.N.row_extent(row)
def row_cs(self, row):
"""
        Get the column indices for the stored values of a row.
"""
return self.N.row_cs(row)
def row_vs(self, row):
"""
Get the stored values of a row.
"""
return self.N.row_vs(row)
def row_nnzs(self):
"""
Get a vector of the number of nonzero entries in each row.
.. note:: This method is not available from Numba.
Returns:
numpy.ndarray: the number of nonzero entries in each row.
"""
return np.diff(self.rowptrs)
def sort_values(self):
"""
Sort CSR rows in nonincreasing order by value.
.. note:: This method is not available from Numba.
"""
_csr_sort(self.nrows, self.rowptrs, self.colinds, self.values)
def transpose(self, values=True):
"""
Transpose a CSR matrix.
.. note:: This method is not available from Numba.
Args:
values(bool): whether to include the values in the transpose.
Returns:
CSR: the transpose of this matrix (or, equivalently, this matrix in CSC format).
"""
rowinds = self.rowinds()
align = np.empty(self.nnz, dtype=np.int32)
colptrs = np.zeros(self.ncols + 1, dtype=np.int32)
_csr_align(self.colinds, self.ncols, colptrs, align)
n_rps = colptrs
n_cis = rowinds[align].copy()
if values and self.values is not None:
n_vs = self.values[align].copy()
else:
n_vs = None
return CSR(self.ncols, self.nrows, self.nnz, n_rps, n_cis, n_vs)
def __str__(self):
return '<CSR {}x{} ({} nnz)>'.format(self.nrows, self.ncols, self.nnz)
def __getstate__(self):
return dict(shape=(self.nrows, self.ncols), nnz=self.nnz,
rowptrs=self.rowptrs, colinds=self.colinds, values=self.values)
def __setstate__(self, state):
nrows, ncols = state['shape']
nnz = state['nnz']
rps = state['rowptrs']
cis = state['colinds']
vs = state['values']
self.N = _CSR(nrows, ncols, nnz, rps, cis, vs)
@njit(n.void(n.intc, n.int32[:], n.int32[:], n.double[:]),
parallel=True, nogil=True)
def _csr_sort(nrows, rowptrs, colinds, values):
assert len(rowptrs) > nrows
for i in prange(nrows):
sp = rowptrs[i]
ep = rowptrs[i+1]
if ep > sp:
ord = np.argsort(values[sp:ep])
ord = ord[::-1]
colinds[sp:ep] = colinds[sp + ord]
values[sp:ep] = values[sp + ord]
@njit
def _csr_align(rowinds, nrows, rowptrs, align):
rcts = np.zeros(nrows, dtype=np.int32)
for r in rowinds:
rcts[r] += 1
rowptrs[1:] = np.cumsum(rcts)
rpos = rowptrs[:-1].copy()
for i in range(len(rowinds)):
row = rowinds[i]
pos = rpos[row]
align[pos] = i
rpos[row] += 1
def sparse_ratings(ratings, scipy=False):
"""
Convert a rating table to a sparse matrix of ratings.
Args:
ratings(pandas.DataFrame): a data table of (user, item, rating) triples.
scipy: if ``True``, return a SciPy matrix instead of :py:class:`CSR`.
Returns:
RatingMatrix:
a named tuple containing the sparse matrix, user index, and item index.
"""
uidx = pd.Index(ratings.user.unique(), name='user')
iidx = pd.Index(ratings.item.unique(), name='item')
_logger.debug('creating matrix with %d ratings for %d items by %d users',
len(ratings), len(iidx), len(uidx))
row_ind = uidx.get_indexer(ratings.user).astype(np.int32)
col_ind = iidx.get_indexer(ratings.item).astype(np.int32)
if 'rating' in ratings.columns:
vals = np.require(ratings.rating.values, np.float64)
else:
vals = None
matrix = CSR.from_coo(row_ind, col_ind, vals, (len(uidx), len(iidx)))
if scipy:
matrix = matrix.to_scipy()
return RatingMatrix(matrix, uidx, iidx)
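# Hedged usage sketch (added for illustration; the tiny rating frame is made up):
if __name__ == '__main__':
    ratings = pd.DataFrame({
        'user': [1, 1, 2, 3],
        'item': [7, 9, 7, 9],
        'rating': [4.0, 3.5, 5.0, 2.0],
    })
    rmat, users, items = sparse_ratings(ratings)
    print(rmat)  # <CSR 3x2 (4 nnz)>
    print(users.get_loc(2), items.get_loc(9))  # row of user 2, column of item 9
    print(rmat.row(users.get_loc(1)))  # dense ratings row for user 1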
| 12,071 | 28.660934 | 99 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/algorithms/item_knn.py | """
Item-based k-NN collaborative filtering.
"""
import pathlib
import logging
import warnings
import time
import pandas as pd
import numpy as np
import scipy.sparse as sps
import scipy.sparse.linalg as spla
from numba import njit, prange
from itertools import combinations
from lenskit import util, matrix, DataWarning
from . import Predictor
_logger = logging.getLogger(__name__)
@njit(nogil=True)
def _predict_weighted_average(model, nitems, nrange, ratings, targets):
min_nbrs, max_nbrs = nrange
scores = np.full(nitems, np.nan, dtype=np.float_)
for i in prange(targets.shape[0]):
iidx = targets[i]
rptr = model.rowptrs[iidx]
rend = model.rowptrs[iidx + 1]
num = 0
denom = 0
nnbrs = 0
for j in range(rptr, rend):
nidx = model.colinds[j]
if np.isnan(ratings[nidx]):
continue
nnbrs = nnbrs + 1
num = num + ratings[nidx] * model.values[j]
denom = denom + np.abs(model.values[j])
if max_nbrs > 0 and nnbrs >= max_nbrs:
break
if nnbrs < min_nbrs:
continue
scores[iidx] = num / denom
return scores
@njit(nogil=True)
def _predict_sum(model, nitems, nrange, ratings, targets):
min_nbrs, max_nbrs = nrange
scores = np.full(nitems, np.nan, dtype=np.float_)
for i in prange(targets.shape[0]):
iidx = targets[i]
rptr = model.rowptrs[iidx]
rend = model.rowptrs[iidx + 1]
score = 0
nnbrs = 0
for j in range(rptr, rend):
nidx = model.colinds[j]
if np.isnan(ratings[nidx]):
continue
nnbrs = nnbrs + 1
score = score + model.values[j]
if max_nbrs > 0 and nnbrs >= max_nbrs:
break
if nnbrs < min_nbrs:
continue
scores[iidx] = score
return scores
_predictors = {
'weighted-average': _predict_weighted_average,
'sum': _predict_sum
}
class ItemItem(Predictor):
"""
Item-item nearest-neighbor collaborative filtering with ratings. This item-item implementation
is not terribly configurable; it hard-codes design decisions found to work well in the previous
Java-based LensKit code.
Attributes:
item_index_(pandas.Index): the index of item IDs.
item_means_(numpy.ndarray): the mean rating for each known item.
item_counts_(numpy.ndarray): the number of saved neighbors for each item.
sim_matrix_(matrix.CSR): the similarity matrix.
user_index_(pandas.Index): the index of known user IDs for the rating matrix.
rating_matrix_(matrix.CSR): the user-item rating matrix for looking up users' ratings.
"""
def __init__(self, nnbrs, min_nbrs=1, min_sim=1.0e-6, save_nbrs=None,
center=True, aggregate='weighted-average'):
"""
Args:
nnbrs(int):
the maximum number of neighbors for scoring each item (``None`` for unlimited)
min_nbrs(int): the minimum number of neighbors for scoring each item
min_sim(double): minimum similarity threshold for considering a neighbor
save_nbrs(double):
the number of neighbors to save per item in the trained model
(``None`` for unlimited)
center(bool):
whether to normalize (mean-center) rating vectors. Turn this off when working
with unary data and other data types that don't respond well to centering.
aggregate:
the type of aggregation to do. Can be ``weighted-average`` or ``sum``.
"""
self.nnbrs = nnbrs
if self.nnbrs is not None and self.nnbrs < 1:
self.nnbrs = -1
self.min_nbrs = min_nbrs
if self.min_nbrs is not None and self.min_nbrs < 1:
self.min_nbrs = 1
self.min_sim = min_sim
self.save_nbrs = save_nbrs
self.center = center
self.aggregate = aggregate
try:
self._predict_agg = _predictors[aggregate]
except KeyError:
raise ValueError('unknown aggregator {}'.format(aggregate))
def fit(self, ratings):
"""
Train a model.
The model-training process depends on ``save_nbrs`` and ``min_sim``, but *not* on other
algorithm parameters.
Args:
ratings(pandas.DataFrame):
(user,item,rating) data for computing item similarities.
"""
# Training proceeds in 2 steps:
# 1. Normalize item vectors to be mean-centered and unit-normalized
# 2. Compute similarities with pairwise dot products
self._timer = util.Stopwatch()
init_rmat, users, items = matrix.sparse_ratings(ratings)
'''
# Find User Rating to remove for experimenting with Unlearn Algorithm
# Try to Find non trivial rating items to remove
for index, row in ratings.iterrows():
if items.get_loc(row['item']) in [17,138,22,83,76,31,92]:
#print(row['user'],row['item'],index,users.get_loc(row['user']),items.get_loc(row['item']))
pass
'''
n_items = len(items)
_logger.info('[%s] made sparse matrix for %d items (%d ratings from %d users)',
self._timer, len(items), init_rmat.nnz, len(users))
start = time.time()
rmat_scipy = init_rmat.to_scipy()
self._compute_similarities_unlearn_min_centering_sparse_vectorize(rmat_scipy,items,users)
end = time.time()
learn_unlearn_time = end - start
print("Unlearn Supported Learning: {}".format(end-start))
rows, cols, vals = self.smat_unlearn_sparse_csr
self.smat_unlearn_sparse = sps.csr_matrix((vals,(rows,cols)),shape=(self.M,self.M))
# Print OUT Similarity Matrix to Verify Completeness
#print(self.smat_unlearn_sparse)
start = time.time()
self._unlearn_min_centering_sparse(54,17,rmat_scipy,self.smat_unlearn_sparse)
end = time.time()
unlearn_time = end - start
print("Unlearn: {}".format(end-start))
start = time.time()
rmat, item_means = self._mean_center(ratings, init_rmat, items, users)
rmat = self._normalize(rmat)
_logger.info('[%s] computing similarity matrix', self._timer)
smat = self._compute_similarities(rmat,items,users)
end = time.time()
native_learn_time = end - start
# Print OUT Similarity Matrix to Verify Completeness
#print(smat.to_scipy())
print("Native Learning: {}".format(end-start))
_logger.info('[%s] got neighborhoods for %d of %d items',
self._timer, np.sum(np.diff(smat.rowptrs) > 0), n_items)
_logger.info('[%s] computed %d neighbor pairs', self._timer, smat.nnz)
self.item_index_ = items
self.item_means_ = item_means
self.item_counts_ = np.diff(smat.rowptrs)
self.sim_matrix_ = smat
self.user_index_ = users
self.rating_matrix_ = init_rmat
# Save the Time Cost evaluation result
#f = open("output_matrix.csv","a+")
#f.write("{},{},{},{}\n".format(init_rmat.nnz ,native_learn_time,learn_unlearn_time,unlearn_time))
#f.close()
return self
    # Calculate the summations for the unlearn-supported learning algorithm (summation form of the native algorithm).
    # Mean centering is done with item means only.
    # Uses nested loops and dense NumPy arrays (matrices).
    # Very slow.
def _compute_similarities_unlearn_min_centering(self,ratings,rmat,items,users):
rmat_scipy = rmat.to_scipy()
N = len(users)
M = len(items)
SUM_ITEM = np.zeros(M)
Count_ITEM = np.zeros(M)
MEAN_ITEM = np.zeros(M)
S_ITEM = np.zeros((M,M))
S_ITEMITEM = np.zeros((M,M))
smat = np.zeros((M,M))
Count_ITEMITEM = np.zeros((M,M))
for i in range(N):
for j in range(M):
if rmat_scipy[i,j] != 0:
SUM_ITEM[j] += rmat_scipy[i,j]
Count_ITEM[j] += 1
MEAN_ITEM = SUM_ITEM / Count_ITEM
for k in range(M):
for l in range(M):
for i in range(N):
if rmat_scipy[i,l] != 0 and rmat_scipy[i,k] != 0:
#print(k,l)
S_ITEMITEM[k,l] += rmat_scipy[i,l] * rmat_scipy[i,k]
S_ITEM[k,l] += rmat_scipy[i,k]
Count_ITEMITEM[k,l] += 1
self.S_I = S_ITEM
self.S_II = S_ITEMITEM
self.M_I = MEAN_ITEM
self.N_I = Count_ITEM
self.N_II = Count_ITEMITEM
self.Sum_I = SUM_ITEM
self.N = N
self.M = M
for k in range(M):
for l in range(M):
if Count_ITEMITEM[k,l] > 0:
smat_val = self._learn_sim(S_ITEMITEM[k,l],S_ITEMITEM[k,k],S_ITEMITEM[l,l],S_ITEM[k,l],S_ITEM[l,k],MEAN_ITEM[k],MEAN_ITEM[l],Count_ITEMITEM[k,l],Count_ITEM[k],Count_ITEM[l],SUM_ITEM[k],SUM_ITEM[l])
if smat_val > 0 and k != l:
smat[k,l] = smat_val
#print(k,l,S_ITEMITEM[k,l],S_ITEMITEM[k,k],S_ITEMITEM[l,l],S_ITEM[k,l],S_ITEM[l,k],MEAN_ITEM[k],MEAN_ITEM[l],Count_ITEMITEM[k,l],Count_ITEM[k],Count_ITEM[l],SUM_ITEM[k],SUM_ITEM[l])
#print(smat[k,l],k,l)
self.smat_unlearn = smat
    # Given the summations,
    # calculate the similarity between item k and item l.
    # Supports _compute_similarities_unlearn_min_centering and _unlearn_min_centering_sparse.
def _learn_sim(self,Skl,Skk,Sll,Sk,Sl,Mk,Ml,Nkl,Nk,Nl,Sumk,Suml):
top = Skl-Mk*Sl-Ml*Sk+Mk*Ml*Nkl
deno = np.sqrt(Skk-2*Mk*Sumk+(Mk**2)*Nk) * np.sqrt(Sll-2*Ml*Suml+(Ml**2)*Nl)
if deno == 0:
return 0
else:
return top/deno
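    # Note (added): _learn_sim is the item-mean-centered cosine written out in sums
    # so that the summations above can be updated incrementally. With Skl, Sk, Sl
    # and Nkl taken over the users who rated both items, the numerator expands as
    #   sum_u (r_uk - Mk) * (r_ul - Ml) = Skl - Mk*Sl - Ml*Sk + Mk*Ml*Nkl,
    # and each denominator factor, taken over all users who rated the item, as
    #   sqrt(sum_u (r_uk - Mk)^2) = sqrt(Skk - 2*Mk*Sumk + Mk^2 * Nk).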
    # Vectorized version of _learn_sim, using csr_matrix.
    # Calculates the similarity matrix given all the summations.
    # Supports _compute_similarities_unlearn_min_centering_sparse_slow and _compute_similarities_unlearn_min_centering_sparse_vectorize.
def _learn_sim_vectorize(self, S_II=None, S_I=None, M_I=None, N_I=None, N_II=None, SUM_I=None):
S_II=self.S_II_sparse
S_I=self.S_I_sparse
M_I=self.M_I_sparse
N_I=self.N_I_sparse
N_II=self.N_II_sparse
SUM_I=self.Sum_I_sparse
M_T = M_I.transpose()
top = S_II - S_I.multiply(M_I) - S_I.transpose().multiply(M_T) + M_I.multiply(M_T).multiply(N_II)
#print(S_II[22,76],S_I.multiply(M_T)[22,76],S_I.transpose().multiply(M_I)[22,76],M_I.multiply(M_T).multiply(N_II)[22,76])
deno = sps.csr_matrix(S_II.diagonal()) - 2 * M_I.multiply(SUM_I) + M_I.multiply(M_I).multiply(N_I)
#deno = 2 * M_I.multiply(SUM_I) + M_I.multiply(M_I).multiply(N_I)
deno = deno.sqrt()
deno = deno.multiply(deno.transpose())
is_nz = deno > 0
deno[is_nz] = np.reciprocal(deno[is_nz])
smat = top.multiply(deno)
smat = smat.tocoo()
rows, cols, vals = smat.row, smat.col, smat.data
#rows = rows[:smat.nnz]
#cols = cols[:smat.nnz]
#vals = vals[:smat.nnz]
rows, cols, vals = self._filter_similarities(rows, cols, vals)
return rows, cols, vals #sps.csr_matrix((vals,(rows,cols)),shape=(self.M,self.M))
    # Given the summations, calculate the similarity matrix.
    # Vectorized version of _learn_similarities_.
    # Mean centering is done with item means, user means, and the global mean.
def _learn_sim_global_vectorize(self, S_II=None, S_I=None, M_I=None, N_I=None, N_II=None, SUM_I=None, g=None, UM=None):
S_II=self.S_II_sparse
S_I=self.S_I_sparse
M_I=self.M_I_sparse
N_I=self.N_I_sparse
N_II=self.N_II_sparse
SUM_I=self.Sum_I_sparse
UM = self.UM_I
g = sps.csr_matrix([self.G])
M_T = M_I.transpose()
M_I_G = sps.csr_matrix(self.M_I - self.G)
M_I_M_I = sps.csr_matrix( np.repeat(self.M_I,self.M,axis = 0) + np.repeat(self.M_I.T,self.M,axis=1))
top = S_II - S_I.multiply(M_I) - S_I.transpose().multiply(M_T) + g.multiply(S_I+S_I.transpose()) - g.multiply(M_I_M_I).multiply(N_II) + M_I.multiply(M_T).multiply(N_II) + g.multiply(g).multiply(N_II)
deno = sps.csr_matrix(S_II.diagonal()) - 2 * (M_I_G).multiply(SUM_I) + (M_I_G).multiply(M_I_G).multiply(N_I)
deno = deno.sqrt()
#print(deno.shape,deno[0,1],S_II[1,1],M_I_G[0,1],SUM_I[0,1],N_I[0,1],S_I[1,1])
deno = deno.multiply(deno.transpose())
is_nz = deno > 0
deno[is_nz] = np.reciprocal(deno[is_nz])
smat = top.multiply(deno)
return smat
    # Unlearn algorithm.
    # Mean centering is done with item means only.
    # Removes user u's rating for item t.
def _unlearn_min_centering(self,u,t,rmat,smat):
rmat_scipy = rmat.to_scipy()
self.Sum_I[t] -= rmat_scipy[u,t]
self.M_I[t] = (self.M_I[t] * self.N_I[t] - rmat_scipy[u,t]) / (self.N_I[t] - 1)
self.N_I[t] -= 1
for k in range(self.M):
for l in range(self.M):
if rmat_scipy[u,k] != 0 and rmat_scipy[u,l] != 0:
if k == t or l == t:
#print(k,l)
self.S_II[k,l] -= rmat_scipy[u,k] * rmat_scipy[u,l]
self.S_I[k,l] -= rmat_scipy[u,k]
self.N_II[k,l] -= 1
for k in range(self.M):
if smat[k,t] != 0:
#print(smat[k,t],k,t)
smat[k,t] = self._learn_sim(self.S_II[k,t],self.S_II[k,k],self.S_II[t,t],self.S_I[k,t],self.S_I[t,k],self.M_I[k],self.M_I[t],self.N_II[k,t],self.N_I[k],self.N_I[t],self.Sum_I[k],self.Sum_I[t])
smat[t,k] = smat[k,t]
#print(smat[k,t])
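# Update sketch (hedged) for the unlearning step above: removing rating r_ut
# only perturbs summations that involve item t,
#     Sum_I[t] -= r_ut;  M_I[t] = (old_mean * old_count - r_ut) / (old_count - 1);  N_I[t] -= 1
#     S_II[k,t] -= r_uk * r_ut;  S_I[k,t] -= r_uk;  N_II[k,t] -= 1   (and symmetrically for row t)
# for every item k that user u co-rated with t, after which only row/column t of
# the similarity matrix needs to be recomputed via _learn_sim.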
# Calculate the summations for the unlearning-capable learning algorithm
# (summation form of the native algorithm).
# Mean centering is done with item means, user means, and the global mean.
# Dense NumPy arrays/matrices are used.
def _compute_similarities_unlearn_global(self,ratings,rmat,items,users):
rmat_scipy = rmat.to_scipy()
N = len(users)
M = len(items)
SUM_USER = np.zeros(N)
MEAN_USER = np.zeros(N)
SUM_ITEM = np.zeros(M)
Count_ITEM = np.zeros(M)
MEAN_ITEM = np.zeros(M)
SUM_g = 0
for i in range(N):
Count = 0
for j in range(M):
if rmat_scipy[i,j] != 0:
SUM_USER[i] += rmat_scipy[i,j]
Count += 1
SUM_ITEM[j] += rmat_scipy[i,j]
Count_ITEM[j] += 1
SUM_g += rmat_scipy[i,j]
MEAN_USER[i] = SUM_USER[i] / Count
g = SUM_g / np.sum(Count_ITEM)
#print(np.sum(Count_ITEM))
S_Item = np.zeros(len(items))
S_ItemItem = np.zeros((len(items),len(items)))
smat = np.zeros((len(items),len(items)))
r_copy = rmat_scipy.copy()
for k in range(M):
MEAN_ITEM[k] = SUM_ITEM[k] / Count_ITEM[k]
for i in range(N):
if rmat_scipy[i,k] != 0:
S_Item[k] += rmat_scipy[i,k] - MEAN_USER[i]
for l in range(M):
for i in range(N):
if rmat_scipy[i,k] != 0 and rmat_scipy[i,l] != 0:
S_ItemItem[k,l] += (rmat_scipy[i,k]-MEAN_USER[i])*(rmat_scipy[i,l]-MEAN_USER[i])
#smat[k,l] = self._learn_similarities_(S_ItemItem[k,l],S_Item[k],S_Item[l],S_ItemItem[k,k],S_ItemItem[l,l],g,MEAN_ITEM[k],MEAN_ITEM[l],N)
#if S_ItemItem[k,l] != 0 and k!=l:
# print(k,l,smat[k,l] )
for k in range(M):
tmp = np.sqrt(S_ItemItem[k,k] - 2 * (MEAN_ITEM[k] - g) * S_Item[k] + ((MEAN_ITEM[k] - g)**2) * Count_ITEM[k])
if tmp != 0:
print(tmp,k,S_ItemItem[k,k],MEAN_ITEM[k],Count_ITEM[k],g)
for l in range(M):
# if k != l:
smat[k,l] = self._learn_similarities_(S_ItemItem[k,l],S_Item[k],S_Item[l],S_ItemItem[k,k],S_ItemItem[l,l],g,MEAN_ITEM[k],MEAN_ITEM[l],Count_ITEM[k],Count_ITEM[l])
# if S_ItemItem[k,l] != 0:
# print(k,l,smat[k,l], (r_copy[:,k].T @ r_copy[:, l]) )
#if rmat_scipy[k,l] != 0:
# print(rmat_scipy)
#print(smat)
# Given the summations, calculate the similarity between item k and item l.
# Mean centering is done with item means, user means, and the global mean.
def _learn_similarities_(self,Skl,Sk,Sl,Skk,Sll,g,Mk,Ml,N1,N2):
top = Skl - Mk*Sl - Ml*Sk + g*(Sk+Sl) - g * (Mk + Ml) * N1 + Mk * Ml * N1 + g*g*N1
down = np.sqrt(Skk - 2 * (Mk - g) * Sk + ((Mk - g)**2) * N1)
down*= np.sqrt(Sll - 2 * (Ml - g) * Sl + ((Ml - g)**2) * N2)
if down == 0:
return 0
return top / down
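# Algebraic sketch (hedged): writing x_uk = r_uk - user_mean_u for the
# user-centered ratings that feed Skl, Sk and Sl, the numerator above is the
# expansion of sum_u (x_uk - (Mk - g)) * (x_ul - (Ml - g)) with N1 as the count,
# and each denominator factor is the expansion of sum_u (x_uk - (Mk - g))**2:
#     Skk - 2*(Mk - g)*Sk + (Mk - g)**2 * N1
# so ratings are effectively centered by the user mean and by (item mean - global mean).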
# Calculate the summations for the unlearning-capable learning algorithm
# (summation form of the native algorithm).
# Mean centering is only done with item means.
# csr_matrix version of _compute_similarities_unlearn_min_centering; not vectorized, so slow.
def _compute_similarities_unlearn_min_centering_sparse_slow(self,rmat_scipy,items,users):
rmat_coo = rmat_scipy.tocoo()
rows, cols, vals = rmat_coo.row, rmat_coo.col, rmat_coo.data
N = len(users)
M = len(items)
SUM_ITEM = np.zeros(M)
Count_ITEM = np.zeros(M)
MEAN_ITEM = np.zeros(M)
Count_ITEMITEM_data = []
S_ITEM_data = []
S_ITEMITEM_data = []
II_ROWS, II_COLS = [], []
for i in range(rmat_scipy.nnz):
c, v = cols[i], vals[i]
SUM_ITEM[c] += v
Count_ITEM[c] += 1
MEAN_ITEM = SUM_ITEM / Count_ITEM
for i in range(N):
idx = np.argwhere(rows == i)
for k_idx in range(len(idx)):
for l_idx in range(len(idx)):
k = cols[idx[k_idx]][0]
l = cols[idx[l_idx]][0]
II_ROWS.append(k)
II_COLS.append(l)
Count_ITEMITEM_data.append(1)
s_ii = vals[idx[k_idx]][0] * vals[idx[l_idx]][0]
S_ITEMITEM_data.append(s_ii)
S_ITEM_data.append(vals[idx[k_idx]][0])
II_ROWS = np.array(II_ROWS)#.flatten()
II_COLS = np.array(II_COLS)#.flatten()
S_ITEM_data = np.array(S_ITEM_data)#.flatten()
S_ITEMITEM_data = np.array(S_ITEMITEM_data)#.flatten()
Count_ITEMITEM = sps.csr_matrix((Count_ITEMITEM_data, (II_ROWS,II_COLS)), shape=(M,M))
S_ITEM = sps.csr_matrix((S_ITEM_data, (II_ROWS,II_COLS)), shape=(M,M))
S_ITEMITEM = sps.csr_matrix((S_ITEMITEM_data, (II_ROWS,II_COLS)), shape=(M,M))
self.S_I_sparse = S_ITEM
self.S_II_sparse = S_ITEMITEM
self.N_II_sparse = Count_ITEMITEM
self.M_I = MEAN_ITEM
self.N_I = Count_ITEM
self.Sum_I = SUM_ITEM
self.M_I_sparse = sps.csr_matrix(MEAN_ITEM)
self.N_I_sparse = sps.csr_matrix(Count_ITEM)
self.Sum_I_sparse = sps.csr_matrix(SUM_ITEM)
self.N = N
self.M = M
self.smat_unlearn_sparse_csr = self._learn_sim_vectorize()
# Calculate the summations for the unlearning-capable learning algorithm
# (summation form of the native algorithm).
# Mean centering is only done with item means.
# Vectorized version of _compute_similarities_unlearn_min_centering_sparse_slow;
# this is the fastest implementation that maintains completeness.
def _compute_similarities_unlearn_min_centering_sparse_vectorize(self,rmat_scipy,items,users):
N = len(users)
M = len(items)
rmat_mask = rmat_scipy.copy()
rmat_mask[rmat_scipy>0] = 1
self.S_I_sparse = rmat_scipy.transpose() @ rmat_mask
self.S_II_sparse = rmat_scipy.transpose() @ rmat_scipy
self.N_II_sparse = rmat_mask.transpose() @ rmat_mask
'''
##################################################
# An effort was made to debug csr_matrix indexing.
# CSR indexing and coordinate indexing should give the same result,
# but that is not the case here.
##################################################
#self.S_I_sparse.sort_indices()
print("self.S_I_sparse.indices[self.S_I_sparse.indptr[138]:self.S_I_sparse.indptr[139]]: ",self.S_I_sparse.indices[self.S_I_sparse.indptr[138]:self.S_I_sparse.indptr[139]])
print("self.S_I_sparse.data[self.S_I_sparse.indptr[138]:self.S_I_sparse.indptr[139]]: ",self.S_I_sparse.data[self.S_I_sparse.indptr[138]:self.S_I_sparse.indptr[139]])
print("self.S_I_sparse[138,17]",self.S_I_sparse[138,17])
#print("self.S_I_sparse.getrow(138)")
#print(self.S_I_sparse.getrow(138))
print("self.S_I_sparse.indices[self.S_I_sparse.indptr[17]:self.S_I_sparse.indptr[18]],self.S_I_sparse[17,138]: ",self.S_I_sparse.indices[self.S_I_sparse.indptr[17]:self.S_I_sparse.indptr[18]])
print("self.S_I_sparse.data[self.S_I_sparse.indptr[17]:self.S_I_sparse.indptr[18]]: ",self.S_I_sparse.data[self.S_I_sparse.indptr[17]:self.S_I_sparse.indptr[18]])
print("self.S_I_sparse[17,138]",self.S_I_sparse[17,138])
#print("self.S_I_sparse.getrow(17): ")
#print(self.S_I_sparse.getrow(17))
'''
self.N_I = np.array(rmat_mask.sum(axis = 0)).squeeze()
self.Sum_I = np.array(rmat_scipy.sum(axis=0)).squeeze()
self.M_I = self.Sum_I / self.N_I
self.M_I_sparse = sps.csr_matrix(self.M_I)
self.N_I_sparse = sps.csr_matrix(self.N_I)
self.Sum_I_sparse = sps.csr_matrix(self.Sum_I)
self.N = N
self.M = M
self.smat_unlearn_sparse_csr = self._learn_sim_vectorize()
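# A toy, dense sketch of the summations built above (hedged; hypothetical data,
# NumPy only, just to illustrate the matrix products):
#     import numpy as np
#     R = np.array([[5., 3., 0.],
#                   [4., 0., 2.],
#                   [0., 1., 4.]])            # users x items
#     Mmask = (R > 0).astype(float)
#     S_II = R.T @ R          # co-rating dot products
#     S_I  = R.T @ Mmask      # S_I[k, l] = sum of item-k ratings from users who rated l
#     N_II = Mmask.T @ Mmask  # co-rating counts
#     N_Iv = Mmask.sum(axis=0); Sum_Iv = R.sum(axis=0); M_Iv = Sum_Iv / N_Iv
# The sparse version above does the same with csr_matrix products.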
# Unlearning algorithm; mean centering is only done with item means.
# Alternative version of _unlearn_min_centering that removes user u's rating for item t.
# csr_matrix features are used to reduce the time cost.
# Fastest unlearning implementation that maintains completeness.
def _unlearn_min_centering_sparse(self,u,t,rmat_scipy,smat):
val_u_t = rmat_scipy[u,t]
self.Sum_I[t] -= val_u_t
self.M_I[t] = ( self.M_I[t] * self.N_I[t] - val_u_t ) / (self.N_I[t] - 1)
self.N_I[t] -= 1
for l in self.N_II_sparse.getrow(t).indices:
#k, l = rows[i], cols[i]
val_u_l = rmat_scipy[u,l]
if val_u_l > 0:
self.S_II_sparse[t,l] -= val_u_t * val_u_l
self.S_I_sparse[t,l] -= val_u_t
self.N_II_sparse[t,l] -= 1
#print(val_u_t * val_u_l,val_u_t,t,l)
if t != l:
self.S_II_sparse[l,t] = self.S_II_sparse[t,l]
self.S_I_sparse[l,t] = self.S_I_sparse[t,l]
self.N_II_sparse[l,t] -= 1
for k in smat.getrow(t).indices:
if k != t:
#smat[k,t] = self._learn_sim(self.S_II_sparse[k,t],self.S_II_sparse[k,k],self.S_II_sparse[t,t],self.S_I_sparse[k,t],self.S_I_sparse[t,k],self.M_I[0,k],self.M_I[0,t],self.N_II_sparse[k,t],self.N_I[0,k],self.N_I[0,t],self.Sum_I[0,k],self.Sum_I[0,t])
smat[k,t] = self._learn_sim(self.S_II_sparse[k,t],self.S_II_sparse[k,k],self.S_II_sparse[t,t],self.S_I_sparse[k,t],self.S_I_sparse[t,k],self.M_I[k],self.M_I[t],self.N_II_sparse[k,t],self.N_I[k],self.N_I[t],self.Sum_I[k],self.Sum_I[t])
#print(self.S_II_sparse[k,t],self.S_II_sparse[k,k],self.S_II_sparse[t,t],self.S_I_sparse[k,t],self.S_I_sparse[t,k],self.M_I[0,k],self.M_I[0,t],self.N_II_sparse[k,t],self.N_I[0,k],self.N_I[0,t],self.Sum_I[0,k],self.Sum_I[0,t])
smat[t,k] = smat[k,t]
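# Cost note (hedged): this variant only walks the stored entries of row t in
# N_II and the stored neighbours of t in smat, so an unlearning step touches
# O(nnz of row t) entries rather than iterating over all M*M item pairs as the
# dense _unlearn_min_centering does.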
# Alternative version of _compute_similarities_unlearn_min_centering_sparse_vectorize.
# They have similar speed, but alterations are made to support _unlearn_min_centering_matrix.
def _compute_similarities_unlearn_min_centering_matrix_vectorize(self,rmat_scipy,items,users):
N = len(users)
M = len(items)
rmat_mask = rmat_scipy.copy()
rmat_mask[rmat_scipy>0] = 1
self.S_I_sparse = rmat_scipy.transpose() @ rmat_mask
self.S_II_sparse = rmat_scipy.transpose() @ rmat_scipy
self.N_II_sparse = rmat_mask.transpose() @ rmat_mask
self.S_I_matrix = matrix.CSR(self.S_I_sparse.shape[0], self.S_I_sparse.shape[1], self.S_I_sparse.nnz,
self.S_I_sparse.indptr.copy(), self.S_I_sparse.indices.copy(), self.S_I_sparse.data)
self.S_II_matrix = matrix.CSR(self.S_II_sparse.shape[0], self.S_II_sparse.shape[1], self.S_II_sparse.nnz,
self.S_II_sparse.indptr.copy(), self.S_II_sparse.indices.copy(), self.S_II_sparse.data)
self.N_II_matrix = matrix.CSR(self.N_II_sparse.shape[0], self.N_II_sparse.shape[1], self.N_II_sparse.nnz,
self.N_II_sparse.indptr.copy(), self.N_II_sparse.indices.copy(), self.N_II_sparse.data)
self.N_I = rmat_mask.sum(axis = 0)
self.Sum_I = rmat_scipy.sum(axis=0)
self.M_I = self.Sum_I / self.N_I
self.M_I_sparse = sps.csr_matrix(self.M_I)
self.N_I_sparse = sps.csr_matrix(self.N_I)
self.Sum_I_sparse = sps.csr_matrix(self.Sum_I)
self.N = N
self.M = M
self.smat_unlearn_sparse_csr = self._learn_sim_vectorize()
# Unlearning algorithm; mean centering is only done with item means.
# Alternative version of _unlearn_min_centering_sparse.
# Fully utilizes CSR indexing, resulting in faster speed.
# However, it is buggy and cannot maintain completeness.
def _unlearn_min_centering_matrix(self,u,t,rmat_scipy,smat,rmat):
val_u_t = rmat_scipy[u,t]
self.Sum_I[0,t] -= val_u_t
self.M_I[0,t] = ( self.M_I[0,t] * self.N_I[0,t] - val_u_t ) / (self.N_I[0,t] - 1)
self.N_I[0,t] -= 1
rmat_u_colinds = rmat.row_cs(u)
for l_idx, l in enumerate(self.S_I_matrix.row_cs(t)):
rmat_idx = np.where(rmat_u_colinds == l)[0]
if len(rmat_idx) == 1:  # check how many matches were found, not the length of the comparison array
val_u_l = rmat.values[rmat.rowptrs[u]+rmat_idx]
self.S_II_matrix.values[self.S_I_matrix.rowptrs[t]+l_idx] -= val_u_l * val_u_t
self.S_I_matrix.values[self.S_II_matrix.rowptrs[t]+l_idx] -= val_u_t
self.N_II_matrix.values[self.N_II_matrix.rowptrs[t]+l_idx] -= 1
for k_idx, k in enumerate(smat.getrow(t).indices):
if k != t:
#print(k,t,smat[k,t])
indices_k = self.N_II_matrix.colinds[self.N_II_matrix.rowptrs[k]:self.N_II_matrix.rowptrs[k+1]]
kt_idx = self.N_II_matrix.rowptrs[k] + np.where(indices_k == t)[0]
kk_idx = self.N_II_matrix.rowptrs[k] + np.where(indices_k == k)[0]
indices_t = self.N_II_matrix.colinds[self.N_II_matrix.rowptrs[t]:self.N_II_matrix.rowptrs[t+1]]
tk_idx = self.N_II_matrix.rowptrs[t] + np.where(indices_t == k)[0]
tt_idx = self.N_II_matrix.rowptrs[t] + np.where(indices_t == t)[0]
smat.data[smat.indptr[t]+k_idx] = self._learn_sim(
self.S_II_matrix.values[kt_idx],
self.S_II_matrix.values[kk_idx],
self.S_II_matrix.values[tt_idx],
self.S_I_matrix.values[kt_idx],
self.S_I_matrix.values[tk_idx],
self.M_I[0,k],
self.M_I[0,t],
self.N_II_matrix.values[kt_idx],
self.N_I[0,k],
self.N_I[0,t],
self.Sum_I[0,k],
self.Sum_I[0,t])
#print(self.S_I_matrix.values[kt_idx],self.S_I_sparse[k,t],self.S_I_matrix.row_cs(k),self.S_I_matrix.row_vs(k))
#print(self.S_I_matrix.values[tk_idx],self.S_I_sparse[t,k],self.S_I_matrix.row_cs(t),self.S_I_matrix.row_vs(t))
smat[k,t] = smat[t,k]
#print(smat.data[smat.indptr[t]+k_idx])
#self.S_I_sparse.eliminate_zeros()
#self.S_II_sparse.eliminate_zeros()
#self.N_II_sparse.eliminate_zeros()
smat.eliminate_zeros()
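# CSR indexing sketch (hedged): for a lenskit matrix.CSR A, the stored entries
# of row i are
#     A.colinds[A.rowptrs[i]:A.rowptrs[i+1]]   # column indices of row i
#     A.values[A.rowptrs[i]:A.rowptrs[i+1]]    # the matching values
# so A.values[A.rowptrs[i] + offset] addresses the offset-th stored entry of
# row i; that is how S_II_matrix, S_I_matrix and N_II_matrix are patched in
# place above.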
def _mean_center(self, ratings, rmat, items, users):
if not self.center:
return rmat, None
item_means = ratings.groupby('item').rating.mean()
item_means = item_means.reindex(items).values
user_means = ratings.groupby('user').rating.mean()
user_means = user_means.reindex(users).values
global_mean = ratings.rating.mean()
#mcvals = rmat.values - item_means[rmat.colinds] - user_means[rmat.rowinds()] + global_mean
#Old Mean Centering
mcvals = rmat.values - item_means[rmat.colinds]
nmat = matrix.CSR(rmat.nrows, rmat.ncols, rmat.nnz,
rmat.rowptrs.copy(), rmat.colinds.copy(), mcvals)
_logger.info('[%s] computed means for %d items', self._timer, len(item_means))
return nmat, item_means
def _normalize(self, rmat):
rmat = rmat.to_scipy()
# compute column norms
norms = spla.norm(rmat, 2, axis=0)
# and multiply by a diagonal to normalize columns
recip_norms = norms.copy()
is_nz = recip_norms > 0
#print(recip_norms[1],rmat.getcol(1))
recip_norms[is_nz] = np.reciprocal(recip_norms[is_nz])
norm_mat = rmat @ sps.diags(recip_norms)
assert norm_mat.shape[1] == rmat.shape[1]
# and reset NaN
norm_mat.data[np.isnan(norm_mat.data)] = 0
_logger.info('[%s] normalized rating matrix columns', self._timer)
return matrix.CSR.from_scipy(norm_mat, False)
def _compute_similarities(self, rmat, items, users):
mkl = matrix.mkl_ops()
mkl = None
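# mkl is overridden to None immediately above, so the SciPy branch below is
# always taken and the MKL path is effectively disabled.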
if mkl is None:
return self._scipy_similarities(rmat,items,users)
else:
return self._mkl_similarities(mkl, rmat)
def _scipy_similarities(self, rmat,items,users):
nitems = rmat.ncols
sp_rmat = rmat.to_scipy()
#print(sp_rmat.tocoo())
_logger.info('[%s] multiplying matrix with scipy', self._timer)
smat = sp_rmat.T @ sp_rmat
smat = smat.tocoo()
#print(smat)
rows, cols, vals = smat.row, smat.col, smat.data
rows = rows[:smat.nnz]
cols = cols[:smat.nnz]
vals = vals[:smat.nnz]
rows, cols, vals = self._filter_similarities(rows, cols, vals)
csr = self._select_similarities(nitems, rows, cols, vals)
return csr
def _mkl_similarities(self, mkl, rmat):
nitems = rmat.ncols
assert rmat.values is not None
_logger.info('[%s] multiplying matrix with MKL', self._timer)
smat = mkl.csr_syrk(rmat)
rows = smat.rowinds()
cols = smat.colinds
vals = smat.values
rows, cols, vals = self._filter_similarities(rows, cols, vals)
del smat
nnz = len(rows)
_logger.info('[%s] making matrix symmetric (%d nnz)', self._timer, nnz)
rows = np.resize(rows, nnz * 2)
cols = np.resize(cols, nnz * 2)
vals = np.resize(vals, nnz * 2)
rows[nnz:] = cols[:nnz]
cols[nnz:] = rows[:nnz]
vals[nnz:] = vals[:nnz]
csr = self._select_similarities(nitems, rows, cols, vals)
return csr
def _filter_similarities(self, rows, cols, vals):
"Threshold similarities & remove self-similarities."
_logger.info('[%s] filtering %d similarities', self._timer, len(rows))
# remove self-similarity
mask = rows != cols
# remove too-small similarities
if self.min_sim is not None:
mask = np.logical_and(mask, vals >= self.min_sim)
_logger.info('[%s] filter keeps %d of %d entries', self._timer, np.sum(mask), len(rows))
return rows[mask], cols[mask], vals[mask]
def _select_similarities(self, nitems, rows, cols, vals):
_logger.info('[%s] ordering similarities', self._timer)
csr = matrix.CSR.from_coo(rows, cols, vals, shape=(nitems, nitems))
csr.sort_values()
if self.save_nbrs is None or self.save_nbrs <= 0:
return csr
_logger.info('[%s] picking %d top similarities', self._timer, self.save_nbrs)
counts = csr.row_nnzs()
_logger.debug('have %d rows in size range [%d,%d]',
len(counts), np.min(counts), np.max(counts))
ncounts = np.fmin(counts, self.save_nbrs)
_logger.debug('will have %d rows in size range [%d,%d]',
len(ncounts), np.min(ncounts), np.max(ncounts))
assert np.all(ncounts <= self.save_nbrs)
assert np.all(ncounts >= 0)
nnz = np.sum(ncounts)
rp2 = np.zeros_like(csr.rowptrs)
rp2[1:] = np.cumsum(ncounts)
ci2 = np.zeros(nnz, np.int32)
vs2 = np.zeros(nnz)
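# Copy only the first ncounts[i] stored entries of each row below; these are
# the highest-valued neighbours, assuming csr.sort_values() above ordered each
# row by decreasing similarity.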
for i in range(nitems):
sp1 = csr.rowptrs[i]
sp2 = rp2[i]
ep1 = sp1 + ncounts[i]
ep2 = sp2 + ncounts[i]
assert ep1 - sp1 == ep2 - sp2
ci2[sp2:ep2] = csr.colinds[sp1:ep1]
vs2[sp2:ep2] = csr.values[sp1:ep1]
return matrix.CSR(csr.nrows, csr.ncols, nnz, rp2, ci2, vs2)
def predict_for_user(self, user, items, ratings=None):
_logger.debug('predicting %d items for user %s', len(items), user)
if ratings is None:
if user not in self.user_index_:
_logger.debug('user %s missing, returning empty predictions', user)
return pd.Series(np.nan, index=items)
upos = self.user_index_.get_loc(user)
ratings = pd.Series(self.rating_matrix_.row_vs(upos),
index=pd.Index(self.item_index_[self.rating_matrix_.row_cs(upos)]))
if not ratings.index.is_unique:
wmsg = 'user {} has duplicate ratings, this is likely to cause problems'.format(user)
warnings.warn(wmsg, DataWarning)
# set up rating array
# get rated item positions & limit to in-model items
ri_pos = self.item_index_.get_indexer(ratings.index)
m_rates = ratings[ri_pos >= 0]
ri_pos = ri_pos[ri_pos >= 0]
rate_v = np.full(len(self.item_index_), np.nan, dtype=np.float_)
# mean-center the rating array
if self.center:
rate_v[ri_pos] = m_rates.values - self.item_means_[ri_pos]
else:
rate_v[ri_pos] = m_rates.values
_logger.debug('user %s: %d of %d rated items in model', user, len(ri_pos), len(ratings))
assert np.sum(np.logical_not(np.isnan(rate_v))) == len(ri_pos)
# set up item result vector
# ipos will be an array of item indices
i_pos = self.item_index_.get_indexer(items)
i_pos = i_pos[i_pos >= 0]
_logger.debug('user %s: %d of %d requested items in model', user, len(i_pos), len(items))
# scratch result array
iscore = np.full(len(self.item_index_), np.nan, dtype=np.float_)
# now compute the predictions
iscore = self._predict_agg(self.sim_matrix_.N,
len(self.item_index_),
(self.min_nbrs, self.nnbrs),
rate_v, i_pos)
nscored = np.sum(np.logical_not(np.isnan(iscore)))
if self.center:
iscore += self.item_means_
assert np.sum(np.logical_not(np.isnan(iscore))) == nscored
results = pd.Series(iscore, index=self.item_index_)
results = results[results.notna()]
results = results.reindex(items, fill_value=np.nan)
assert results.notna().sum() == nscored
_logger.debug('user %s: predicted for %d of %d items',
user, results.notna().sum(), len(items))
return results
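# Usage sketch (hedged; ids are hypothetical): with a fitted model `algo`,
#     preds = algo.predict_for_user(42, [10, 20, 30])
# returns a pandas Series indexed by the requested items, with NaN for items
# that cannot be scored; a `ratings` Series can also be passed to score against
# an explicit profile instead of the stored one.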
def __str__(self):
return 'ItemItem(nnbrs={}, msize={})'.format(self.nnbrs, self.save_nbrs)
| 37,531 | 40.982103 | 263 | py |