repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses: 1 value) |
---|---|---|---|---|---|---|
finer | finer-main/models/bilstm.py | import tensorflow as tf
import numpy as np
from tf2crf import CRF
class BiLSTM(tf.keras.Model):
def __init__(
self,
n_classes,
n_layers=1,
n_units=128,
dropout_rate=0.1,
crf=False,
word2vectors_weights=None,
subword_pooling='all'
):
super().__init__()
self.n_classes = n_classes
self.n_layers = n_layers
self.n_units = n_units
self.dropout_rate = dropout_rate
self.crf = crf
self.subword_pooling = subword_pooling
self.embeddings = tf.keras.layers.Embedding(
input_dim=len(word2vectors_weights),
output_dim=word2vectors_weights.shape[-1],
weights=[word2vectors_weights],
trainable=False,
mask_zero=True
)
self.bilstm_layers = []
for i in range(n_layers):
self.bilstm_layers.append(
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(
units=n_units,
activation='tanh',
recurrent_activation='sigmoid',
return_sequences=True,
name=f'BiLSTM_{i+1}'
)
)
)
if self.crf:
self.classifier = tf.keras.layers.Dense(
units=n_classes,
activation=None
)
# Pass logits to a custom CRF Layer
self.crf_layer = CRF(output_dim=n_classes, mask=True)
else:
self.classifier = tf.keras.layers.Dense(
units=n_classes,
activation='softmax'
)
def call(self, inputs, training=None, mask=None):
if self.subword_pooling in ['first', 'last']:
pooling_mask = inputs[1]
inputs = inputs[0]
inner_inputs = self.embeddings(inputs)
for i, bilstm_layer in enumerate(self.bilstm_layers):
encodings = bilstm_layer(inner_inputs)
if i != 0:
inner_inputs = tf.keras.layers.add([inner_inputs, encodings])
else:
inner_inputs = encodings
inner_inputs = tf.keras.layers.SpatialDropout1D(
rate=self.dropout_rate
)(inner_inputs, training=training)
outputs = self.classifier(inner_inputs)
if self.crf:
outputs = self.crf_layer(outputs, mask=tf.not_equal(inputs, 0))
if self.subword_pooling in ['first', 'last']:
outputs = tf.cast(tf.expand_dims(pooling_mask, axis=-1), dtype=tf.float32) * outputs
return outputs
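# Note on call() above: when subword_pooling is 'first' or 'last' the model expects
# [token_ids, pooling_mask] as input and zeroes out the outputs of masked subword
# positions; BiLSTM layers after the first one add their output residually to their
# input, and the CRF (when enabled) masks padded positions via tf.not_equal(inputs, 0).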
def print_summary(self, line_length=None, positions=None, print_fn=None):
# Fake forward pass to build graph
batch_size, sequence_length = 1, 32
inputs = np.ones((batch_size, sequence_length), dtype=np.int32)
if self.subword_pooling in ['first', 'last']:
pooling_mask = np.ones((batch_size, sequence_length), dtype=np.int32)
inputs = [inputs, pooling_mask]
self.predict(inputs)
self.summary(line_length=line_length, positions=positions, print_fn=print_fn)
if __name__ == '__main__':
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Init random seeds
np.random.seed(1)
tf.random.set_seed(1)
# Build test model
word2vectors_weights = np.random.random((30000, 200))
model = BiLSTM(
n_classes=10,
n_layers=2,
n_units=128,
dropout_rate=0.1,
crf=True,
word2vectors_weights=word2vectors_weights,
subword_pooling='all'
)
inputs = pad_sequences(np.random.randint(0, 30000, (5, 32)), maxlen=64, padding='post', truncating='post')
outputs = pad_sequences(np.random.randint(0, 10, (5, 32)), maxlen=64, padding='post', truncating='post')
if model.crf:
model.compile(optimizer='adam', loss=model.crf_layer.loss, run_eagerly=True)
else:
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', run_eagerly=True)
model.print_summary(line_length=150)
model.fit(x=inputs, y=outputs, batch_size=2)
predictions = model.predict(inputs, batch_size=2)
print(predictions) | 4,319 | 31 | 110 | py |
finer | finer-main/models/transformer.py | import tensorflow as tf
import numpy as np
from transformers import AutoTokenizer, TFAutoModel
from tf2crf import CRF
class Transformer(tf.keras.Model):
def __init__(
self,
model_name,
n_classes,
dropout_rate=0.1,
crf=False,
tokenizer=None,
subword_pooling='all'
):
super().__init__()
self.model_name = model_name
self.n_classes = n_classes
self.dropout_rate = dropout_rate
self.crf = crf
self.subword_pooling = subword_pooling
self.encoder = TFAutoModel.from_pretrained(
pretrained_model_name_or_path=model_name
)
if tokenizer:
self.encoder.resize_token_embeddings(
new_num_tokens=len(tokenizer.vocab))
if self.crf:
self.classifier = tf.keras.layers.Dense(
units=n_classes,
activation=None
)
# Pass logits to a custom CRF Layer
self.crf_layer = CRF(output_dim=n_classes, mask=True)
else:
self.classifier = tf.keras.layers.Dense(
units=n_classes,
activation='softmax'
)
def call(self, inputs, training=None, mask=None):
if self.subword_pooling in ['first', 'last']:
pooling_mask = inputs[1]
inputs = inputs[0]
encodings = self.encoder(inputs)[0]
encodings = tf.keras.layers.SpatialDropout1D(
rate=self.dropout_rate
)(encodings, training=training)
outputs = self.classifier(encodings)
if self.crf:
outputs = self.crf_layer(outputs, mask=tf.not_equal(inputs, 0))
if self.subword_pooling in ['first', 'last']:
outputs = tf.cast(tf.expand_dims(pooling_mask, axis=-1), dtype=tf.float32) * outputs
return outputs
def print_summary(self, line_length=None, positions=None, print_fn=None):
# Fake forward pass to build graph
batch_size, sequence_length = 1, 32
inputs = np.ones((batch_size, sequence_length), dtype=np.int32)
if self.subword_pooling in ['first', 'last']:
pooling_mask = np.ones((batch_size, sequence_length), dtype=np.int32)
inputs = [inputs, pooling_mask]
self.predict(inputs)
self.summary(line_length=line_length, positions=positions, print_fn=print_fn)
if __name__ == '__main__':
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Init random seeds
np.random.seed(1)
tf.random.set_seed(1)
model_name = 'nlpaueb/sec-bert-base'
# Build test model
model = Transformer(
model_name=model_name,
n_classes=10,
dropout_rate=0.2,
crf=True,
tokenizer=None,
subword_pooling='all'
)
# inputs = pad_sequences(np.random.randint(0, 30000, (5, 32)), maxlen=64, padding='post', truncating='post')
inputs = [
'This is the first sentence',
'This is the second sentence',
'This is the third sentence',
'This is the fourth sentence',
'This is the last sentence, this is a longer sentence']
tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=model_name,
use_fast=True
)
inputs = tokenizer.batch_encode_plus(
batch_text_or_text_pairs=inputs,
add_special_tokens=False,
max_length=64,
padding='max_length',
return_tensors='tf'
).input_ids
outputs = pad_sequences(np.random.randint(0, 10, (5, 32)), maxlen=64, padding='post', truncating='post')
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-5, clipvalue=5.0)
if model.crf:
model.compile(
optimizer=optimizer,
loss=model.crf_layer.loss,
run_eagerly=True
)
else:
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
run_eagerly=True
)
print(model.print_summary(line_length=150))
model.fit(x=inputs, y=outputs, batch_size=2)
model.predict(inputs, batch_size=1)
predictions = model.predict(inputs, batch_size=2)
print(predictions)
| 4,291 | 29.013986 | 112 | py |
finer | finer-main/models/__init__.py | from models.bilstm import BiLSTM
from models.transformer import Transformer
from models.transformer_bilstm import TransformerBiLSTM
| 132 | 32.25 | 55 | py |
finer | finer-main/configurations/configuration.py | import json
import os
import logging
from shutil import copy2
from time import strftime, gmtime
from configurations import CONFIG_DIR
from data import EXPERIMENTS_RUNS_DIR, VECTORS_DIR
parameters = {}
class ParameterStore(type):
def __getitem__(cls, key: str):
global parameters
return parameters[key]
def __setitem__(cls, key, value):
global parameters
parameters[key] = value
def __contains__(self, item):
global parameters
return item in parameters
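# ParameterStore is used as a metaclass below so that the Configuration *class*
# itself supports dict-style access (e.g. Configuration['task']) backed by the
# module-level `parameters` dict.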
class Configuration(object, metaclass=ParameterStore):
"""
Configuration class that contains all the parameters of the experiment.
The experiment parameters are loaded from the corresponding configuration file
(e.g. "finer_transformer.json)
It instantiates the experiment logger.
The output of the experiment is saved at the EXPERIMENTS_RUNS_DIR (data/experiments_runs)
"""
@staticmethod
def configure(method, mode):
global parameters
os.makedirs(name=os.path.join(EXPERIMENTS_RUNS_DIR), exist_ok=True)
os.makedirs(name=os.path.join(VECTORS_DIR), exist_ok=True)
if method in ['transformer', 'bilstm']:
config_filepath = os.path.join(CONFIG_DIR, f'{method}.json')
if os.path.exists(config_filepath):
with open(config_filepath) as config_file:
parameters = json.load(config_file)
else:
raise Exception(f'Configuration file "{method}.json" does not exist')
else:
raise NotImplementedError(f'"FINER-139" experiment does not support "{method}" method')
parameters['task'] = {}
parameters['task']['model'] = method
parameters['task']['mode'] = mode
parameters['configuration_filename'] = f'{method}.json'
# Setup Logging
timestamp = strftime("%Y_%m_%d_%H_%M_%S", gmtime())
log_name = f"FINER139_{timestamp}"
# Set experiment_path and create necessary directories
if mode == 'train':
experiment_path = os.path.join(EXPERIMENTS_RUNS_DIR, log_name)
os.makedirs(name=os.path.join(experiment_path, 'model'), exist_ok=True)
copy2(src=os.path.join(CONFIG_DIR, f'{method}.json'),
dst=os.path.join(experiment_path, f'{method}.json'))
elif mode == 'evaluate':
pretrained_model = parameters['evaluation']['pretrained_model']
if pretrained_model is None or pretrained_model.strip() == '':
raise Exception(f'No pretrained_model provided in configuration')
if os.path.exists(os.path.join(EXPERIMENTS_RUNS_DIR, pretrained_model, 'model')):
experiment_path = os.path.join(EXPERIMENTS_RUNS_DIR, pretrained_model)
pretrained_model_path = os.path.join(experiment_path, 'model')
else:
raise Exception(f'Model "{pretrained_model}" does not exist')
configuration_path = os.path.join(experiment_path, f'{method}.json')
if os.path.exists(configuration_path):
with open(configuration_path) as fin:
original_parameters = json.load(fin)
for key in ['train_parameters', 'general_parameters', 'hyper_parameters']:
parameters[key] = original_parameters[key]
parameters['task']['mode'] = mode
else:
raise Exception(f'Configuration "{configuration_path}" does not exist')
parameters['evaluation']['pretrained_model_path'] = pretrained_model_path
log_name = f"{log_name}_EVALUATE_{'_'.join(parameters['evaluation']['pretrained_model'].split(os.sep))}"
else:
raise Exception(f'Mode "{mode}" is not supported')
parameters['task']['log_name'] = log_name
parameters['experiment_path'] = experiment_path
# If in debug mode set workers and max_queue_size to minimum and multiprocessing to False
if parameters['general_parameters']['debug']:
parameters['general_parameters']['workers'] = 1
parameters['general_parameters']['max_queue_size'] = 1
parameters['general_parameters']['use_multiprocessing'] = False
parameters['general_parameters']['run_eagerly'] = True
# Clean loggers
root = logging.getLogger()
if root.handlers:
for handler in root.handlers:
root.removeHandler(handler)
logging.basicConfig(level=logging.INFO,
format='%(message)s',
datefmt='%m-%d %H:%M',
filename=os.path.join(experiment_path, f'{log_name}.txt'),
filemode='a')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
@classmethod
def __getitem__(cls, item: str):
global parameters
return parameters[item]
| 5,368 | 39.368421 | 116 | py |
finer | finer-main/configurations/__init__.py | import os
CONFIG_DIR = os.path.dirname(os.path.realpath(__file__))
from configurations.configuration import Configuration
| 122 | 29.75 | 56 | py |
finer | finer-main/data/__init__.py | import os
DATA_DIR = os.path.dirname(os.path.realpath(__file__))
EXPERIMENTS_RUNS_DIR = os.path.join(DATA_DIR, 'experiments_runs')
VECTORS_DIR = os.path.join(DATA_DIR, 'vectors')
| 179 | 35 | 65 | py |
TCPD | TCPD-master/build_tcpd.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect and verify all time series that are not packaged in the repository.
Author: Gertjan van den Burg
License: See LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import platform
import os
DATASET_DIR = "./datasets"
TARGETS = [
("apple", "get_apple.py"),
("bee_waggle_6", "get_bee_waggle_6.py"),
("bitcoin", "get_bitcoin.py"),
("iceland_tourism", "get_iceland_tourism.py"),
("measles", "get_measles.py"),
("occupancy", "get_occupancy.py"),
("ratner_stock", "get_ratner_stock.py"),
("robocalls", "get_robocalls.py"),
("scanline_126007", "get_scanline_126007.py"),
("scanline_42049", "get_scanline_42049.py"),
]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-v", "--verbose", help="Enable logging", action="store_true"
)
parser.add_argument(
"-o", "--output-dir", help="Output directory to store all time series"
)
parser.add_argument(
"action",
help="Action to perform",
choices=["collect", "clean"],
default="collect",
nargs="?",
)
return parser.parse_args()
def load_dataset_script(module_name, path):
"""Load the dataset collection script as a module
This is not a *super* clean way to do this, but it maintains the modularity
of the dataset, where each dataset can be downloaded individually as well
as through this script.
"""
version = platform.python_version_tuple()
if version[0] == "2":
import imp
module = imp.load_source(module_name, path)
elif version[0] == "3" and version[1] in ["3", "4"]:
from importlib.machinery import SourceFileLoader
module = SourceFileLoader(module_name, path).load_module()
else:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
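# Example of how this loader is used by run_dataset_func below (module and path
# names taken from TARGETS and DATASET_DIR above; treat this as an illustrative
# sketch only):
#
#   module = load_dataset_script("tcpd.apple", "./datasets/apple/get_apple.py")
#   module.collect(output_dir="./datasets/apple")
#
# Every get_*.py script is expected to expose collect() and clean() functions.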
def run_dataset_func(name, script, funcname):
dir_path = os.path.join(DATASET_DIR, name)
get_path = os.path.join(dir_path, script)
module = load_dataset_script("tcpd.%s" % name, get_path)
func = getattr(module, funcname)
func(output_dir=dir_path)
def collect_dataset(name, script):
return run_dataset_func(name, script, "collect")
def clean_dataset(name, script):
return run_dataset_func(name, script, "clean")
def main():
args = parse_args()
log = lambda *a, **kw: print(*a, **kw) if args.verbose else None
if args.action == "collect":
func = collect_dataset
elif args.action == "clean":
func = clean_dataset
else:
raise ValueError("Unknown action: %s" % args.action)
for name, script in TARGETS:
log(
"Running %s action for dataset: %s ... " % (args.action, name),
end="",
flush=True,
)
func(name, script)
log("ok", flush=True)
if __name__ == "__main__":
main()
| 3,081 | 25.568966 | 80 | py |
TCPD | TCPD-master/examples/python/load_dataset.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example code for loading a dataset to a TimeSeries object.
Note that this code requires Pandas to be available.
Author: Gertjan van den Burg
Copyright: The Alan Turing Institute, 2019
License: See LICENSE file.
"""
import json
import numpy as np
import pandas as pd
class TimeSeries:
def __init__(
self,
t,
y,
name=None,
longname=None,
datestr=None,
datefmt=None,
columns=None,
):
self.t = t
self.y = y
self.name = name
self.longname = longname
self.datestr = datestr
self.datefmt = datefmt
self.columns = columns
# whether the series is stored as zero-based or one-based
self.zero_based = True
@property
def n_obs(self):
return len(self.t)
@property
def n_dim(self):
return self.y.shape[1]
@property
def shape(self):
return (self.n_obs, self.n_dim)
@classmethod
def from_json(cls, filename):
with open(filename, "rb") as fp:
data = json.load(fp)
tidx = np.array(data["time"]["index"])
tidx = np.squeeze(tidx)
if "format" in data["time"]:
datefmt = data["time"]["format"]
datestr = np.array(data["time"]["raw"])
else:
datefmt = None
datestr = None
y = np.zeros((data["n_obs"], data["n_dim"]))
columns = []
for idx, series in enumerate(data["series"]):
columns.append(series.get("label", "V%i" % (idx + 1)))
thetype = np.int64 if series["type"] == "integer" else np.float64  # np.int was removed in NumPy 1.24+, so use np.int64 for integer series
vec = np.array(series["raw"], dtype=thetype)
y[:, idx] = vec
ts = cls(
tidx,
y,
name=data["name"],
longname=data["longname"],
datefmt=datefmt,
datestr=datestr,
columns=columns,
)
return ts
@property
def df(self):
d = {"t": self.t}
for i in range(len(self.columns)):
col = self.columns[i]
val = self.y[:, i]
d[col] = val
return pd.DataFrame(d)
def make_one_based(self):
""" Convert the time index to a one-based time index. """
if self.zero_based:
self.t = [t + 1 for t in self.t]
self.zero_based = False
def __repr__(self):
return "TimeSeries(name=%s, n_obs=%s, n_dim=%s)" % (
self.name,
self.n_obs,
self.n_dim,
)
def __str__(self):
return repr(self)
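# Minimal usage sketch (the JSON path below is illustrative; any dataset file
# produced by the TCPD collection scripts should work):
#
#   ts = TimeSeries.from_json("datasets/apple/apple.json")
#   print(ts)            # TimeSeries(name=..., n_obs=..., n_dim=...)
#   df = ts.df           # pandas DataFrame with column 't' plus one column per dimension
#   ts.make_one_based()  # switch to a one-based time index when an algorithm expects it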
| 2,654 | 22.289474 | 75 | py |
TCPD | TCPD-master/datasets/scanline_126007/get_scanline_126007.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the scanline_126007 dataset.
See the README file for more information.
Author: Gertjan van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import hashlib
import os
import numpy as np
import json
import sys
import time
from PIL import Image
from functools import wraps
from urllib.request import urlretrieve
from urllib.error import URLError
IMG_URL = "https://web.archive.org/web/20070611200633im_/http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/segbench/BSDS300/html/images/plain/normal/gray/126007.jpg"
MD5_IMG = "0ca6db4848b6d319d94a37e697930fb4"
MD5_JSON = "057d5741b623308af00c42e2c8e525c3"
NAME_IMG = "126007.jpg"
NAME_JSON = "scanline_126007.json"
class ValidationError(Exception):
def __init__(self, filename):
self.message = (
"Validating the file '%s' failed. \n"
"Please raise an issue on the GitHub page for this project \n"
"if the error persists." % filename
)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not check_md5sum(target, checksum):
raise ValidationError(target)
return out
return wrapper
return validate_decorator
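# The decorator above is applied as, for example, @validate(MD5_IMG) below: it
# skips the wrapped function when the target file already matches the expected
# checksum, and raises ValidationError if a freshly written file does not.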
@validate(MD5_IMG)
def download_img(target_path=None):
count = 0
while count < 5:
count += 1
try:
urlretrieve(IMG_URL, target_path)
return
except URLError as err:
print(
"Error occurred (%r) when trying to download img. Retrying in 5 seconds"
% err,
sys.stderr,
)
time.sleep(5)
@validate(MD5_JSON)
def write_json(img_path, target_path=None):
name = "scanline_126007"
longname = "Scanline 126007"
index = 200
im = Image.open(img_path)
arr = np.array(im)
line = list(map(int, list(arr[index, :])))
series = [{"label": "Line %s" % index, "type": "int", "raw": line}]
data = {
"name": name,
"longname": longname,
"n_obs": len(line),
"n_dim": len(series),
"time": {"index": list(range(len(line)))},
"series": series,
}
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
def collect(output_dir="."):
img_path = os.path.join(output_dir, NAME_IMG)
json_path = os.path.join(output_dir, NAME_JSON)
download_img(target_path=img_path)
write_json(img_path, target_path=json_path)
def clean(output_dir="."):
img_path = os.path.join(output_dir, NAME_IMG)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(img_path):
os.unlink(img_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main(output_dir="."):
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
if __name__ == "__main__":
main()
| 4,045 | 24.2875 | 181 | py |
TCPD | TCPD-master/datasets/shanghai_license/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: Gertjan van den Burg
"""
import json
import argparse
import clevercsv
def reformat_time(mmmyy):
""" From MMM-YY to %Y-%m """
MONTHS = {
"Jan": 1,
"Feb": 2,
"Mar": 3,
"Apr": 4,
"May": 5,
"Jun": 6,
"Jul": 7,
"Aug": 8,
"Sep": 9,
"Oct": 10,
"Nov": 11,
"Dec": 12,
}
mmm, yy = mmmyy.split("-")
Y = int(yy) + 2000
m = MONTHS.get(mmm)
return "%i-%02i" % (Y, m)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar="", escapechar=""
)
rows = list(reader)
rows.pop(0)
time = [reformat_time(r[0]) for r in rows]
values = [int(r[-1]) for r in rows]
# Manually split Jan-08 into two, see readme for details.
jan08idx = time.index("2008-01")
values[jan08idx] /= 2
time.insert(jan08idx + 1, "2008-02")
values.insert(jan08idx + 1, values[jan08idx])
name = "shanghai_license"
longname = "Shanghai License"
time_fmt = "%Y-%m"
series = [{"label": "No. of Applicants", "type": "int", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
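# For reference, the JSON written by main() above follows the common TCPD series
# layout (field names as constructed in the code; the values shown are made up):
#
#   {
#       "name": "shanghai_license",
#       "longname": "Shanghai License",
#       "n_obs": 2,
#       "n_dim": 1,
#       "time": {"type": "string", "format": "%Y-%m", "index": [0, 1], "raw": ["2008-01", "2008-02"]},
#       "series": [{"label": "No. of Applicants", "type": "int", "raw": [2000, 2000]}]
#   }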
| 1,956 | 20.744444 | 75 | py |
TCPD | TCPD-master/datasets/bee_waggle_6/get_bee_waggle_6.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the bee_waggle_6 dataset.
See the README file for more information.
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import hashlib
import json
import math
import os
import zipfile
import sys
import time
from functools import wraps
from urllib.request import urlretrieve
from urllib.error import URLError
ZIP_URL = "https://web.archive.org/web/20191114130815if_/https://www.cc.gatech.edu/%7Eborg/ijcv_psslds/psslds.zip"
MD5_ZIP = "039843dc15c72fd5450eeb11c6e5599c"
MD5_JSON = "4f03feafecb3be0b069b3cb0d6b17d4f"
# known alternative checksums for small rounding errors
MD5_JSON_X = [
"71311783488ee5f1122545d24c15429b",
"3632e004b540de5c3eb049fb5591d044",
]
NAME_ZIP = "psslds.zip"
NAME_JSON = "bee_waggle_6.json"
class ValidationError(Exception):
def __init__(self, filename):
message = (
"Validating the file '%s' failed. \n"
"Please raise an issue on the GitHub page for this project "
"if the error persists." % filename
)
super().__init__(message)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum, alt_checksums=None):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
if (
os.path.exists(target)
and alt_checksums
and any(check_md5sum(target, c) for c in alt_checksums)
):
print(
"Note: Matched alternative checksum for %s. "
"This indicates that small differences exist compared to "
"the original version of this time series, likely due to "
"rounding differences. Usually this is nothing to "
"worry about." % target,
file=sys.stderr,
)
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not (
check_md5sum(target, checksum)
or (
alt_checksums
and any(check_md5sum(target, c) for c in alt_checksums)
)
):
print(
"Warning: Generated dataset %s didn't match a "
"known checksum. This is likely due to "
"rounding differences caused by "
"different system architectures. Minor differences in "
"algorithm performance can occur for this dataset. "
% target,
file=sys.stderr,
)
return out
return wrapper
return validate_decorator
@validate(MD5_ZIP)
def download_zip(target_path=None):
count = 0
while count < 5:
count += 1
try:
urlretrieve(ZIP_URL, target_path)
return
except URLError as err:
print(
"Error occurred (%r) when trying to download zip. Retrying in 5 seconds"
% err,
sys.stderr,
)
time.sleep(5)
@validate(MD5_JSON, MD5_JSON_X)
def write_json(zip_path, target_path=None):
with zipfile.ZipFile(zip_path) as thezip:
with thezip.open("psslds/zips/data/sequence6/btf/ximage.btf") as fp:
ximage = [float(l.strip()) for l in fp]
with thezip.open("psslds/zips/data/sequence6/btf/yimage.btf") as fp:
yimage = [float(l.strip()) for l in fp]
with thezip.open("psslds/zips/data/sequence6/btf/timage.btf") as fp:
timage = [float(l.strip()) for l in fp]
sintimage = [math.sin(t) for t in timage]
costimage = [math.cos(t) for t in timage]
name = "bee_waggle_6"
longname = "Bee Waggle no. 6"
series = [
{"label": "x", "type": "float", "raw": ximage},
{"label": "y", "type": "float", "raw": yimage},
{"label": "sin(theta)", "type": "float", "raw": sintimage},
{"label": "cos(theta)", "type": "float", "raw": costimage},
]
data = {
"name": name,
"longname": longname,
"n_obs": len(ximage),
"n_dim": len(series),
"time": {"index": list(range(len(ximage)))},
"series": series,
}
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
def collect(output_dir="."):
zip_path = os.path.join(output_dir, NAME_ZIP)
json_path = os.path.join(output_dir, NAME_JSON)
download_zip(target_path=zip_path)
write_json(zip_path, target_path=json_path)
def clean(output_dir="."):
zip_path = os.path.join(output_dir, NAME_ZIP)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(zip_path):
os.unlink(zip_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main():
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
if __name__ == "__main__":
main()
| 5,957 | 28.205882 | 114 | py |
TCPD | TCPD-master/datasets/construction/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: G.J.J. van den Burg
"""
import argparse
import json
import xlrd
MONTHS = {
"Jan": 1,
"Feb": 2,
"Mar": 3,
"Apr": 4,
"May": 5,
"Jun": 6,
"Jul": 7,
"Aug": 8,
"Sep": 9,
"Oct": 10,
"Nov": 11,
"Dec": 12,
}
def format_date(datestr):
""" expects: mmm-yyx with x an extraneous character or empty """
mmm, yyx = datestr.split("-")
midx = MONTHS[mmm]
if len(yyx) == 3:
yy = yyx[:2]
elif len(yyx) == 2:
yy = yyx
else:
raise ValueError
# this will break in 71 years
if yy.startswith("9"):
yyyy = 1900 + int(yy)
else:
yyyy = 2000 + int(yy)
return f"{yyyy}-{midx:02}"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
wb = xlrd.open_workbook(args.input_file)
ws = wb.sheet_by_index(0)
header = ws.row(3)
assert header[0].value == "Date"
by_month = {}
ridx = 4
while True:
# stop if date cell is empty
if ws.row(ridx)[0].ctype == xlrd.XL_CELL_EMPTY:
break
date_value = ws.row(ridx)[0].value
construct_value = ws.row(ridx)[1].value
date = format_date(date_value)
construct = int(construct_value)
by_month[date] = construct
ridx += 1
name = "construction"
longname = "US Construction Spending"
time = sorted(by_month.keys())
time_fmt = "%Y-%m"
values = [by_month[t] for t in time]
series = [
{
"label": "Total Private Construction Spending",
"type": "int",
"raw": values,
}
]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 2,316 | 19.147826 | 68 | py |
TCPD | TCPD-master/datasets/lga_passengers/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: G.J.J. van den Burg
"""
import json
import argparse
import clevercsv
def month2index(month):
return {
"Jan": "01",
"Feb": "02",
"Mar": "03",
"Apr": "04",
"May": "05",
"Jun": "06",
"Jul": "07",
"Aug": "08",
"Sep": "09",
"Oct": "10",
"Nov": "11",
"Dec": "12",
}[month]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.DictReader(
fp, delimiter=",", quotechar="", escapechar=""
)
items = list(reader)
for it in items:
it["time"] = f"{it['Year']}-{month2index(it['Month'])}"
it["value"] = int(it["Total Passengers"])
lgas = [it for it in items if it["Airport Code"] == "LGA"]
pairs = [(it["time"], it["value"]) for it in lgas]
# with this date format, a lexicographic string sort is also a date sort
pairs.sort()
name = "lga_passengers"
longname = "LaGuardia Passengers"
time_fmt = "%Y-%m"
time = [p[0] for p in pairs]
values = [p[1] for p in pairs]
series = [{"label": "Number of Passengers", "type": "int", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,932 | 21.476744 | 78 | py |
TCPD | TCPD-master/datasets/unemployment_nl/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: Gertjan van den Burg
"""
import argparse
import clevercsv
import json
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r", newline="", encoding="UTF-8-SIG") as fp:
reader = clevercsv.reader(
fp, delimiter=";", quotechar='"', escapechar=""
)
rows = list(reader)
# remove rows we don't need
title = rows.pop(0)
meta = rows.pop(0)
meta = rows.pop(0)
# filter out rows we want
header = rows.pop(0)
eligible_population = rows.pop(0)
working_population = rows.pop(0)
unemployed_population = rows.pop(0)
years = header[3:]
eligible = list(map(int, eligible_population[3:]))
unemployed = list(map(int, unemployed_population[3:]))
# compute the percentage unemployed
by_year = {
y: (u / e * 100) for y, e, u in zip(years, eligible, unemployed)
}
# remove value of 2001 before revision
del by_year["2001 voor revisie"]
# rename value of 2001 after revision as simply '2001'
by_year["2001"] = by_year["2001 na revisie"]
del by_year["2001 na revisie"]
time = sorted(by_year.keys())
values = [by_year[t] for t in time]
series = [{"label": "V1", "type": "float", "raw": values}]
data = {
"name": "unemployment_nl",
"longname": "Unemployment rate (NL)",
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": "%Y",
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,974 | 23.6875 | 76 | py |
TCPD | TCPD-master/datasets/scanline_42049/get_scanline_42049.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the scanline_42049 dataset.
See the README file for more information.
Author: Gertjan van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import hashlib
import os
import numpy as np
import json
import sys
import time
from PIL import Image
from functools import wraps
from urllib.request import urlretrieve
from urllib.error import URLError
IMG_URL = "https://web.archive.org/web/20070611230044im_/http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/segbench/BSDS300/html/images/plain/normal/gray/42049.jpg"
MD5_IMG = "75a3d395b4f3f506abb9edadacaa4d55"
MD5_JSON = "39921dfa959576bd0b3d6c95558f17f4"
NAME_IMG = "42049.jpg"
NAME_JSON = "scanline_42049.json"
class ValidationError(Exception):
def __init__(self, filename):
self.message = (
"Validating the file '%s' failed. \n"
"Please raise an issue on the GitHub page for this project \n"
"if the error persists." % filename
)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not check_md5sum(target, checksum):
raise ValidationError(target)
return out
return wrapper
return validate_decorator
@validate(MD5_IMG)
def download_img(target_path=None):
count = 0
while count < 5:
count += 1
try:
urlretrieve(IMG_URL, target_path)
return
except URLError as err:
print(
"Error occurred (%r) when trying to download img. Retrying in 5 seconds"
% err,
sys.stderr,
)
time.sleep(5)
@validate(MD5_JSON)
def write_json(img_path, target_path=None):
name = "scanline_42049"
longname = "Scanline 42049"
index = 170
im = Image.open(img_path)
arr = np.array(im)
line = list(map(int, list(arr[index, :])))
series = [{"label": "Line %s" % index, "type": "int", "raw": line}]
data = {
"name": name,
"longname": longname,
"n_obs": len(line),
"n_dim": len(series),
"time": {"index": list(range(len(line)))},
"series": series,
}
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
def collect(output_dir="."):
img_path = os.path.join(output_dir, NAME_IMG)
json_path = os.path.join(output_dir, NAME_JSON)
download_img(target_path=img_path)
write_json(img_path, target_path=json_path)
def clean(output_dir="."):
img_path = os.path.join(output_dir, NAME_IMG)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(img_path):
os.unlink(img_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main(output_dir="."):
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
if __name__ == "__main__":
main()
| 4,038 | 24.402516 | 180 | py |
TCPD | TCPD-master/datasets/us_population/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: Gertjan van den Burg
"""
import json
import argparse
import clevercsv
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar="", escapechar=""
)
rows = list(reader)
rows.pop(0)
# the time format is monthly, so we convert that here
time = [r[2][:-3] for r in rows]
time_fmt = "%Y-%m"
# source is in thousands, so we correct that here
values = [float(r[3]) * 1000 for r in rows]
name = "us_population"
longname = "US Population"
series = [{"label": "Population", "type": "int", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,428 | 20.984615 | 72 | py |
TCPD | TCPD-master/datasets/usd_isk/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: Gertjan van den Burg
"""
import clevercsv
import json
import sys
def format_month(ymm):
year, month = ymm.split("M")
return f"{year}-{month}"
def main(input_filename, output_filename):
with open(input_filename, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.DictReader(
fp, delimiter=",", quotechar='"', escapechar=""
)
rows = list(reader)
by_currency = {}
for row in rows:
cur = row["CURRENCY"]
if not cur in by_currency:
by_currency[cur] = []
by_currency[cur].append(row)
by_month = {}
for cur in by_currency:
for item in by_currency[cur]:
if item["Value"] == ":":
continue
month = item["TIME"]
if not month in by_month:
by_month[month] = {}
by_month[month][cur] = item
to_delete = []
for month in by_month:
if not len(by_month[month]) == 2:
to_delete.append(month)
for month in to_delete:
del by_month[month]
ratio = {}
for month in sorted(by_month.keys()):
usd = by_month[month]["US dollar"]
isk = by_month[month]["Icelandic krona"]
ratio[format_month(month)] = float(usd["Value"]) / float(isk["Value"])
tuples = [(m, ratio[m]) for m in ratio]
name = "usd_isk"
longname = "USD-ISK exhange rate"
data = {
"name": name,
"longname": longname,
"n_obs": len(tuples),
"n_dim": 1,
"time": {
"format": "%Y-%m",
"index": list(range(len(tuples))),
"raw": [t[0] for t in tuples],
},
"series": [
{
"label": "Exchange rate",
"type": "float",
"raw": [t[1] for t in tuples],
}
],
}
with open(output_filename, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
| 2,059 | 22.678161 | 78 | py |
TCPD | TCPD-master/datasets/jfk_passengers/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: G.J.J. van den Burg
"""
import json
import argparse
import clevercsv
def month2index(month):
return {
"Jan": "01",
"Feb": "02",
"Mar": "03",
"Apr": "04",
"May": "05",
"Jun": "06",
"Jul": "07",
"Aug": "08",
"Sep": "09",
"Oct": "10",
"Nov": "11",
"Dec": "12",
}[month]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.DictReader(
fp, delimiter=",", quotechar="", escapechar=""
)
items = list(reader)
for it in items:
it["time"] = f"{it['Year']}-{month2index(it['Month'])}"
it["value"] = int(it["Total Passengers"])
jfks = [it for it in items if it["Airport Code"] == "JFK"]
pairs = [(it["time"], it["value"]) for it in jfks]
# with this date format, a lexicographic string sort is also a date sort
pairs.sort()
name = "jfk_passengers"
longname = "JFK Passengers"
time_fmt = "%Y-%m"
time = [p[0] for p in pairs]
values = [p[1] for p in pairs]
series = [{"label": "Number of Passengers", "type": "int", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,927 | 21.16092 | 78 | py |
TCPD | TCPD-master/datasets/centralia/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: Gertjan van den Burg
"""
import json
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--subsample",
help="Number of observations to skip during subsampling",
type=int,
)
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r") as fp:
rows = [l.strip().split("\t") for l in fp]
time = []
values = []
for year, pop in rows:
time.append(year)
values.append(int(pop))
name = "centralia"
longname = "Centralia Pennsylvania Population"
time_fmt = "%Y"
series = [{"label": "Population", "type": "int", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,365 | 20.015385 | 68 | py |
TCPD | TCPD-master/datasets/iceland_tourism/get_iceland_tourism.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the iceland_tourism dataset
See the README file for more information.
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import hashlib
import json
import openpyxl
import os
import sys
import time
from functools import wraps
from urllib.request import urlretrieve
from urllib.error import URLError
XLSX_URL = "https://web.archive.org/web/20191121170223if_/https://www.ferdamalastofa.is/static/files/ferdamalastofa/Frettamyndir/2019/nov/visitors-to-iceland-2002-2019-oct.xlsx"
MD5_XLSX = "ec777afd95b01ca901aa00475fc284e5"
MD5_JSON = "8bbac4ca95319a865f2d58ff564f063d"
NAME_XLSX = "visitors-to-iceland-2002-2019-oct.xlsx"
NAME_JSON = "iceland_tourism.json"
MONTHS = {
"January": 1,
"February": 2,
"March": 3,
"April": 4,
"May": 5,
"June": 6,
"July": 7,
"August": 8,
"September": 9,
"October": 10,
"November": 11,
"December": 12,
}
class ValidationError(Exception):
def __init__(self, filename):
self.message = (
"Validating the file '%s' failed. \n"
"Please raise an issue on the GitHub page for this project \n"
"if the error persists." % filename
)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not check_md5sum(target, checksum):
raise ValidationError(target)
return out
return wrapper
return validate_decorator
@validate(MD5_XLSX)
def download_xlsx(target_path=None):
count = 0
while count < 5:
count += 1
try:
urlretrieve(XLSX_URL, target_path)
return
except URLError as err:
print(
"Error occurred (%r) when trying to download xlsx. Retrying in 5 seconds"
% err,
sys.stderr,
)
time.sleep(5)
def format_ym(year, month):
midx = MONTHS[month]
return "%i-%02d" % (int(year), midx)
@validate(MD5_JSON)
def write_json(xlsx_path, target_path=None):
wb = openpyxl.load_workbook(xlsx_path)
ws = wb.worksheets[2]
rows = list(ws.rows)
# hardcoding these row indices, not worth doing it nicely
header = rows[2]
column_idx = [
i
for i, c in enumerate(header)
if c.data_type == "n" and c.value and 2003 <= c.value < 2020
]
visitors = []
r_offset = 4
for c in column_idx:
for r in range(r_offset, r_offset + 12):
cell = ws.cell(r, c + 1)
if cell.value is None or str(cell.value) == "":
continue
year = header[c].value
month = ws.cell(r, 1).value
datestr = format_ym(year, month)
# eliminate some observations that were not in the original dataset
if datestr in ["2019-08", "2019-09", "2019-10"]:
continue
item = {"time": datestr, "value": int(cell.value)}
visitors.append(item)
name = "iceland_tourism"
longname = "Iceland Tourism"
data = {
"name": name,
"longname": longname,
"n_obs": len(visitors),
"n_dim": 1,
"time": {
"format": "%Y-%m",
"index": list(range(len(visitors))),
"raw": [v["time"] for v in visitors],
},
"series": [
{
"label": "Visitor Number",
"type": "int",
"raw": [v["value"] for v in visitors],
}
],
}
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
def collect(output_dir="."):
xlsx_path = os.path.join(output_dir, NAME_XLSX)
json_path = os.path.join(output_dir, NAME_JSON)
download_xlsx(target_path=xlsx_path)
write_json(xlsx_path, target_path=json_path)
def clean(output_dir="."):
xlsx_path = os.path.join(output_dir, NAME_XLSX)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(xlsx_path):
os.unlink(xlsx_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main(output_dir="."):
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
if __name__ == "__main__":
main()
| 5,389 | 24.424528 | 177 | py |
TCPD | TCPD-master/datasets/global_co2/get_global_co2.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the global_co2 dataset
See the README file for more information.
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import clevercsv
import hashlib
import json
import os
from functools import wraps
from urllib.request import urlretrieve
CSV_URL = "ftp://data.iac.ethz.ch/CMIP6/input4MIPs/UoM/GHGConc/CMIP/mon/atmos/UoM-CMIP-1-1-0/GHGConc/gr3-GMNHSH/v20160701/mole_fraction_of_carbon_dioxide_in_air_input4MIPs_GHGConcentrations_CMIP_UoM-CMIP-1-1-0_gr3-GMNHSH_000001-201412.csv"
MD5_CSV = "a3d42f5e339f4c652b8ae80e830b6941"
MD5_JSON = "7c8edd8887f51a6f841cc9d806ab4e56"
NAME_CSV = "mole_fraction_of_carbon_dioxide_in_air_input4MIPs_GHGConcentrations_CMIP_UoM-CMIP-1-1-0_gr3-GMNHSH_000001-201412.csv"
NAME_JSON = "global_co2.json"
SAMPLE = 48
class ValidationError(Exception):
def __init__(self, filename):
message = (
"Validating the file '%s' failed. \n"
"Please raise an issue on the GitHub page for this project "
"if the error persists." % filename
)
super().__init__(message)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not check_md5sum(target, checksum):
raise ValidationError(target)
return out
return wrapper
return validate_decorator
@validate(MD5_CSV)
def get_csv(target_path=None):
urlretrieve(CSV_URL, target_path)
def reformat_time(datestr):
""" From MMM-YY to %Y-%m """
MONTHS = {
"Jan": 1,
"Feb": 2,
"Mar": 3,
"Apr": 4,
"May": 5,
"Jun": 6,
"Jul": 7,
"Aug": 8,
"Sep": 9,
"Oct": 10,
"Nov": 11,
"Dec": 12,
}
dd, mmm, rest = datestr.split("-")
yyyy = rest.split(" ")[0]
m = MONTHS.get(mmm)
return "%s-%02d-%s" % (yyyy, m, dd)
@validate(MD5_JSON)
def write_json(csv_path, target_path=None):
with open(csv_path, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar="", escapechar=""
)
rows = list(reader)
header = rows.pop(0)
rows = [r for i, r in enumerate(rows) if i % SAMPLE == 0]
as_dicts = [{h: v for h, v in zip(header, row)} for row in rows]
by_date = {
reformat_time(d["datetime"]): float(d["data_mean_global"])
for d in as_dicts
}
# trim off anything before 1600
by_date = {k: v for k, v in by_date.items() if k.split("-")[0] >= "1600"}
time = sorted(by_date.keys())
values = [by_date[t] for t in time]
name = "global_co2"
longname = "Global CO2"
time_fmt = "%Y-%m-%d"
series = [{"label": "Mean", "type": "float", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(values),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
if time is None:
del data["time"]
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
def collect(output_dir="."):
csv_path = os.path.join(output_dir, NAME_CSV,)
json_path = os.path.join(output_dir, NAME_JSON)
get_csv(target_path=csv_path)
write_json(csv_path, target_path=json_path)
def clean(output_dir="."):
csv_path = os.path.join(output_dir, NAME_CSV,)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(csv_path):
os.unlink(csv_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main(output_dir="."):
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
if __name__ == "__main__":
main()
| 4,971 | 24.761658 | 239 | py |
TCPD | TCPD-master/datasets/robocalls/get_robocalls.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the robocalls dataset
See the README file for more information.
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import bs4
import hashlib
import json
import os
import requests
import sys
import time
from functools import wraps
URL = "https://web.archive.org/web/20191027130452/https://robocallindex.com/history/time"
HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 "
"Safari/537.36"
}
MD5_JSON = "f67ec0ccb50f2a835912e5c51932c083"
MONTHS = {
"January": 1,
"February": 2,
"March": 3,
"April": 4,
"May": 5,
"June": 6,
"July": 7,
"August": 8,
"September": 9,
"October": 10,
"November": 11,
"December": 12,
}
NAME_HTML = "robocalls.html"
NAME_JSON = "robocalls.json"
class ValidationError(Exception):
def __init__(self, filename):
self.message = (
"Validating the file '%s' failed. \n"
"Please raise an issue on the GitHub page for this project \n"
"if the error persists." % filename
)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not check_md5sum(target, checksum):
raise ValidationError(target)
return out
return wrapper
return validate_decorator
# We can't validate the HTML as the wayback machine inserts the retrieval time
# in the HTML, so the checksum is not constant.
def write_html(target_path=None):
count = 0
jar = {}
tries = 10
while count < tries:
count += 1
error = False
try:
res = requests.get(URL, headers=HEADERS, cookies=jar)
except requests.exceptions.ConnectionError:
error = True
if error or not res.ok:
print(
"(%i/%i) Error getting URL %s. Retrying in 5 seconds."
% (count, tries, URL),
file=sys.stderr,
)
time.sleep(5)
continue
if error:
raise ValueError("Couldn't retrieve URL %s" % URL)
with open(target_path, "wb") as fp:
fp.write(res.content)
@validate(MD5_JSON)
def write_json(html_path, target_path=None):
with open(html_path, "rb") as fp:
soup = bs4.BeautifulSoup(fp, "html.parser")
items = []
table = soup.find(id="robocallers-detail-table-1")
for row in table.find_all(attrs={"class": "month-row"}):
tds = row.find_all("td")
month_year = tds[0].a.text
amount = tds[1].text
month, year = month_year.split(" ")
value = int(amount.replace(",", ""))
month_idx = MONTHS[month]
items.append({"time": "%s-%02d" % (year, month_idx), "value": value})
# During initial (manual) data collection it wasn't noticed that the first
# observation is at April 2015, not May 2015. Technically, this means that
# this series has a missing value at May 2015. However, because the
# annotators have considered the series as a consecutive series without the
# missing value, we do not add it in here. This way, the file that this
# script creates corresponds to what the annotators and algorithms have
# seen during the study.
apr2015 = next((it for it in items if it["time"] == "2015-04"), None)
apr2015["time"] = "2015-05"
by_date = {it["time"]: it["value"] for it in items}
# remove the observations that were not part of the original dataset
del by_date["2019-09"]
time = sorted(by_date.keys())
values = [by_date[t] for t in time]
series = [{"label": "V1", "type": "int", "raw": values}]
data = {
"name": "robocalls",
"longname": "Robocalls",
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": "%Y-%m",
"index": list(range(0, len(time))),
"raw": time,
},
"series": series,
}
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
def collect(output_dir="."):
html_path = os.path.join(output_dir, NAME_HTML)
json_path = os.path.join(output_dir, NAME_JSON)
write_html(target_path=html_path)
write_json(html_path, target_path=json_path)
def clean(output_dir="."):
html_path = os.path.join(output_dir, NAME_HTML)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(html_path):
os.unlink(html_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main(output_dir="."):
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
if __name__ == "__main__":
main()
| 5,907 | 25.375 | 89 | py |
TCPD | TCPD-master/datasets/ozone/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: G.J.J. van den Burg
"""
import argparse
import clevercsv
import json
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar="", escapechar=""
)
rows = list(reader)
header = rows.pop(0)
total = [r for r in rows if r[0] == "Total emissions"]
time = [r[2] for r in total]
values = [int(r[-1]) for r in total]
name = "ozone"
longname = "Ozone-Depleting Emissions"
time_fmt = "%Y"
series = [{"label": "Total Emissions", "type": "int", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,378 | 20.546875 | 73 | py |
TCPD | TCPD-master/datasets/homeruns/get_homeruns.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the homeruns dataset
See the README file for more information.
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import clevercsv
import hashlib
import json
import os
import sys
import time
from functools import wraps
from urllib.request import urlretrieve
from urllib.error import URLError
# Original source of the batting csv file
CSV_URL = "https://web.archive.org/web/20191128150525if_/https://raw.githubusercontent.com/chadwickbureau/baseballdatabank/242285f8f5e8981327cf50c07355fb034833ce4a/core/Batting.csv"
MD5_CSV = "43d8f8135e76dcd8b77d0709e33d2221"
MD5_JSON = "987bbab63e2c72acba1c07325303720c"
NAME_CSV = "Batting.csv"
NAME_JSON = "homeruns.json"
class ValidationError(Exception):
def __init__(self, filename):
        message = (
            "Validating the file '%s' failed. \n"
            "Please raise an issue on the GitHub page for this project \n"
            "if the error persists." % filename
        )
        # pass the message to Exception so it is shown when the error is raised
        super().__init__(message)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not check_md5sum(target, checksum):
raise ValidationError(target)
return out
return wrapper
return validate_decorator
@validate(MD5_CSV)
def download_csv(target_path=None):
count = 0
while count < 5:
count += 1
try:
urlretrieve(CSV_URL, target_path)
return
except URLError as err:
print(
"Error occurred (%r) when trying to download csv. Retrying in 5 seconds"
% err,
sys.stderr,
)
time.sleep(5)
def read_csv(csv_file):
with open(csv_file, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar="", escapechar=""
)
rows = list(reader)
header = rows.pop(0)
dicts = [dict(zip(header, row)) for row in rows]
AL = [d for d in dicts if d["lgID"] == "AL"]
years = sorted(set((d["yearID"] for d in AL)))
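    # total home runs hit in the American League per season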
by_year = {
int(y): sum(int(d["HR"]) for d in [x for x in AL if x["yearID"] == y])
for y in years
}
return by_year
@validate(MD5_JSON)
def write_json(csv_path, target_path=None):
by_year = read_csv(csv_path)
name = "homeruns"
longname = "Homeruns"
time_fmt = "%Y"
time = sorted(by_year.keys())
values = [by_year[t] for t in time]
series = [
{"label": "American League Home Runs", "type": "int", "raw": values},
]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(0, len(time))),
"raw": list(map(str, time)),
},
"series": series,
}
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
def collect(output_dir="."):
csv_path = os.path.join(output_dir, NAME_CSV)
json_path = os.path.join(output_dir, NAME_JSON)
download_csv(target_path=csv_path)
write_json(csv_path, target_path=json_path)
def clean(output_dir="."):
csv_path = os.path.join(output_dir, NAME_CSV)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(csv_path):
os.unlink(csv_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main(output_dir="."):
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
if __name__ == "__main__":
main()
| 4,748 | 24.532258 | 181 | py |
TCPD | TCPD-master/datasets/run_log/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: Gertjan van den Burg
"""
import argparse
import clevercsv
import json
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar="", escapechar=""
)
rows = list(reader)
header = rows.pop(0)
name = "run_log"
longname = "Run Log"
time = [r[0].rstrip("Z").replace("T", " ") for r in rows]
time_fmt = "%Y-%m-%d %H:%M:%S"
pace = [float(r[3]) for r in rows]
distance = [float(r[4]) for r in rows]
series = [
{"label": "Pace", "type": "float", "raw": pace},
{"label": "Distance", "type": "float", "raw": distance},
]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,458 | 20.776119 | 72 | py |
TCPD | TCPD-master/datasets/measles/get_measles.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the measles dataset
See the README file for more information.
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import clevercsv
import hashlib
import json
import os
import sys
import time
from functools import wraps
from urllib.request import urlretrieve
from urllib.error import URLError
DAT_URL = "https://web.archive.org/web/20191128124615if_/https://ms.mcmaster.ca/~bolker/measdata/ewmeas.dat"
MD5_DAT = "143d1dacd791df963674468c8b005bf9"
MD5_JSON = "e42afd03be893fc7deb98514c94fa4c7"
NAME_DAT = "ewmeas.dat"
NAME_JSON = "measles.json"
class ValidationError(Exception):
def __init__(self, filename):
message = (
"Validating the file '%s' failed. \n"
"Please raise an issue on the GitHub page for this project "
"if the error persists." % filename
)
super().__init__(message)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not check_md5sum(target, checksum):
raise ValidationError(target)
return out
return wrapper
return validate_decorator
@validate(MD5_DAT)
def download_dat(target_path=None):
count = 0
while count < 5:
count += 1
try:
urlretrieve(DAT_URL, target_path)
return
except URLError as err:
print(
"Error occurred (%r) when trying to download zip. Retrying in 5 seconds"
% err,
sys.stderr,
)
time.sleep(5)
@validate(MD5_JSON)
def write_json(dat_path, target_path=None):
with open(dat_path, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.reader(
fp, delimiter=" ", quotechar="", escapechar=""
)
rows = list(reader)
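    # each row appears to be a (fractional-year timestamp, case count) pair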
as_dicts = {t: int(x) for t, x in rows}
time = sorted(as_dicts.keys())
values = [as_dicts[t] for t in time]
series = [{"label": "V1", "type": "int", "raw": values}]
data = {
"name": "measles",
"longname": "Measles cases (England & Wales)",
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": "%Y-%F",
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
def collect(output_dir="."):
dat_path = os.path.join(output_dir, NAME_DAT)
json_path = os.path.join(output_dir, NAME_JSON)
    download_dat(target_path=dat_path)
write_json(dat_path, target_path=json_path)
def clean(output_dir="."):
dat_path = os.path.join(output_dir, NAME_DAT)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(dat_path):
os.unlink(dat_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main(output_dir="."):
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
if __name__ == "__main__":
main()
| 4,225 | 24.305389 | 108 | py |
TCPD | TCPD-master/datasets/gdp_argentina/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: Gertjan van den Burg
"""
import json
import argparse
import clevercsv
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r", newline="", encoding="UTF-8-SIG") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar='"', escapechar=""
)
rows = list(reader)
rows = rows[4:]
header = rows.pop(0)
as_dicts = []
for row in rows:
as_dicts.append({h: v for h, v in zip(header, row)})
argentina = next(
(d for d in as_dicts if d["Country Name"] == "Argentina"), None
)
tuples = []
for key in argentina:
try:
ikey = int(key)
except ValueError:
continue
if not argentina[key]:
continue
tuples.append((ikey, float(argentina[key])))
name = "gdp_argentina"
longname = "GDP Argentina"
time = [str(t[0]) for t in tuples]
time_fmt = "%Y"
series = [
{
"label": "GDP (constant LCU)",
"type": "float",
"raw": [t[1] for t in tuples],
}
]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,824 | 20.72619 | 76 | py |
TCPD | TCPD-master/datasets/brent_spot/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: G.J.J. van den Burg
"""
import argparse
import clevercsv
import json
SAMPLE = 10
def date_to_iso(datestr):
mm, dd, yyyy = list(map(int, datestr.split("/")))
return f"{yyyy}-{mm:02d}-{dd:02d}"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar="", escapechar=""
)
rows = list(reader)
rows = rows[5:]
rows = list(reversed(rows))
rows = [r for i, r in enumerate(rows) if i % SAMPLE == 0]
idx2000 = next((i for i, x in enumerate(rows) if x[0].endswith("2000")))
rows = rows[idx2000:]
name = "brent_spot"
longname = "Brent Spot Price"
time = [date_to_iso(r[0]) for r in rows]
time_fmt = "%Y-%m-%d"
values = [float(r[1]) for r in rows]
series = [{"label": "Dollars/Barrel", "type": "float", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,660 | 21.445946 | 76 | py |
TCPD | TCPD-master/datasets/bitcoin/get_bitcoin.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Retrieve the bitcoin dataset.
See the README file for more information.
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import clevercsv
import hashlib
import json
import os
import sys
import time
from functools import wraps
from urllib.request import urlretrieve
from urllib.error import URLError
CSV_URL = "https://web.archive.org/web/20191114131838if_/https://api.blockchain.info/charts/market-price?timespan=all&format=csv"
MD5_CSV = "9bd4f7b06d78347415f6aafe1d9eb680"
MD5_JSON = "f90ff14ed1fc0c3d47d4394d25cbce93"
NAME_CSV = "market-price.csv"
NAME_JSON = "bitcoin.json"
class ValidationError(Exception):
def __init__(self, filename):
message = (
"Validating the file '%s' failed. \n"
"Please raise an issue on the GitHub page for this project "
"if the error persists." % filename
)
super().__init__(message)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not check_md5sum(target, checksum):
raise ValidationError(target)
return out
return wrapper
return validate_decorator
@validate(MD5_CSV)
def get_market_price(target_path=None):
count = 0
while count < 5:
count += 1
try:
urlretrieve(CSV_URL, target_path)
return
except URLError as err:
print(
"Error occurred (%r) when trying to download csv. Retrying in 5 seconds"
% err,
sys.stderr,
)
time.sleep(5)
@validate(MD5_JSON)
def write_json(csv_path, target_path=None):
rows = clevercsv.read_table(csv_path)
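    # the first 500 rows (early, near-zero prices) are dropped and the series
    # is truncated at 2019-06-19, presumably to match the checksummed reference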
rows = rows[500:]
last_idx = next(
(i for i, r in enumerate(rows) if r[0] == "2019-06-19 00:00:00"), None
)
rows = rows[: (last_idx + 1)]
name = "bitcoin"
longname = "Bitcoin Price"
values = [float(r[1]) for r in rows]
time = [r[0].split(" ")[0] for r in rows]
time_fmt = "%Y-%m-%d"
series = [{"label": "USD/Bitcoin", "type": "float", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(0, len(time))),
"raw": time,
},
"series": series,
}
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
def collect(output_dir="."):
csv_path = os.path.join(output_dir, NAME_CSV)
json_path = os.path.join(output_dir, NAME_JSON)
get_market_price(target_path=csv_path)
write_json(csv_path, target_path=json_path)
def clean(output_dir="."):
csv_path = os.path.join(output_dir, NAME_CSV)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(csv_path):
os.unlink(csv_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main(output_dir="."):
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
if __name__ == "__main__":
main()
| 4,297 | 24.431953 | 129 | py |
TCPD | TCPD-master/datasets/occupancy/get_occupancy.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the occupancy dataset.
See the README file for more information.
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import clevercsv
import hashlib
import json
import os
import sys
import time
from functools import wraps
from urllib.request import urlretrieve
from urllib.error import URLError
SAMPLE = 16
TXT_URL = "https://web.archive.org/web/20191128145102if_/https://raw.githubusercontent.com/LuisM78/Occupancy-detection-data/master/datatraining.txt"
MD5_TXT = "e656cd731300cb444bd10fcd28071e37"
MD5_JSON = "bc6cd9adaf496fe30bf0e417d2c3b0c6"
NAME_TXT = "datatraining.txt"
NAME_JSON = "occupancy.json"
class ValidationError(Exception):
def __init__(self, filename):
message = (
"Validating the file '%s' failed. \n"
"Please raise an issue on the GitHub page for this project "
"if the error persists." % filename
)
super().__init__(message)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not check_md5sum(target, checksum):
raise ValidationError(target)
return out
return wrapper
return validate_decorator
@validate(MD5_TXT)
def download_txt(target_path=None):
count = 0
while count < 5:
count += 1
try:
urlretrieve(TXT_URL, target_path)
return
except URLError as err:
print(
"Error occurred (%r) when trying to download txt. Retrying in 5 seconds"
% err,
sys.stderr,
)
time.sleep(5)
@validate(MD5_JSON)
def write_json(txt_path, target_path=None):
with open(txt_path, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar='"', escapechar=""
)
rows = list(reader)
header = rows.pop(0)
header.insert(0, "id")
as_dicts = [dict(zip(header, r)) for r in rows]
var_include = ["Temperature", "Humidity", "Light", "CO2"]
time = [x["date"] for x in as_dicts]
time = [time[i] for i in range(0, len(time), SAMPLE)]
data = {
"name": "occupancy",
"longname": "Occupancy",
"n_obs": len(time),
"n_dim": len(var_include),
"time": {
"type": "string",
"format": "%Y-%m-%d %H:%M:%S",
"index": list(range(len(time))),
"raw": time,
},
"series": [],
}
for idx, var in enumerate(var_include, start=1):
lbl = "V%i" % idx
obs = [float(x[var]) for x in as_dicts]
obs = [obs[i] for i in range(0, len(obs), SAMPLE)]
data["series"].append({"label": lbl, "type": "float", "raw": obs})
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
def collect(output_dir="."):
txt_path = os.path.join(output_dir, NAME_TXT)
json_path = os.path.join(output_dir, NAME_JSON)
download_txt(target_path=txt_path)
write_json(txt_path, target_path=json_path)
def clean(output_dir="."):
txt_path = os.path.join(output_dir, NAME_TXT)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(txt_path):
os.unlink(txt_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main(output_dir="."):
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
if __name__ == "__main__":
main()
| 4,628 | 25.301136 | 148 | py |
TCPD | TCPD-master/datasets/ratner_stock/get_ratner_stock.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the ratner_stock dataset.
See the README file for more information.
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import clevercsv
import hashlib
import json
import os
import yfinance
import sys
import time
from functools import wraps
from urllib.error import URLError
MD5_CSV = "db7406dc7d4eb480d73b4fe6c4bb00be"
MD5_JSON = "f7086ff916f35b88463bf8fd1857815e"
SAMPLE = 3
NAME_CSV = "SIG.csv"
NAME_JSON = "ratner_stock.json"
class ValidationError(Exception):
def __init__(self, filename):
        message = (
            "Validating the file '%s' failed. \n"
            "Please raise an issue on the GitHub page for this project \n"
            "if the error persists." % filename
        )
        # pass the message to Exception so it is shown when the error is raised
        super().__init__(message)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not check_md5sum(target, checksum):
raise ValidationError(target)
return out
return wrapper
return validate_decorator
def write_csv(target_path=None):
count = 0
while count < 5:
count += 1
try:
sig = yfinance.download(
"SIG",
start="1988-07-14",
end="1995-08-23",
progress=False,
rounding=False,
)
sig.index = sig.index.tz_localize(None)
sig.round(6).to_csv(target_path, float_format="%.6f")
return
except URLError as err:
print(
"Error occurred (%r) when trying to download csv. Retrying in 5 seconds"
% err,
sys.stderr,
)
time.sleep(5)
@validate(MD5_JSON)
def write_json(csv_path, target_path=None):
with open(csv_path, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar="", escapechar=""
)
rows = list(reader)
header = rows.pop(0)
rows = [r for i, r in enumerate(rows) if i % SAMPLE == 0]
# take the first 600 rows
rows = rows[:600]
name = "ratner_stock"
longname = "Ratner Group Stock Price"
time = [r[0] for r in rows]
time_fmt = "%Y-%m-%d"
values = [None if r[4].strip() == "" else float(r[4]) for r in rows]
series = [{"label": "Close Price", "type": "float", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
@validate(MD5_JSON)
def write_patch(source_path, target_path=None):
# This patches rounding differences that started to occur around Feb 2021.
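    # The BLOB below holds an lzma-compressed, base85-encoded diff; applying it
    # to the freshly generated JSON reproduces the canonical file with MD5_JSON.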
from lzma import decompress
from base64 import b85decode
from diff_match_patch import diff_match_patch
dmp = diff_match_patch()
diff = decompress(b85decode(BLOB)).decode("utf-8")
with open(source_path, "r") as fp:
new_json = fp.read()
patches = dmp.patch_fromText(diff)
patched, _ = dmp.patch_apply(patches, new_json)
with open(target_path, "w") as fp:
fp.write(patched)
def collect(output_dir="."):
csv_path = os.path.join(output_dir, NAME_CSV)
json_path = os.path.join(output_dir, NAME_JSON)
write_csv(target_path=csv_path)
try:
write_json(csv_path, target_path=json_path)
need_patch = False
except ValidationError:
need_patch = True
if need_patch:
write_patch(json_path, target_path=json_path)
def clean(output_dir="."):
csv_path = os.path.join(output_dir, NAME_CSV)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(csv_path):
os.unlink(csv_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main(output_dir="."):
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
BLOB = (
b"{Wp48S^xk9=GL@E0stWa8~^|S5YJf5-~z({om~JRV0>CMK>=)+?;9Q77%VlSe-n@RwTxDTAq"
b"Xux?siO{U6G@C3FaW~bN5Z*_f_oBtk6v71E|?<5o1eA9Ph0ws)8e&C5nX<N?S7g`v*e-B4M$"
b"xsUK<=t_0!jQw0{TE0!b-V#yoVUWo}I#>y<%CaK|DwqCb~NKalm6OSmI1f8nAh0~Q%o~@<b>"
b"vQLj7z)h-uL{Tu*hvOp00000nukx=XK5Ee00FrH#03BVY6KKKvBYQl0ssI200dcD"
)
if __name__ == "__main__":
main()
| 5,611 | 24.981481 | 88 | py |
TCPD | TCPD-master/datasets/gdp_croatia/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: Gertjan van den Burg
"""
import json
import argparse
import clevercsv
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r", newline="", encoding="UTF-8-SIG") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar='"', escapechar=""
)
rows = list(reader)
rows = rows[4:]
header = rows.pop(0)
as_dicts = []
for row in rows:
as_dicts.append({h: v for h, v in zip(header, row)})
croatia = next(
(d for d in as_dicts if d["Country Name"] == "Croatia"), None
)
tuples = []
for key in croatia:
try:
ikey = int(key)
except ValueError:
continue
if not croatia[key]:
continue
tuples.append((ikey, int(croatia[key])))
name = "gdp_croatia"
longname = "GDP Croatia"
time = [str(t[0]) for t in tuples]
time_fmt = "%Y"
series = [
{
"label": "GDP (constant LCU)",
"type": "int",
"raw": [t[1] for t in tuples],
}
]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,806 | 20.511905 | 76 | py |
TCPD | TCPD-master/datasets/gdp_iran/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: Gertjan van den Burg
"""
import json
import argparse
import clevercsv
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r", newline="", encoding="UTF-8-SIG") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar='"', escapechar=""
)
rows = list(reader)
rows = rows[4:]
header = rows.pop(0)
as_dicts = []
for row in rows:
as_dicts.append({h: v for h, v in zip(header, row)})
iran = next(
(d for d in as_dicts if d["Country Name"] == "Iran, Islamic Rep."),
None,
)
tuples = []
for key in iran:
try:
ikey = int(key)
except ValueError:
continue
if not iran[key]:
continue
tuples.append((ikey, float(iran[key])))
name = "gdp_iran"
longname = "GDP Iran"
time = [str(t[0]) for t in tuples]
time_fmt = "%Y"
series = [
{
"label": "GDP (constant LCU)",
"type": "float",
"raw": [t[1] for t in tuples],
}
]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,812 | 20.329412 | 76 | py |
TCPD | TCPD-master/datasets/businv/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: G.J.J. van den Burg
"""
import argparse
import json
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r") as fp:
lines = [l.strip() for l in fp]
# header data should be first three lines
# we use some asserts to ensure things are what we expect them to be
header = lines[:3]
assert header[-1] == "Total Business"
lines = lines[4:]
assert lines[0].startswith("1992")
by_month = {}
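    # each data line is a year followed by 12 monthly values; a "." appears to
    # mark a month that is missing from the report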
for line in lines:
# stop on first empty line
if not line.strip():
break
parts = [x for x in line.split(" ") if x.strip()]
assert len(parts) == 13 # year + 12 months
year = parts.pop(0)
for midx, v in enumerate(parts, start=1):
if v == ".":
break
by_month[f"{year}-{midx:02}"] = int(v)
name = "businv"
longname = "Business Inventory"
time = sorted(by_month.keys())
time_fmt = "%Y-%m"
values = [by_month[t] for t in time]
series = [{"label": "Business Inventory", "type": "int", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,812 | 22.545455 | 76 | py |
TCPD | TCPD-master/datasets/well_log/convert.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: G.J.J. van den Burg
"""
import json
import argparse
SAMPLE = 6
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
def main():
args = parse_args()
with open(args.input_file, "r") as fp:
rows = [l.strip() for l in fp]
rows = [r for i, r in enumerate(rows) if i % SAMPLE == 0]
values = list(map(float, rows))
name = "well_log"
longname = "Well Log"
series = [{"label": "V1", "type": "float", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(values),
"n_dim": len(series),
"time": {"index": list(range(len(values)))},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
| 1,038 | 18.603774 | 63 | py |
TCPD | TCPD-master/datasets/apple/get_apple.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the apple dataset.
This script uses the yfinance package to download the data from Yahoo Finance
and subsequently reformats it to a JSON file that adheres to our dataset
schema. See the README file for more information on the dataset.
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import clevercsv
import hashlib
import json
import os
import yfinance
import sys
import time
from functools import wraps
from urllib.error import URLError
MD5_CSV = "9021c03bb9fea3f16ecc812d77926168"
MD5_JSON = "22edb48471bd3711f7a6e15de6413643"
SAMPLE = 3
NAME_CSV = "AAPL.csv"
NAME_JSON = "apple.json"
class ValidationError(Exception):
def __init__(self, filename):
        message = (
            "Validating the file '%s' failed. \n"
            "Please raise an issue on the GitHub page for this project \n"
            "if the error persists." % filename
        )
        # pass the message to Exception so it is shown when the error is raised
        super().__init__(message)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not check_md5sum(target, checksum):
raise ValidationError(target)
return out
return wrapper
return validate_decorator
def get_aapl():
"""Get the aapl data frame from yfinance"""
date_start = "1996-12-12"
date_end = "2004-05-14"
# We use an offset here to catch potential off-by-one errors in yfinance.
date_start_off = "1996-12-10"
date_end_off = "2004-05-17"
aapl = yfinance.download(
"AAPL",
start=date_start_off,
end=date_end_off,
progress=False,
rounding=False,
threads=False,
)
# Get the actual date range we want
aapl = aapl[date_start:date_end]
aapl = aapl.copy()
# Drop the timezone information
aapl.index = aapl.index.tz_localize(None)
# On 2020-08-28 Apple had a 4-for-1 stock split, and this changed
# the historical prices and volumes in the Yahoo API by a factor of
# 4. Since the original dataset was constructed before this time,
# we correct this change here by using a hard-coded closing price.
# This ensures that the resulting dataset has the same values as
# used in the TCPDBench paper.
if (0.2131696 <= aapl["Close"][0] <= 0.2131697) or (
0.21317000 <= aapl["Close"][0] <= 0.21317001
):
aapl["Open"] = aapl["Open"] * 4
aapl["High"] = aapl["High"] * 4
aapl["Low"] = aapl["Low"] * 4
aapl["Close"] = aapl["Close"] * 4
# Adj Close doesn't adhere to factor 4
aapl["Volume"] = aapl["Volume"] // 4
return aapl
def write_csv(target_path=None):
count = 0
while count < 5:
count += 1
try:
aapl = get_aapl()
aapl.round(6).to_csv(target_path, float_format="%.6f")
return
except URLError as err:
print(
"Error occurred (%r) when trying to download csv. Retrying in 5 seconds"
% err,
sys.stderr,
)
time.sleep(5)
@validate(MD5_JSON)
def write_json(csv_path, target_path=None):
with open(csv_path, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.DictReader(
fp, delimiter=",", quotechar="", escapechar=""
)
rows = list(reader)
# offset to ensure drop is visible in sampled series
rows = rows[1:]
if SAMPLE:
rows = [r for i, r in enumerate(rows) if i % SAMPLE == 0]
time = [r["Date"] for r in rows]
close = [float(r["Close"]) for r in rows]
volume = [int(r["Volume"]) for r in rows]
name = "apple"
longname = "Apple Stock"
time_fmt = "%Y-%m-%d"
series = [
{"label": "Close", "type": "float", "raw": close},
{"label": "Volume", "type": "int", "raw": volume},
]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(0, len(time))),
"raw": time,
},
"series": series,
}
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
@validate(MD5_JSON)
def write_patch(source_path, target_path=None):
# This patches rounding differences that started to occur around Feb 2021.
from lzma import decompress
from base64 import b85decode
from diff_match_patch import diff_match_patch
dmp = diff_match_patch()
diff = decompress(b85decode(BLOB)).decode("utf-8")
with open(source_path, "r") as fp:
new_json = fp.read()
patches = dmp.patch_fromText(diff)
patched, _ = dmp.patch_apply(patches, new_json)
with open(target_path, "w") as fp:
fp.write(patched)
def collect(output_dir="."):
csv_path = os.path.join(output_dir, NAME_CSV)
json_path = os.path.join(output_dir, NAME_JSON)
write_csv(target_path=csv_path)
try:
write_json(csv_path, target_path=json_path)
need_patch = False
except ValidationError:
need_patch = True
if need_patch:
write_patch(json_path, target_path=json_path)
def clean(output_dir="."):
csv_path = os.path.join(output_dir, NAME_CSV)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(csv_path):
os.unlink(csv_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main(output_dir="."):
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
BLOB = (
b"{Wp48S^xk9=GL@E0stWa8~^|S5YJf5;Ev@JL0teKV0>CMKFPfOjxe~QIT64tOZFjeJj%l`;B"
b"amH0GxlrmpS<nf9+SitRxKpHG3{qgy`x<uF?Z=^6ZYmt{-@RZ3^b&8;@H?s(M*j+oo9EcIkq"
b"&q9p+xLenHF8}SQq($c`MxOJxG`cqyU+fOCCXiB0HwyBM6&a|}3&`~iiwg5v<a818c-Z7LxI"
b"@M5eiYBiG*-hq%oed8X3|2}BW<0Teu^hA1F!;p=@+FTTylZxa7eqj9i_qPBM(pzWc@^3v<F&"
b"E)j(%5Gl>)!co^a*SMzZW?A7E?%W<p+#W(qnmw}YxYMVhJo`_m?i$gzVvg-uG;R`BFue41(I"
b"uVm0O$hp#LX*Uc?#FCsge}vHMB}Ys`Am<hrtNCsCv2<X8Ex}B2YD-ZE8R&MQXj1yXjO;KqT?"
b"y#Pd5Esz6|}xJ5Z+u8;(g%PO4#Hg1&x<NB^^*dLo}8mob~4Z=CIvGU1x<Aa&$+4oXnuRLjzE"
b"mq%Zx+wtZ#~;oFx~G)&Fb=<0<j|IZeuOt-G%2f}~_Hjzs?2?l<Cow<qZFx#xNC$^ZyW8UC#B"
b"(Ib}X%~rSoD&m{U=VZmR65LL2H2tF96(g<u`SCbLZ=*tF0Y;>X+sP#S{w8VTqfOngKo*0Qmb"
b"470B-^2cHk_yHd?Tn=LDUBdk)oweNj$dEGf3S%X#-gQRZutB4aCPz0ij!A^~(2(6gd#8hv&y"
b"&4>by!x|@Zl23=t%w9dXbgi&hEprgTEqZaJyk8piSx?$rQPmcqrFg$0iuNHHPg&^mt|PYH^!"
b"G6*U#$YO@1esf;CGL_@=rR;sg}1SmXG?+0r?UOl@6eUo<L}KNxDo&3;EF9yA(aidQW4z`U17"
b"=FA|@hgoZ~$0M@3XM<R(_0O1I*rg&M!A}P4q+jhi7TNq_LFc1&gy#;_>JI>i0v$hE?e4F&UE"
b"{ZeRZP=jk-TpkB(q)Ch?xbi?;yTZ`<<5*iYv%51zhC|0rO%PkGLaI!z}T1qrCazYA*tGRDG5"
b"*ga2v8vlKD1g-GU<}Qa`K}rC-FP0fC*K$;4*jWAlHqHmR89J0|aTUuJ?QH|>gMkhrC{xwevX"
b"WxGygUq8m8ClFpsp6~NOpEOqzJWEN!FK4w?UsN_lq7F-jEoBaAmcl_9W-lwo&%=aHH$~5L@`"
b"CPn$2n8`Pa*L%5ZpKqXN2%+^N7z~_wK4GXVmsYja%%qixFTmY#DGN8fPyjaogWl{hOmhb6L4"
b"}M#5&iCoY5t(aRo!KUxcG<(5>jLA9a$Vx3xrAyZS=7DlcGw+u~$Lr)zsC>8Xb`>KHk8h_+`("
b"=!vIEDh{<hl8}R9b6fgcf3)z=K=p0Tfs`{v%ZP4fZQhH3@X(D<6gFo7+3Otz>%6N2_zS#Wn$"
b"KP4t8m2?7Ue3KNx=mB^pG!+WcU!tF!gNH8c%&jo>BI8(E;}sa^9Kh73mmMIBx;!vcnpu+9;2"
b"e<ES){JVyu?B<~No}`!A%FLf2Xwgq9lS8CI3t+)T3bb@mvc2!Ub;EUzMn^;TdIrAieGI?JOr"
b"dgRDw&-J<<(>CCV#~u*{%86*IJ~f7~hd{Ke|=iT}hZnLOR#eIh#xe`~9rYB>A)?Pmn&+mI1B"
b"UK_Ml_GLQ&`gO-{GX<<3j@Ke?~+~V7vXjhn;teTWx$wPjiHBd3~%4M9h@Km)H221G@m`TSOf"
b"m<NnA$B)K16;()!cXlvDvNI0%gq^!BjGkuw=)kuzdt_+PpC;3WQ1RV%pZ;VKS0_SRq{utNF2"
b"QWTI)*vheTq=i<!S{%|QKx>Vv4<{T@s14;{#Nw{^XTWT+4RK8W9T0TAPbHOUfu7)5M)8spg8"
b"%RhPvrnyvdrph9U{Bg?S^&%vNvX+fyJ&|zXq3+Tf*s6Xbfj&h2+Rb+3j6HzBREQbNMfHdTbU"
b"Vj!46UM&C?g?Q!x?OG7&O!HD2fog*n3f#(VzWV6@b{&>Awy992;50L>$2<O$+%rqMp(c+9CV"
b"{!+Xf{LTkl5%oh01pwV1x#P?WG+kuDoNRKJ!<dl}x2kRI~m?z8y?|;LPmmvTo7giQ{MEe$X>"
b"7Q2`%wI!Bhsujc%619IDHmD~pAC_iV`8qULr4-5{Gc`eUQ92P01YmbdrN!tZ7^9=6q%lu*#w"
b"uYs=`P(Ygs<b*TeZc#S$1Q4;j)^LYcRZ=0@I)HwDHov9}EQ6Ne6<OQk}FsKS*6*N>fdWp_O@"
b"#)WJGzx>1*r$)(r!>Sa*sVKrT++x7<>U#5vEJsMe7_^s(gxcEz49_vQs96PMY0bls8KM4Otd"
b"fwjZ>Cn?<?)QTbg?j1QX*8W(nIStDZ>YU?ok1-c(ciYpH7+d!a$sHgzVJdOy7^<a?4EA*|A9"
b")xuSlXyANV!)pM~5MH%q(kW@^=-J?beiC|u)2>eivq(Yj_!`t7Z_}7jOt>5SyUIY|A1OtHrH"
b";M^?N}8B7y~LOX&5$h??rW`xH(QOkY}!0)2Z|v<Qedk_lJy2QiX2W6gwLU(Go4sqW)1&DzhR"
b"W9e%zVCzTZY|P)4^1**aW$N9o*|3ZZ(C)j95d25q%|gTJ_Lmc)1mxT_L#E!N;GmwKZdX^ECJ"
b"bDSz*okP;bU;*_}mGx4HS@&bY@D?h#OBss#-{~q`ez{%BS13At+iiq}>$T1^ok!L&x|s3pd|"
b"J`>C><APe#*okMLg5%NFM(Z92V}ob|VeCNdFt&Vg>QiBMDTu7t?%ON%6T(vq^@66xTr`Aq4o"
b"bbm%T{)4u2ks)=Z<FdXZRUgYILu<Hxo5Ug@~5c+y3fm2ddW1KH!VsMYNNtmzH0m_A6WDfT+U"
b"DAtMO)Mbjh^DVQ=DgZ@A1{Nh&WA9l8=!<i{2jr;=kb4w3qTq^s6tQzF)635K0DbanGtpa&Js"
b"ejc=fGh^-4$_NPd&!Yv)-tIMTdAx*Y(@Qw4-t6Xp$Lu{P=wglKrOjXsUJ0@h5m`5si*f-eyc"
b"UGWy^2Kd3TyTP0QBQuA;Rq>q<0eOa6SWqg;Gyu>}y)kRFDkBfFs!l&;?t(p7@*;PxqDIvBB%"
b"AX97l0kQPlz-lfcPr$NU-MYjHis8&C5JlxPbfBQc@J3EQSAivq*p>(bJbe%v>|2WX_`osx|a"
b"ziOtSu?c0qG2YS^hJ93N7f_7R|`O+Qr{yW(3><p!F!*pGP5Dk%bme4pV8p$bl^Hp$_<S=sDM"
b"ewuTWf+r_1L!=a*Bxrh^Z5Li3~u?t`5j4Am>K%3c#h}S`H*H!&@LI0tN$(YB958;x>uv2J&@"
b"P(WB?{aMgx85@-J02Iw8oE`QKF`rk*I_a1A14DKs>a-uMo<qZQ2`D;dXqAn0h6s=R4FCal{W"
b"I^X2p{ebiiQaF`tI-0@CiA{Na-7=GR{DwMnT6Yn{?l1``@NHE;s(nlr(W;WqaT$}_tH?rELr"
b"B8Fb|3$N+ffrx0Z<?{Cxae%`mIoNt>z)X9mvr-DvMhp(A@WY!EHC{EmWfpUxX`K9Hn$4^~(4"
b"@ubbi9EcPNgMC>SK3G<we!|ilVQ8X+Fd`|4C(y`363rk`0ZwXy<wXek>wgTWN`h!*~lJRbka"
b"P87p4RqW94#!S}SwPw#X%kf&cZxCANu%RMI@eR0^y$C1{N12BrsUA}#tzMB{1U4-mOnjx0pJ"
b"@JKF4@YNH<esoe<h$RgHe1KS<x}fF{!r_-IrdC-15~=vF&!0gWpyW*ucbc%8;X-!KdX|9EI}"
b"-+tPClt^~_{+f*(dg7~6-p4mEu*fSB1*;pS1a@G_N$4D|e58by3@-O7jXeFA`N>ahIPlHYrT"
b"fsa_6>rpm)~+$cw8A6Wt`(c&zS|z!=AE%TbgROrdJVe@>I^?tuZiPP|eR-weuj`*&RokYha+"
b")nwMdihMlSVH|?O{Z+A2$j}5O)8JMDc#na{_@2Fx>e3QO(F5R-SkNk_gMmt5Qc@&tjwBJ<5("
b"6evECo#>){$#Y@vlDLU`|BI>Qs;~|)2;<nIRQ8VtzAjUyzyzRixNu=wRzF2?I&sHTN|qQfHO"
b"w|3eRB0w#22q0+&vxZ9u0;&jpI-%zK)B7T^pnAM#rEQ$(+!S=`tqix~#J2caf#r{uu?=P}$k"
b"QGIxbNf|q@^$O_8QKuQ)g9#5hEqW}gXS3a@F)mizh#F~$u;?(K)Ek~x1hjmborB<tb(>?u*j"
b"x0hTXL$U2fO~=n&1(^!JV|?E>m}bQAz)Lv;0}{stEehDw71LN6mEO2R99o3weq8_Mfp*7Xyp"
b"4W*?0q_I0pGC)3*Y6OdhfoDre2=9*1*O@CO?2yCe(tub|HOSW`WM3WD^hGuW!aU?xbc^H=Fv"
b"?$9_jBvk@r$}zwLgZ)uTCr-N{n}01KVE?#X&i<ySlZWo!I$8`MluF!fa4COxaAvxMjtzzNf`"
b"4G9%^!H704F{RXvkNQx-lUfY;9zInkfh5DAf|~j;hxZu$b>z&?5gt;uz3Hj$9`<u9sGQ?"
b"+Z_niu8hj)HK!9XkU6XRvr&bI<z?q1p2=P&aqQ!iXVe<lh%f(&jadBdWZVnF#<4rZJ3DE8>&"
b"J+*GH-KZGm>J|3JZXV{+zL3PSSKB%{g{bYE2?h(qbx}OSs~!#{w?u%Q0Rdv969f)$L)IEMJQ"
b"TBX9V3$2~237)n{Hs>iq1a2x9I6SyH%Hmn8yTvjI_G@8Etj`ppZ&dm7%`2xk($`b9>O?;m07"
b"$0W?ak=+MVE8?QB=}-PH4EY4G^QrZ$95@AcH{443wRaC;Ur1)MUy1%t@Tq_`I^H)R7EDiVrW"
b"d0AyZMz!;L~T{P-2wx1p>TwsU^m*Q&4R6JT>8d@yxcc!t~)eXsN#Ate9m7TFpMrvU0@DqNUZ"
b"1m{M=&^h64vVx<O4I`(uhy%Qd=`Uh^v%3ouV7^ptFG>{e%>d{fVX2M0z8BzVVJP3X*H}AZ<J"
b"($18jF-JxHZWKYqe{;<ov43!S@XCmB<DpvU-}0g(9$1R&9N@SaDqd_<fOjJ!Algyw81#W3dU"
b"5FT+l4dKV8D0dytCPEz$AB&B7Pak%?BryPBZTvkFHN7XD|=G9anWGJSMlhHiktAJ<{UM&ijV"
b"{<IVSFCz0j!7hi0mUbdZ0K=3QFMAS%JgbGNPZZXHAJkP|F36q#`egm*`Jo{r80P}BvQ>f-NZ"
b"S-HRV8cm(g;9aET2x9>jIq(iG%(+UbVxvIvc%8y%@&BpQF!jJX%xHNV`Zlr6}RyI1cYK?+s?"
b"Ny3~>#QU;pWkakTq8X~=?k7A*i_W1qE1Hu8-@WKS*K;NWekZ5ThD7s?6-U2aEy-%OQjWBAbx"
b"pI9T7FWcAJWc6KqHa-E1<<)xm65-`O<r-a`e!6dc6T!=S!<ua<Vzh?-1N#VJ3M-i1se_$X1I"
b"@8SR*)v2vubq5PtqT}Ky6<bK6ov9iD+Ukiab@<_U%vC=Ig3fr#!fO+q)2=~9M%#YEKBvP<k_"
b"~H;jx`4d=1F(b&7AFnEIRgcfthnu{tmAs^Hu<h`mSnXcpo`xm;}PY*@m9upcR3UdkMhO}ll8"
b"k74*c+_^sbv_)eWsGYR<g-i};%WpO*Re2aQ7Re7=|Q>dS%=4#K-VTF9_BZavHENNuTX9UOZ)"
b"9O$miSJQauagM%b7Z-K3a=^N1Cl}F&$h~baz42+?<1%Ey%0^qI-AO-EfULbU<?Ei82n<GW)#"
b"t%<BiBMPI0;MyE+gPKpO=B@yx685Xzy!1V>`3>KTtDG5b5#*a+X)?IuoN37fq9nB%YV^6Fhg"
b"ypQ<90;nMmBLik>VzmYn##?sNGAIug(^>zDgY7xBHjx)HO7Tk%5(Gzxx3kRoYWv@2J(M6S-9"
b"PEFuEr~{F+X|J<+>RJ4Oa)PqiZf1?b0fmJo&P>Vb+o-Pue#QO<k~`mwg0j|Jv{&Lv`Pshndy"
b"Hnuyf)8K}W<<ru=kEIig2os&+%XKeppJm;fyd7cWwcLzEaDxlWU9AYb_U%MKDBV;P@qwoeh1"
b"ytpF|ef6__SGgg-2XGc8h8fn+UXKTFFt}V8%PzAS6=b?Y(bDYFzCCL(HaYZ<Fitly)l9?|M;"
b"KC8zh7D=*H10wci1q7sFt;jbkD<H_<@Naku1HvMd26}E$x7dV7HK;U{&;#@@v~8gvsSSMiNY"
b"g!A!on?7kx!ANx>z-hV}yF#}XfI&&m!wWNh^){$ZVm>jmRtDHbevBw#o_NJ4W4)POR$3g}w4"
b"UTa`iE1#2f}~!kSUDZl^K9b6hrh3G0P1`dmx4oDvC35(bfW-MnWPx-|4T{sh4Rs*<vHN9Oe^"
b"Vc`<<ZuTA}T}td&ssv-*xdR)grr&$v`*$|^uBSaTw?68ywi6dC>{#%J)#;^#x%NY3V6p^HiP"
b"YlIl$iOWHToMB(KleJD%N#k168gwwpI?XiVJlnhm>5aLBK|^OG@z&^LMA=(N(P(IZdZS)ZK2"
b"iI5F>JD=00000TO!Fq)kn{C00G@5=A8lnO;(*3vBYQl0ssI200dcD"
)
if __name__ == "__main__":
main()
| 13,677 | 39.111437 | 88 | py |
TCPD | TCPD-master/utils/plot_dataset.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility script to plot datasets and annotations.
Author: G.J.J. van den Burg
Copyright (c) 2020 - The Alan Turing Institute
License: See the LICENSE file.
"""
import argparse
import datetime
import json
import matplotlib.pyplot as plt
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--result-file",
help="JSON file with results from a change point detection method",
)
parser.add_argument(
"-o", "--output-file", help="Output file to save the figure to"
)
parser.add_argument("input", help="Input dataset file (in JSON format)")
return parser.parse_args()
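# Convert a fractional year such as 1992.5 into a datetime by interpolating
# the fractional part within that calendar year.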
def frac_to_dt(number):
number = float(number)
year = int(float(number))
remainder = number - year
begin = datetime.datetime(year, 1, 1)
end = datetime.datetime(year + 1, 1, 1)
seconds = remainder * (end - begin).total_seconds()
return begin + datetime.timedelta(seconds=seconds)
def load_data(filename):
with open(filename, "rb") as fid:
data = json.load(fid)
title = data["name"]
y = data["series"][0]["raw"]
if "time" in data and "format" in data["time"]:
fmt = data["time"]["format"]
if fmt == "%Y.%F":
x = list(map(frac_to_dt, data["time"]["raw"]))
else:
try:
x = pd.to_datetime(
data["time"]["raw"], format=data["time"]["format"]
)
except ValueError:
x = list(range(1, len(y) + 1))
else:
x = list(range(1, len(y) + 1))
as_dict = {"x": x}
for idx, series in enumerate(data["series"]):
as_dict["y" + str(idx)] = series["raw"]
df = pd.DataFrame(as_dict)
return df, title
def load_result(filename):
with open(filename, "r") as fp:
data = json.load(fp)
if not data["status"] == "SUCCESS":
print("Detection wasn't successful.")
return None
return data["result"]["cplocations"]
def main():
args = parse_args()
df, title = load_data(args.input)
results = None
if args.result_file:
results = load_result(args.result_file)
has_date = False
try:
_ = df["x"].dt
has_date = True
except AttributeError:
pass
fig, axes = plt.subplots(df.shape[1] - 1, 1, squeeze=False)
for idx, col in enumerate(df.columns[1:]):
if has_date:
axes[idx, 0].plot_date(df["x"], df[col], ".", color="tab:blue")
axes[idx, 0].plot_date(df["x"], df[col], "-", color="tab:blue")
if results:
for loc in results:
if loc == 0:
continue
if loc == df.shape[0]:
continue
pos = df["x"].values[loc]
axes[idx, 0].axvline(x=pos, linestyle="--", color="red")
else:
axes[idx, 0].scatter(df["x"], df[col], color="tab:blue")
axes[idx, 0].plot(df["x"], df[col], color="tab:blue")
if results:
for loc in results:
if loc == 0:
continue
if loc == df.shape[0]:
continue
pos = df["x"].values[loc]
axes[idx, 0].axvline(x=pos, linestyle="--", color="red")
fig.suptitle(title)
if args.output_file:
plt.savefig(args.output_file, transparent=True)
else:
plt.show()
if __name__ == "__main__":
main()
| 3,606 | 27.401575 | 76 | py |
TCPD | TCPD-master/utils/validate_dataset.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Validate the dataset schema of a given file.
Note that this script requires the ``jsonschema`` package.
Author: G.J.J. van den Burg
License: This file is part of TCPD. See the LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import json
import jsonschema
import os
import sys
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--schema-file",
help="Schema file to use",
default="./schema.json",
)
parser.add_argument("-d", "--dataset-dir", help="Dataset directory")
parser.add_argument(
"datafile", help="JSON file with a TCPD time series", nargs="?"
)
parser.add_argument(
"-v", "--verbose", help="Enable verbose mode", action="store_true"
)
return parser.parse_args()
def load_schema(schema_file):
if not os.path.exists(schema_file):
raise FileNotFoundError(schema_file)
with open(schema_file, "rb") as fp:
schema = json.load(fp)
return schema
def find_datafiles(dataset_dir):
data_files = {}
datadirs = os.listdir(dataset_dir)
for ddir in datadirs:
pth = os.path.join(dataset_dir, ddir)
files = os.listdir(pth)
json_files = [f for f in files if f.endswith(".json")]
for jf in json_files:
jfpath = os.path.join(pth, jf)
if jf in data_files:
raise KeyError("Duplicate data file '%s'?" % jfpath)
data_files[jf] = jfpath
return data_files
def validate_dataset(filename, schema_file=None):
"""Validate a dataset file against the schema and other requirements
"""
if not os.path.exists(filename):
return "File not found."
with open(filename, "rb") as fp:
try:
data = json.load(fp)
except json.JSONDecodeError as err:
return "JSON decoding error: %s" % err.msg
try:
schema = load_schema(schema_file)
except FileNotFoundError:
return "Schema file not found."
try:
jsonschema.validate(instance=data, schema=schema)
except jsonschema.ValidationError as err:
return "JSONSchema validation error: %s" % err.message
if len(data["series"]) != data["n_dim"]:
return "Number of dimensions and number of series don't match"
if "time" in data.keys():
if not "format" in data["time"] and "raw" in data["time"]:
return "'raw' must be accompanied by format"
if "format" in data["time"] and not "raw" in data["time"]:
return "Format must be accompanied by 'raw'"
if "index" in data["time"]:
if not data["time"]["index"][0] == 0:
return "Index should start at zero."
if not len(data["time"]["index"]) == data["n_obs"]:
return "Number of indices must match number of observations"
if "raw" in data["time"]:
if len(data["time"]["raw"]) != data["n_obs"]:
return "Number of time points doesn't match number of observations"
if None in data["time"]["raw"]:
return "Null is not supported in time axis. Use 'NaN' instead."
has_missing = False
for var in data["series"]:
if len(var["raw"]) != data["n_obs"]:
return "Number of observations doesn't match for %s" % var["label"]
if float("nan") in var["raw"]:
return "NaN is not supported in series. Use null instead."
has_missing = has_missing or any(map(lambda x: x is None, var["raw"]))
# this doesn't exist yet, so let's not implement it until we need it.
if data["n_dim"] > 1 and has_missing:
return "Missing values are not yet supported for multidimensional data"
return None
def main():
args = parse_args()
log = lambda *a, **kw: print(*a, **kw) if args.verbose else None
if args.dataset_dir:
datafiles = find_datafiles(args.dataset_dir)
for dset in datafiles:
log("Validating %s" % dset)
result = validate_dataset(
datafiles[dset], schema_file=args.schema_file
)
if not result is None:
print(
"Dataset: %s. Error: %s" % (dset, result), file=sys.stderr
)
raise SystemExit(1)
else:
result = validate_dataset(args.datafile, schema_file=args.schema_file)
if not result is None:
print("Error: %s" % result, file=sys.stderr)
raise SystemExit(1)
log("Validation passed.")
if __name__ == "__main__":
main()
| 4,659 | 30.486486 | 83 | py |
TCPD | TCPD-master/utils/check_checksums.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Validate the datasets by checksum
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import hashlib
import os
import json
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--checksum-file", help="Checksum file (json)", required=True
)
parser.add_argument(
"-d", "--dataset-dir", help="Dataset directory", required=True
)
parser.add_argument(
"-v", "--verbose", help="Enable verbose mode", action="store_true"
)
return parser.parse_args()
def md5sum(filename):
with open(filename, "rb") as fp:
data = fp.read()
return hashlib.md5(data).hexdigest()
def load_checksums(checksum_file):
with open(checksum_file, "r") as fp:
checksums = json.load(fp)
assert checksums["kind"] == "md5"
return checksums["checksums"]
def find_datafiles(dataset_dir):
data_files = {}
datadirs = os.listdir(dataset_dir)
for ddir in datadirs:
pth = os.path.join(dataset_dir, ddir)
files = os.listdir(pth)
json_files = [f for f in files if f.endswith(".json")]
for jf in json_files:
jfpath = os.path.join(pth, jf)
if jf in data_files:
raise KeyError("Duplicate data file '%s'?" % jfpath)
data_files[jf] = jfpath
return data_files
def main():
args = parse_args()
log = lambda *a, **kw: print(*a, **kw) if args.verbose else None
checksums = load_checksums(args.checksum_file)
data_files = find_datafiles(args.dataset_dir)
for fname in checksums:
log("Checking %s" % fname)
if not fname in data_files:
raise FileNotFoundError("Missing data file: %s" % fname)
md5 = md5sum(data_files[fname])
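        # a checksum entry may be a list of accepted values, presumably to
        # allow for upstream revisions of the raw data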
if isinstance(checksums[fname], list):
if not md5 in checksums[fname]:
raise ValueError(
"Checksums don't match for file: %s" % (data_files[fname])
)
else:
if not md5 == checksums[fname]:
raise ValueError(
"Checksums don't match for file: %s" % (data_files[fname])
)
log("All ok.")
if __name__ == "__main__":
main()
| 2,382 | 24.902174 | 78 | py |
UDAStrongBaseline | UDAStrongBaseline-master/sbs_traindbscan_unc.py | from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
from sklearn.cluster import DBSCAN
# from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F
# from torch.nn import init
#
# from UDAsbs.utils.rerank import compute_jaccard_dist
from UDAsbs import datasets, sinkhornknopp as sk
from UDAsbs import models
from UDAsbs.trainers import DbscanBaseTrainer_unc_ema
from UDAsbs.evaluators import Evaluator, extract_features
from UDAsbs.utils.data import IterLoader
from UDAsbs.utils.data import transforms as T
from UDAsbs.utils.data.sampler import RandomMultipleGallerySampler
from UDAsbs.utils.data.preprocessor import Preprocessor
from UDAsbs.utils.logging import Logger
from UDAsbs.utils.serialization import load_checkpoint, save_checkpoint#, copy_state_dict
from UDAsbs.memorybank.NCEAverage import onlinememory
from UDAsbs.utils.faiss_rerank import compute_jaccard_distance
# import ipdb
start_epoch = best_mAP = 0
def get_data(name, data_dir, l=1):
root = osp.join(data_dir)
dataset = datasets.create(name, root, l)
label_dict = {}
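    # map each ground-truth identity to the indices of its samples
    # (used to report cluster accuracy)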
for i, item_l in enumerate(dataset.train):
# dataset.train[i]=(item_l[0],0,item_l[2])
if item_l[1] in label_dict:
label_dict[item_l[1]].append(i)
else:
label_dict[item_l[1]] = [i]
return dataset, label_dict
def get_train_loader(dataset, height, width, choice_c, batch_size, workers,
num_instances, iters, trainset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
T.RandomErasing(probability=0.5, mean=[0.596, 0.558, 0.497])
])
train_set = trainset #dataset.train if trainset is None else trainset
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances, choice_c)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer, mutual=True),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
# train_loader = IterLoader(
# DataLoader(UnsupervisedCamStylePreprocessor(train_set, root=dataset.images_dir, transform=train_transformer,
# num_cam=dataset.num_cam,camstyle_dir=dataset.camstyle_dir, mutual=True),
# batch_size=batch_size, num_workers=0, sampler=sampler,#workers
# shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
from UDAsbs.models.dsbn import convert_dsbn
from torch.nn import Parameter
def copy_state_dict(state_dict, model, strip=None):
tgt_state = model.state_dict()
copied_names = set()
for name, param in state_dict.items():
name = name.replace('module.', '')
if strip is not None and name.startswith(strip):
name = name[len(strip):]
if name not in tgt_state:
continue
if isinstance(param, Parameter):
param = param.data
if param.size() != tgt_state[name].size():
print('mismatch:', name, param.size(), tgt_state[name].size())
continue
tgt_state[name].copy_(param)
copied_names.add(name)
missing = set(tgt_state.keys()) - copied_names
if len(missing) > 0:
print("missing keys in state_dict:", missing)
return model
def create_model(args, ncs, wopre=False):
model_1 = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=ncs)
model_1_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=ncs)
if not wopre:
initial_weights = load_checkpoint(args.init_1)
copy_state_dict(initial_weights['state_dict'], model_1)
copy_state_dict(initial_weights['state_dict'], model_1_ema)
print('load pretrain model:{}'.format(args.init_1))
# adopt domain-specific BN
convert_dsbn(model_1)
convert_dsbn(model_1_ema)
model_1.cuda()
model_1_ema.cuda()
model_1 = nn.DataParallel(model_1)
model_1_ema = nn.DataParallel(model_1_ema)
    for i, cl in enumerate(ncs):
        # initialise each mean-teacher classifier head with the student's weights
        name = 'classifier{}_{}'.format(i, cl)
        getattr(model_1_ema.module, name).weight.data.copy_(getattr(model_1.module, name).weight.data)
return model_1, None, model_1_ema, None
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
class Optimizer:
def __init__(self, target_label, m, dis_gt, t_loader,N, hc=3, ncl=None, n_epochs=200,
weight_decay=1e-5, ckpt_dir='/',fc_len=3500):
self.num_epochs = n_epochs
self.momentum = 0.9
self.weight_decay = weight_decay
self.checkpoint_dir = ckpt_dir
self.N=N
self.resume = True
self.checkpoint_dir = None
self.writer = None
# model stuff
        self.hc = len(ncl)  # number of clustering heads
        self.K = ncl  # number of clusters for each head
        self.K_c = [fc_len for _ in range(len(ncl))]  # classifier size per head
self.model = m
self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.L = [torch.LongTensor(target_label[i]).to(self.dev) for i in range(len(self.K))]
self.nmodel_gpus = 4#len()
self.pseudo_loader = t_loader#torch.utils.data.DataLoader(t_loader,batch_size=256)
# can also be DataLoader with less aug.
self.train_loader = t_loader
self.lamb = 25#args.lamb # the parameter lambda in the SK algorithm
self.cpu=True
self.dis_gt=dis_gt
dtype_='f64'
if dtype_ == 'f32':
self.dtype = torch.float32 if not self.cpu else np.float32
else:
self.dtype = torch.float64 if not self.cpu else np.float64
self.outs = self.K
# activations of previous to last layer to be saved if using multiple heads.
self.presize = 2048#4096 #
def optimize_labels(self):
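        # Solve the balanced pseudo-label assignment with the Sinkhorn-Knopp
        # algorithm, on CPU or GPU depending on self.cpu.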
if self.cpu:
sk.cpu_sk(self)
else:
sk.gpu_sk(self)
# save Label-assignments: optional
# torch.save(self.L, os.path.join(self.checkpoint_dir, 'L', str(niter) + '_L.gz'))
# free memory
data = 0
self.PS = 0
return self.L
import collections
def func(x, a, b, c):
return a * np.exp(-b * x) + c
def write_sta_im(train_loader):
    # count the number of samples per identity and keep the raw label list
    label2num = collections.defaultdict(int)
    save_label = []
    for x in train_loader:
        label2num[x[1]] += 1
        save_label.append(x[1])
    labels = sorted(label2num.items(), key=lambda item: item[1])[::-1]
    num = [j for i, j in labels]
    # the normalised distribution is computed here but not returned
    distribution = np.array(num) / len(train_loader)
    return num, save_label
def print_cluster_acc(label_dict,target_label_tmp):
num_correct = 0
for pid in label_dict:
pid_index = np.asarray(label_dict[pid])
pred_label = np.argmax(np.bincount(target_label_tmp[pid_index]))
num_correct += (target_label_tmp[pid_index] == pred_label).astype(np.float32).sum()
cluster_accuracy = num_correct / len(target_label_tmp)
    print(f'cluster accuracy: {cluster_accuracy:.3f}')
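# Per-sample certainty weight for the mean-teacher setup: exp(-KL divergence
# between the student prediction and the (detached) EMA-teacher prediction).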
class uncer(object):
def __init__(self):
self.sm = torch.nn.Softmax(dim=1)
self.log_sm = torch.nn.LogSoftmax(dim=1)
# self.cross_batch=CrossBatchMemory()
self.kl_distance = nn.KLDivLoss(reduction='none')
def kl_cal(self,pred1,pred1_ema):
variance = torch.sum(self.kl_distance(self.log_sm(pred1),
self.sm(pred1_ema.detach())), dim=1)
exp_variance = torch.exp(-variance)
return exp_variance
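# End-to-end UDA training: source-pretrained student/EMA-teacher pair, DBSCAN
# pseudo-labels on the target domain, a hybrid source-centroid / target-instance
# memory bank, and periodic re-clustering during training.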
def main_worker(args):
global start_epoch, best_mAP
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create data loaders
iters = args.iters if (args.iters > 0) else None
ncs = [int(x) for x in args.ncs.split(',')]
# ncs_dbscan=ncs.copy()
dataset_target, label_dict = get_data(args.dataset_target, args.data_dir, len(ncs))
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
tar_cluster_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=dataset_target.train)
dataset_source, _ = get_data(args.dataset_source, args.data_dir, len(ncs))
sour_cluster_loader = get_test_loader(dataset_source, args.height, args.width, args.batch_size, args.workers,
testset=dataset_source.train)
train_loader_source = get_train_loader(dataset_source, args.height, args.width, 0, args.batch_size, args.workers,
args.num_instances, args.iters, dataset_source.train)
source_classes = dataset_source.num_train_pids
distribution,_ = write_sta_im(dataset_source.train)
fc_len = 3500
model_1, _, model_1_ema, _ = create_model(args, [fc_len for _ in range(len(ncs))])
# print(model_1)
epoch = 0
target_features_dict, _ = extract_features(model_1_ema, tar_cluster_loader, print_freq=100)
target_features = F.normalize(torch.stack(list(target_features_dict.values())), dim=1)
# Calculate distance
print('==> Create pseudo labels for unlabeled target domain')
rerank_dist = compute_jaccard_distance(target_features, k1=args.k1, k2=args.k2)
del target_features
if (epoch == 0):
# DBSCAN cluster
eps = 0.6 # 0.6
print('Clustering criterion: eps: {:.3f}'.format(eps))
cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=-1)
# select & cluster images as training set of this epochs
pseudo_labels = cluster.fit_predict(rerank_dist)
# num_ids = len(set(pseudo_labels)) - (1 if -1 in pseudo_labels else 0)
plabel=[]
new_dataset=[]
for i, (item, label) in enumerate(zip(dataset_target.train, pseudo_labels)):
if label == -1:
continue
plabel.append(label)
new_dataset.append((item[0], label, item[-1]))
target_label = [plabel]
ncs = [len(set(plabel)) + 1]
        print('new classes: {}, length of new dataset: {}'.format(ncs, len(new_dataset)))
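        # Resize the classifier heads so their output covers the new target
        # clusters plus all source-domain classes.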
model_1.module.classifier0_3500 = nn.Linear(2048, ncs[0]+source_classes, bias=False).cuda()
model_1_ema.module.classifier0_3500 = nn.Linear(2048, ncs[0]+source_classes, bias=False).cuda()
model_1.module.classifier3_0_3500 = nn.Linear(1024, ncs[0]+source_classes, bias=False).cuda()
model_1_ema.module.classifier3_0_3500 = nn.Linear(1024, ncs[0]+source_classes, bias=False).cuda()
print(model_1.module.classifier0_3500)
# if epoch !=0:
# model_1.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
# model_1_ema.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
# Initialize source-domain class centroids
print("==> Initialize source-domain class centroids in the hybrid memory")
source_features, _ = extract_features(model_1, sour_cluster_loader, print_freq=50)
sour_fea_dict = collections.defaultdict(list)
    print("==> Finished extracting source features; computing class centroids")
for f, pid, _ in sorted(dataset_source.train):
sour_fea_dict[pid].append(source_features[f].unsqueeze(0))
source_centers = [torch.cat(sour_fea_dict[pid], 0).mean(0) for pid in sorted(sour_fea_dict.keys())]
source_centers = torch.stack(source_centers, 0)
source_centers = F.normalize(source_centers, dim=1)
del sour_fea_dict, source_features, sour_cluster_loader
# Evaluator
evaluator_1 = Evaluator(model_1)
evaluator_1_ema = Evaluator(model_1_ema)
clusters = [args.num_clusters] * args.epochs# TODO: dropout clusters
k_memory=8192
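    # Hybrid memory bank: the first `source_classes` slots hold the source class
    # centroids, the remaining k_memory slots hold target instance features
    # (their index_memory entries are initialized to -1).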
contrast = onlinememory(2048, len(new_dataset),sour_numclass=source_classes,K=k_memory+source_classes,
index2label=target_label, choice_c=args.choice_c, T=0.07,
use_softmax=True).cuda()
contrast.index_memory = torch.cat((torch.arange(source_classes), -1*torch.ones(k_memory).long()), dim=0).cuda()
contrast.memory = torch.cat((source_centers, torch.rand(k_memory, 2048)), dim=0).cuda()
tar_selflabel_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=new_dataset)
o = Optimizer(target_label, dis_gt=distribution, m=model_1, ncl=ncs,
t_loader=tar_selflabel_loader, N=len(new_dataset), fc_len=fc_len)
uncertainty=collections.defaultdict(list)
    print("Training beginning~~~~~~!!!!!!!!!")
for epoch in range(len(clusters)):
iters_ = 300 if epoch % 1== 0 else iters
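        # Every 6 epochs the teacher features are re-extracted and the target
        # domain is re-clustered to refresh the pseudo labels and classifiers.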
if epoch % 6 == 0 and epoch !=0:
target_features_dict, _ = extract_features(model_1_ema, tar_cluster_loader, print_freq=50)
target_features = torch.stack(list(target_features_dict.values()))
target_features = F.normalize(target_features, dim=1)
            print('==> Create pseudo labels for unlabeled target domain with DBSCAN')
rerank_dist = compute_jaccard_distance(target_features, k1=args.k1, k2=args.k2)
# select & cluster images as training set of this epochs
pseudo_labels = cluster.fit_predict(rerank_dist)
plabel = []
new_dataset = []
for i, (item, label) in enumerate(zip(dataset_target.train, pseudo_labels)):
if label == -1:continue
plabel.append(label)
new_dataset.append((item[0], label, item[-1]))
target_label = [plabel]
ncs = [len(set(plabel)) + 1]
tar_selflabel_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=new_dataset)
o = Optimizer(target_label, dis_gt=distribution, m=model_1, ncl=ncs,
t_loader=tar_selflabel_loader, N=len(new_dataset),fc_len=fc_len)
contrast.index_memory = torch.cat((torch.arange(source_classes), -1 * torch.ones(k_memory).long()),
dim=0).cuda()
model_1.module.classifier0_3500 = nn.Linear(2048, ncs[0] + source_classes, bias=False).cuda()
model_1_ema.module.classifier0_3500 = nn.Linear(2048, ncs[0] + source_classes, bias=False).cuda()
model_1.module.classifier3_0_3500 = nn.Linear(1024, ncs[0] + source_classes, bias=False).cuda()
model_1_ema.module.classifier3_0_3500 = nn.Linear(1024, ncs[0] + source_classes, bias=False).cuda()
print(model_1.module.classifier0_3500)
# if epoch !=0:
# model_1.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
# model_1_ema.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
target_label_o = o.L
target_label = [list(np.asarray(target_label_o[0].data.cpu())+source_classes)]
contrast.index2label = [[i for i in range(source_classes)] + target_label[0]]
# change pseudo labels
for i in range(len(new_dataset)):
new_dataset[i] = list(new_dataset[i])
for j in range(len(ncs)):
new_dataset[i][j+1] = int(target_label[j][i])
new_dataset[i] = tuple(new_dataset[i])
        cc = args.choice_c  # (args.choice_c+1)%len(ncs)
train_loader_target = get_train_loader(dataset_target, args.height, args.width, cc,
args.batch_size, args.workers, args.num_instances, iters_, new_dataset)
# Optimizer
params = []
flag = 1.0
# if 20<epoch<=40 or 60<epoch<=80 or 120<epoch:
# flag=0.1
# else:
# flag=1.0
for key, value in model_1.named_parameters():
if not value.requires_grad:
print(key)
continue
params += [{"params": [value], "lr": args.lr*flag, "weight_decay": args.weight_decay}]
optimizer = torch.optim.Adam(params)
# Trainer
trainer = DbscanBaseTrainer_unc_ema(model_1, model_1_ema, contrast, None,None,
num_cluster=ncs, c_name=ncs,alpha=args.alpha, fc_len=fc_len,
source_classes=source_classes, uncer_mode=args.uncer_mode)
train_loader_target.new_epoch()
train_loader_source.new_epoch()
trainer.train(epoch, train_loader_target, train_loader_source, optimizer, args.choice_c,
lambda_tri=args.lambda_tri, lambda_ct=args.lambda_ct, lambda_reg=args.lambda_reg,
print_freq=args.print_freq, train_iters=iters_,uncertainty_d=uncertainty)
def save_model(model_ema, is_best, best_mAP, mid):
save_checkpoint({
'state_dict': model_ema.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'model' + str(mid) + '_checkpoint.pth.tar'))
if epoch==20:
args.eval_step=2
elif epoch==40:
args.eval_step=1
if ((epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1)):
            # evaluator_1.evaluate(...) on the student model is skipped here;
            # only the EMA (teacher) model is scored.
            mAP_1 = 0
mAP_2 = evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery,
cmc_flag=False)
is_best = (mAP_1 > best_mAP) or (mAP_2 > best_mAP)
best_mAP = max(mAP_1, mAP_2, best_mAP)
save_model(model_1, (is_best), best_mAP, 1)
save_model(model_1_ema, (is_best and (mAP_1 <= mAP_2)), best_mAP, 2)
print('\n * Finished epoch {:3d} model no.1 mAP: {:5.1%} model no.2 mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP_1, mAP_2, best_mAP, ' *' if is_best else ''))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="MMT Training")
# data
parser.add_argument('-st', '--dataset-source', type=str, default='market1501',
choices=datasets.names())
parser.add_argument('-tt', '--dataset-target', type=str, default='dukemtmc',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--choice_c', type=int, default=0)
parser.add_argument('--num-clusters', type=int, default=700)
parser.add_argument('--ncs', type=str, default='60')
parser.add_argument('--k1', type=int, default=30,
help="hyperparameter for jaccard distance")
parser.add_argument('--k2', type=int, default=6,
help="hyperparameter for jaccard distance")
parser.add_argument('--height', type=int, default=256,
help="input height")
parser.add_argument('--width', type=int, default=128,
help="input width")
parser.add_argument('--num-instances', type=int, default=4,
                        help="each minibatch consists of "
                             "(batch_size // num_instances) identities, and "
                             "each identity has num_instances instances; "
                             "set 0 to disable the identity sampler")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50_multi',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate of new parameters, for pretrained "
"parameters it is 10 times smaller than this")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--alpha', type=float, default=0.999)
parser.add_argument('--moving-avg-momentum', type=float, default=0)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--soft-ce-weight', type=float, default=0.5)
parser.add_argument('--soft-tri-weight', type=float, default=0.8)
parser.add_argument('--epochs', type=int, default=400)
parser.add_argument('--iters', type=int, default=300)
parser.add_argument('--lambda-value', type=float, default=0)
# training configs
parser.add_argument('--rr-gpu', action='store_true',
help="use GPU for accelerating clustering")
# parser.add_argument('--init-1', type=str, default='logs/personxTOpersonxval/resnet_ibn50a-pretrain-1_gem_RA//model_best.pth.tar', metavar='PATH')
parser.add_argument('--init-1', type=str,
default='logs/market1501TOdukemtmc/resnet50-pretrain-1005/model_best.pth.tar',
metavar='PATH')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=100)
parser.add_argument('--eval-step', type=int, default=5)
parser.add_argument('--n-jobs', type=int, default=8)
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs/d2m_baseline/tmp'))
parser.add_argument('--lambda-tri', type=float, default=1.0)
parser.add_argument('--lambda-reg', type=float, default=1.0)
parser.add_argument('--lambda-ct', type=float, default=0.05)
    parser.add_argument('--uncer-mode', type=float, default=0, help='0 mean, 1 max, 2 min')
print("======mmt_train_dbscan_self-labeling=======")
main() | 23,722 | 40.692443 | 151 | py |
UDAStrongBaseline | UDAStrongBaseline-master/sbs_traindbscan.py | from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
from sklearn.cluster import DBSCAN
# from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F
# from torch.nn import init
#
# from UDAsbs.utils.rerank import compute_jaccard_dist
from UDAsbs import datasets, sinkhornknopp as sk
from UDAsbs import models
from UDAsbs.trainers import DbscanBaseTrainer
from UDAsbs.evaluators import Evaluator, extract_features
from UDAsbs.utils.data import IterLoader
from UDAsbs.utils.data import transforms as T
from UDAsbs.utils.data.sampler import RandomMultipleGallerySampler
from UDAsbs.utils.data.preprocessor import Preprocessor
from UDAsbs.utils.logging import Logger
from UDAsbs.utils.serialization import load_checkpoint, save_checkpoint#, copy_state_dict
from UDAsbs.memorybank.NCEAverage import onlinememory
from UDAsbs.utils.faiss_rerank import compute_jaccard_distance
# import ipdb
start_epoch = best_mAP = 0
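# Load a dataset and build an identity -> list-of-sample-indices map from its
# training split.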
def get_data(name, data_dir, l=1):
root = osp.join(data_dir)
dataset = datasets.create(name, root, l)
label_dict = {}
for i, item_l in enumerate(dataset.train):
# dataset.train[i]=(item_l[0],0,item_l[2])
if item_l[1] in label_dict:
label_dict[item_l[1]].append(i)
else:
label_dict[item_l[1]] = [i]
return dataset, label_dict
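# Training loader: random flip / pad-crop / erasing augmentation, an
# identity-balanced RandomMultipleGallerySampler when num_instances > 0, and an
# IterLoader whose length is fixed by `iters`.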
def get_train_loader(dataset, height, width, choice_c, batch_size, workers,
num_instances, iters, trainset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
T.RandomErasing(probability=0.5, mean=[0.596, 0.558, 0.497])
])
train_set = trainset #dataset.train if trainset is None else trainset
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances, choice_c)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer, mutual=True),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
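# Evaluation/clustering loader: resize + normalize only, no shuffling; defaults
# to the union of query and gallery when no explicit testset is given.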
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
from UDAsbs.models.dsbn import convert_dsbn
from torch.nn import Parameter
def copy_state_dict(state_dict, model, strip=None):
tgt_state = model.state_dict()
copied_names = set()
for name, param in state_dict.items():
name = name.replace('module.', '')
if strip is not None and name.startswith(strip):
name = name[len(strip):]
if name not in tgt_state:
continue
if isinstance(param, Parameter):
param = param.data
if param.size() != tgt_state[name].size():
print('mismatch:', name, param.size(), tgt_state[name].size())
continue
tgt_state[name].copy_(param)
copied_names.add(name)
missing = set(tgt_state.keys()) - copied_names
if len(missing) > 0:
print("missing keys in state_dict:", missing)
return model
def create_model(args, ncs, wopre=False):
model_1 = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=ncs)
model_1_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=ncs)
if not wopre:
initial_weights = load_checkpoint(args.init_1)
copy_state_dict(initial_weights['state_dict'], model_1)
copy_state_dict(initial_weights['state_dict'], model_1_ema)
        print('load pretrained model: {}'.format(args.init_1))
# adopt domain-specific BN
convert_dsbn(model_1)
convert_dsbn(model_1_ema)
model_1.cuda()
model_1_ema.cuda()
model_1 = nn.DataParallel(model_1)
model_1_ema = nn.DataParallel(model_1_ema)
for i, cl in enumerate(ncs):
exec('model_1_ema.module.classifier{}_{}.weight.data.copy_(model_1.module.classifier{}_{}.weight.data)'.format(i,cl,i,cl))
return model_1, None, model_1_ema, None
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
class Optimizer:
def __init__(self, target_label, m, dis_gt, t_loader,N, hc=3, ncl=None, n_epochs=200,
weight_decay=1e-5, ckpt_dir='/',fc_len=3500):
self.num_epochs = n_epochs
self.momentum = 0.9
self.weight_decay = weight_decay
self.checkpoint_dir = ckpt_dir
self.N=N
self.resume = True
self.checkpoint_dir = None
self.writer = None
# model stuff
self.hc = len(ncl)#10
self.K = ncl#3000
self.K_c =[fc_len for _ in range(len(ncl))]
self.model = m
self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.L = [torch.LongTensor(target_label[i]).to(self.dev) for i in range(len(self.K))]
self.nmodel_gpus = 4#len()
self.pseudo_loader = t_loader#torch.utils.data.DataLoader(t_loader,batch_size=256)
# can also be DataLoader with less aug.
self.train_loader = t_loader
self.lamb = 25#args.lamb # the parameter lambda in the SK algorithm
self.cpu=True
self.dis_gt=dis_gt
dtype_='f64'
if dtype_ == 'f32':
self.dtype = torch.float32 if not self.cpu else np.float32
else:
self.dtype = torch.float64 if not self.cpu else np.float64
self.outs = self.K
        # activations of the second-to-last layer are saved when using multiple heads.
self.presize = 2048#4096 #
def optimize_labels(self):
if self.cpu:
sk.cpu_sk(self)
else:
sk.gpu_sk(self)
# save Label-assignments: optional
# torch.save(self.L, os.path.join(self.checkpoint_dir, 'L', str(niter) + '_L.gz'))
# free memory
data = 0
self.PS = 0
return self.L
import collections
def func(x, a, b, c):
return a * np.exp(-b * x) + c
def write_sta_im(train_loader):
label2num=collections.defaultdict(int)
save_label=[]
for x in train_loader:
label2num[x[1]]+=1
save_label.append(x[1])
labels=sorted(label2num.items(),key=lambda item:item[1])[::-1]
num = [j for i, j in labels]
distribution = np.array(num)/len(train_loader)
return num,save_label
def print_cluster_acc(label_dict,target_label_tmp):
num_correct = 0
for pid in label_dict:
pid_index = np.asarray(label_dict[pid])
pred_label = np.argmax(np.bincount(target_label_tmp[pid_index]))
num_correct += (target_label_tmp[pid_index] == pred_label).astype(np.float32).sum()
cluster_accuracy = num_correct / len(target_label_tmp)
    print(f'cluster accuracy: {cluster_accuracy:.3f}')
class uncer(object):
def __init__(self):
self.sm = torch.nn.Softmax(dim=1)
self.log_sm = torch.nn.LogSoftmax(dim=1)
# self.cross_batch=CrossBatchMemory()
self.kl_distance = nn.KLDivLoss(reduction='none')
def kl_cal(self,pred1,pred1_ema):
variance = torch.sum(self.kl_distance(self.log_sm(pred1),
self.sm(pred1_ema.detach())), dim=1)
exp_variance = torch.exp(-variance)
return exp_variance
def main_worker(args):
global start_epoch, best_mAP
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create data loaders
iters = args.iters if (args.iters > 0) else None
ncs = [int(x) for x in args.ncs.split(',')]
# ncs_dbscan=ncs.copy()
dataset_target, label_dict = get_data(args.dataset_target, args.data_dir, len(ncs))
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
tar_cluster_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=dataset_target.train)
dataset_source, _ = get_data(args.dataset_source, args.data_dir, len(ncs))
sour_cluster_loader = get_test_loader(dataset_source, args.height, args.width, args.batch_size, args.workers,
testset=dataset_source.train)
train_loader_source = get_train_loader(dataset_source, args.height, args.width, 0, args.batch_size, args.workers,
args.num_instances, args.iters, dataset_source.train)
source_classes = dataset_source.num_train_pids
distribution,_ = write_sta_im(dataset_source.train)
fc_len = 3500
model_1, _, model_1_ema, _ = create_model(args, [fc_len for _ in range(len(ncs))])
# print(model_1)
epoch = 0
target_features_dict, _ = extract_features(model_1_ema, tar_cluster_loader, print_freq=100)
target_features = F.normalize(torch.stack(list(target_features_dict.values())), dim=1)
# Calculate distance
print('==> Create pseudo labels for unlabeled target domain')
rerank_dist = compute_jaccard_distance(target_features, k1=args.k1, k2=args.k2)
del target_features
if (epoch == 0):
# DBSCAN cluster
eps = 0.6 # 0.6
print('Clustering criterion: eps: {:.3f}'.format(eps))
cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=-1)
# select & cluster images as training set of this epochs
pseudo_labels = cluster.fit_predict(rerank_dist)
# num_ids = len(set(pseudo_labels)) - (1 if -1 in pseudo_labels else 0)
plabel=[]
new_dataset=[]
for i, (item, label) in enumerate(zip(dataset_target.train, pseudo_labels)):
if label == -1:
continue
plabel.append(label)
new_dataset.append((item[0], label, item[-1]))
target_label = [plabel]
ncs = [len(set(plabel)) + 1]
        print('new classes: {}, length of new dataset: {}'.format(ncs, len(new_dataset)))
model_1.module.classifier0_3500 = nn.Linear(2048, ncs[0]+source_classes, bias=False).cuda()
model_1_ema.module.classifier0_3500 = nn.Linear(2048, ncs[0]+source_classes, bias=False).cuda()
model_1.module.classifier3_0_3500 = nn.Linear(1024, ncs[0]+source_classes, bias=False).cuda()
model_1_ema.module.classifier3_0_3500 = nn.Linear(1024, ncs[0]+source_classes, bias=False).cuda()
print(model_1.module.classifier0_3500)
# if epoch !=0:
# model_1.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
# model_1_ema.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
# Initialize source-domain class centroids
print("==> Initialize source-domain class centroids in the hybrid memory")
source_features, _ = extract_features(model_1, sour_cluster_loader, print_freq=50)
sour_fea_dict = collections.defaultdict(list)
    print("==> Finished extracting source features; computing class centroids")
for f, pid, _ in sorted(dataset_source.train):
sour_fea_dict[pid].append(source_features[f].unsqueeze(0))
source_centers = [torch.cat(sour_fea_dict[pid], 0).mean(0) for pid in sorted(sour_fea_dict.keys())]
source_centers = torch.stack(source_centers, 0)
source_centers = F.normalize(source_centers, dim=1)
del sour_fea_dict, source_features, sour_cluster_loader
# Evaluator
evaluator_1 = Evaluator(model_1)
evaluator_1_ema = Evaluator(model_1_ema)
clusters = [args.num_clusters] * args.epochs# TODO: dropout clusters
k_memory=8192
contrast = onlinememory(2048, len(new_dataset),sour_numclass=source_classes,K=k_memory+source_classes,
index2label=target_label, choice_c=args.choice_c, T=0.07,
use_softmax=True).cuda()
contrast.index_memory = torch.cat((torch.arange(source_classes), -1*torch.ones(k_memory).long()), dim=0).cuda()
contrast.memory = torch.cat((source_centers, torch.rand(k_memory, 2048)), dim=0).cuda()
tar_selflabel_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=new_dataset)
o = Optimizer(target_label, dis_gt=distribution, m=model_1, ncl=ncs,
t_loader=tar_selflabel_loader, N=len(new_dataset), fc_len=fc_len)
uncertainty=collections.defaultdict(list)
    print("Training beginning~~~~~~!!!!!!!!!")
for epoch in range(len(clusters)):
iters_ = 300 if epoch % 1== 0 else iters
if epoch % 6 == 0 and epoch !=0:
target_features_dict, _ = extract_features(model_1_ema, tar_cluster_loader, print_freq=50)
target_features = torch.stack(list(target_features_dict.values()))
target_features = F.normalize(target_features, dim=1)
            print('==> Create pseudo labels for unlabeled target domain with DBSCAN')
rerank_dist = compute_jaccard_distance(target_features, k1=args.k1, k2=args.k2)
# select & cluster images as training set of this epochs
pseudo_labels = cluster.fit_predict(rerank_dist)
plabel = []
new_dataset = []
for i, (item, label) in enumerate(zip(dataset_target.train, pseudo_labels)):
if label == -1:continue
plabel.append(label)
new_dataset.append((item[0], label, item[-1]))
target_label = [plabel]
ncs = [len(set(plabel)) + 1]
tar_selflabel_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=new_dataset)
o = Optimizer(target_label, dis_gt=distribution, m=model_1, ncl=ncs,
t_loader=tar_selflabel_loader, N=len(new_dataset),fc_len=fc_len)
contrast.index_memory = torch.cat((torch.arange(source_classes), -1 * torch.ones(k_memory).long()),
dim=0).cuda()
model_1.module.classifier0_3500 = nn.Linear(2048, ncs[0] + source_classes, bias=False).cuda()
model_1_ema.module.classifier0_3500 = nn.Linear(2048, ncs[0] + source_classes, bias=False).cuda()
model_1.module.classifier3_0_3500 = nn.Linear(1024, ncs[0] + source_classes, bias=False).cuda()
model_1_ema.module.classifier3_0_3500 = nn.Linear(1024, ncs[0] + source_classes, bias=False).cuda()
print(model_1.module.classifier0_3500)
# if epoch !=0:
# model_1.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
# model_1_ema.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
target_label_o = o.L
target_label = [list(np.asarray(target_label_o[0].data.cpu())+source_classes)]
contrast.index2label = [[i for i in range(source_classes)] + target_label[0]]
# change pseudo labels
for i in range(len(new_dataset)):
new_dataset[i] = list(new_dataset[i])
for j in range(len(ncs)):
new_dataset[i][j+1] = int(target_label[j][i])
new_dataset[i] = tuple(new_dataset[i])
        cc = args.choice_c  # (args.choice_c+1)%len(ncs)
train_loader_target = get_train_loader(dataset_target, args.height, args.width, cc,
args.batch_size, args.workers, args.num_instances, iters_, new_dataset)
# Optimizer
params = []
flag = 1.0
# if 20<epoch<=40 or 60<epoch<=80 or 120<epoch:
# flag=0.1
# else:
# flag=1.0
for key, value in model_1.named_parameters():
if not value.requires_grad:
print(key)
continue
params += [{"params": [value], "lr": args.lr*flag, "weight_decay": args.weight_decay}]
optimizer = torch.optim.Adam(params)
# Trainer
trainer = DbscanBaseTrainer(model_1, model_1_ema, contrast,
num_cluster=ncs, alpha=args.alpha, fc_len=fc_len)
train_loader_target.new_epoch()
train_loader_source.new_epoch()
trainer.train(epoch, train_loader_target, train_loader_source, optimizer, args.choice_c,
print_freq=args.print_freq, train_iters=iters_)
def save_model(model_ema, is_best, best_mAP, mid):
save_checkpoint({
'state_dict': model_ema.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'model' + str(mid) + '_checkpoint.pth.tar'))
if epoch==20:
args.eval_step=2
elif epoch==40:
args.eval_step=1
if ((epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1)):
            # evaluator_1.evaluate(...) on the student model is skipped here;
            # only the EMA (teacher) model is scored.
            mAP_1 = 0
mAP_2 = evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery,
cmc_flag=False)
is_best = (mAP_1 > best_mAP) or (mAP_2 > best_mAP)
best_mAP = max(mAP_1, mAP_2, best_mAP)
save_model(model_1, (is_best), best_mAP, 1)
save_model(model_1_ema, (is_best and (mAP_1 <= mAP_2)), best_mAP, 2)
print('\n * Finished epoch {:3d} model no.1 mAP: {:5.1%} model no.2 mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP_1, mAP_2, best_mAP, ' *' if is_best else ''))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="MMT Training")
# data
parser.add_argument('-st', '--dataset-source', type=str, default='market1501',
choices=datasets.names())
parser.add_argument('-tt', '--dataset-target', type=str, default='dukemtmc',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--choice_c', type=int, default=0)
parser.add_argument('--num-clusters', type=int, default=700)
parser.add_argument('--ncs', type=str, default='60')
parser.add_argument('--k1', type=int, default=30,
help="hyperparameter for jaccard distance")
parser.add_argument('--k2', type=int, default=6,
help="hyperparameter for jaccard distance")
parser.add_argument('--height', type=int, default=256,
help="input height")
parser.add_argument('--width', type=int, default=128,
help="input width")
parser.add_argument('--num-instances', type=int, default=4,
                        help="each minibatch consists of "
                             "(batch_size // num_instances) identities, and "
                             "each identity has num_instances instances; "
                             "set 0 to disable the identity sampler")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50_multi',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate of new parameters, for pretrained "
"parameters it is 10 times smaller than this")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--alpha', type=float, default=0.999)
parser.add_argument('--moving-avg-momentum', type=float, default=0)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--soft-ce-weight', type=float, default=0.5)
parser.add_argument('--soft-tri-weight', type=float, default=0.8)
parser.add_argument('--epochs', type=int, default=400)
parser.add_argument('--iters', type=int, default=300)
parser.add_argument('--lambda-value', type=float, default=0)
# training configs
parser.add_argument('--rr-gpu', action='store_true',
help="use GPU for accelerating clustering")
# parser.add_argument('--init-1', type=str, default='logs/personxTOpersonxval/resnet_ibn50a-pretrain-1_gem_RA//model_best.pth.tar', metavar='PATH')
parser.add_argument('--init-1', type=str,
default='logs/market1501TOdukemtmc/resnet50-pretrain-1005/model_best.pth.tar',
metavar='PATH')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=100)
parser.add_argument('--eval-step', type=int, default=5)
parser.add_argument('--n-jobs', type=int, default=8)
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs/d2m_baseline/tmp'))
parser.add_argument('--lambda-tri', type=float, default=1.0)
parser.add_argument('--lambda-reg', type=float, default=1.0)
parser.add_argument('--lambda-ct', type=float, default=0.05)
    parser.add_argument('--uncer-mode', type=float, default=0, help='0 mean, 1 max, 2 min')
print("======mmt_train_dbscan_self-labeling=======")
main() | 22,980 | 40.0375 | 151 | py |
UDAStrongBaseline | UDAStrongBaseline-master/source_pretrain.py | from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from UDAsbs import datasets
from UDAsbs import models
from UDAsbs.trainers import PreTrainer, PreTrainer_multi
from UDAsbs.evaluators import Evaluator
from UDAsbs.utils.data import IterLoader
from UDAsbs.utils.data import transforms as T
from UDAsbs.utils.data.sampler import RandomMultipleGallerySampler
from UDAsbs.utils.data.preprocessor import Preprocessor
from UDAsbs.utils.logging import Logger
from UDAsbs.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
from UDAsbs.utils.lr_scheduler import WarmupMultiStepLR
start_epoch = best_mAP = 0
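# Build the dataset plus its train and test loaders; also returns the number of
# training identities used to size the classifier.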
def get_data(name, data_dir, height, width, batch_size, workers, num_instances, iters=200):
root = osp.join(data_dir)
dataset = datasets.create(name, root)
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_set = dataset.train
num_classes = dataset.num_train_pids
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
# T.AugMix(),
T.ToTensor(),
normalizer
])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
test_loader = DataLoader(
Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return dataset, num_classes, train_loader, test_loader
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
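# Supervised pre-training on the labeled source domain: PreTrainer consumes
# batches from both domains, the learning rate warms up and then decays at the
# given milestones, the checkpoint with the best source mAP is kept, and the
# model is finally evaluated on the target domain.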
def main_worker(args):
global start_epoch, best_mAP
cudnn.benchmark = True
if not args.evaluate:
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
else:
log_dir = osp.dirname(args.resume)
sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create data loaders
iters = args.iters if (args.iters>0) else None
dataset_source, num_classes, train_loader_source, test_loader_source = \
get_data(args.dataset_source, args.data_dir, args.height,
args.width, args.batch_size, args.workers, args.num_instances, iters)
dataset_target, _, train_loader_target, test_loader_target = \
get_data(args.dataset_target, args.data_dir, args.height,
args.width, args.batch_size, args.workers, 0, iters)
# Create model
model = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=[num_classes])
model.cuda()
model = nn.DataParallel(model)
print(model)
# Load from checkpoint
if args.resume:
checkpoint = load_checkpoint(args.resume)
copy_state_dict(checkpoint['state_dict'], model)
start_epoch = checkpoint['epoch']
best_mAP = checkpoint['best_mAP']
print("=> Start epoch {} best mAP {:.1%}"
.format(start_epoch, best_mAP))
# Evaluator
evaluator = Evaluator(model)
# args.evaluate=True
if args.evaluate:
print("Test on source domain:")
evaluator.evaluate(test_loader_source, dataset_source.query, dataset_source.gallery, cmc_flag=True, rerank=args.rerank)
print("Test on target domain:")
evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True, rerank=args.rerank)
return
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]
optimizer = torch.optim.Adam(params)
lr_scheduler = WarmupMultiStepLR(optimizer, args.milestones, gamma=0.1, warmup_factor=0.01,
warmup_iters=args.warmup_step)
# Trainer
trainer = PreTrainer(model, num_classes, margin=args.margin) if 'multi' not in args.arch else PreTrainer_multi(model, num_classes, margin=args.margin)
# Start training
for epoch in range(start_epoch, args.epochs):
train_loader_source.new_epoch()
train_loader_target.new_epoch()
trainer.train(epoch, train_loader_source, train_loader_target, optimizer,
train_iters=len(train_loader_source), print_freq=args.print_freq)
lr_scheduler.step()
if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):
_, mAP = evaluator.evaluate(test_loader_source, dataset_source.query,
dataset_source.gallery, cmc_flag=True)
is_best = mAP > best_mAP
best_mAP = max(mAP, best_mAP)
save_checkpoint({
'state_dict': model.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
print('\n * Finished epoch {:3d} source mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP, best_mAP, ' *' if is_best else ''))
print("Test on target domain:")
evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True, rerank=args.rerank)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Pre-training on the source domain")
# data
parser.add_argument('-ds', '--dataset-source', type=str, default='market1501',
choices=datasets.names())
parser.add_argument('-dt', '--dataset-target', type=str, default='dukemtmc',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=4)
parser.add_argument('--height', type=int, default=256, help="input height")
parser.add_argument('--width', type=int, default=128, help="input width")
parser.add_argument('--num-instances', type=int, default=4,
                        help="each minibatch consists of "
                             "(batch_size // num_instances) identities, and "
                             "each identity has num_instances instances; "
                             "set 0 to disable the identity sampler")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate of new parameters, for pretrained ")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--warmup-step', type=int, default=10)
parser.add_argument('--milestones', nargs='+', type=int, default=[40, 70], help='milestones for the learning rate decay')
# training configs
parser.add_argument('--resume', type=str, default="", metavar='PATH')
#logs/market1501TOdukemtmc/resnet50-pretrain-1_gempooling/model_best.pth.tar
parser.add_argument('--evaluate', action='store_true',
help="evaluation only")
parser.add_argument('--eval-step', type=int, default=40)
parser.add_argument('--rerank', action='store_true',
help="evaluation only")
parser.add_argument('--epochs', type=int, default=80)
parser.add_argument('--iters', type=int, default=200)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=100)
parser.add_argument('--margin', type=float, default=0.0, help='margin for the triplet loss with batch hard')
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs'))
main()
| 9,253 | 39.946903 | 154 | py |
UDAStrongBaseline | UDAStrongBaseline-master/sbs_trainkmeans.py | from __future__ import print_function, absolute_import
import argparse
import os
import os.path as osp
import random
import numpy as np
import sys
from sklearn.cluster import DBSCAN,KMeans
# from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F
# from torch.nn import init
from UDAsbs import datasets, sinkhornknopp as sk
from UDAsbs import models
from UDAsbs.trainers import DbscanBaseTrainer
from UDAsbs.evaluators import Evaluator, extract_features
from UDAsbs.utils.data import IterLoader
from UDAsbs.utils.data import transforms as T
from UDAsbs.utils.data.sampler import RandomMultipleGallerySampler
from UDAsbs.utils.data.preprocessor import Preprocessor
from UDAsbs.utils.logging import Logger
from UDAsbs.utils.serialization import load_checkpoint, save_checkpoint#, copy_state_dict
from UDAsbs.models.memory_bank import onlinememory
from UDAsbs.utils.faiss_rerank import compute_jaccard_distance
# import ipdb
from UDAsbs.models.dsbn import convert_dsbn
from torch.nn import Parameter
import faiss
import collections
start_epoch = best_mAP = 0
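# Load a dataset; with shuffle=True the ground-truth training labels are zeroed
# out (the target set starts unlabeled) while label_dict keeps the true
# identity -> index map for measuring cluster accuracy.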
def get_data(name, data_dir, l=1, shuffle=False):
root = osp.join(data_dir)
dataset = datasets.create(name, root, l)
label_dict = {}
for i, item_l in enumerate(dataset.train):
if shuffle:
labels= tuple([0 for i in range(l)])
dataset.train[i]=(item_l[0],)+labels+(item_l[-1],)
if item_l[1] in label_dict:
label_dict[item_l[1]].append(i)
else:
label_dict[item_l[1]] = [i]
return dataset, label_dict
def get_train_loader(dataset, height, width, choice_c, batch_size, workers,
num_instances, iters, trainset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
T.RandomErasing(probability=0.5, mean=[0.596, 0.558, 0.497])
])
train_set = trainset #dataset.train if trainset is None else trainset
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances, choice_c)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer, mutual=True),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
def copy_state_dict(state_dict, model, strip=None):
tgt_state = model.state_dict()
copied_names = set()
for name, param in state_dict.items():
name = name.replace('module.', '')
if strip is not None and name.startswith(strip):
name = name[len(strip):]
if name not in tgt_state:
continue
if isinstance(param, Parameter):
param = param.data
if param.size() != tgt_state[name].size():
print('mismatch:', name, param.size(), tgt_state[name].size())
continue
tgt_state[name].copy_(param)
copied_names.add(name)
missing = set(tgt_state.keys()) - copied_names
if len(missing) > 0:
print("missing keys in state_dict:", missing)
return model
def create_model(args, ncs, wopre=False):
model_1 = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=ncs)
model_1_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=ncs)
if not wopre:
initial_weights = load_checkpoint(args.init_1)
copy_state_dict(initial_weights['state_dict'], model_1)
copy_state_dict(initial_weights['state_dict'], model_1_ema)
        print('load pretrained model: {}'.format(args.init_1))
# adopt domain-specific BN
convert_dsbn(model_1)
convert_dsbn(model_1_ema)
model_1.cuda()
model_1_ema.cuda()
model_1 = nn.DataParallel(model_1)
model_1_ema = nn.DataParallel(model_1_ema)
for i, cl in enumerate(ncs):
exec('model_1_ema.module.classifier{}_{}.weight.data.copy_(model_1.module.classifier{}_{}.weight.data)'.format(i,cl,i,cl))
return model_1, model_1_ema
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
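# Sinkhorn-Knopp self-labeling wrapper (sk.cpu_sk / sk.gpu_sk): optimize_labels()
# refreshes the pseudo-label tensors stored in self.L, one per clustering
# granularity in `ncl`.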
class Optimizer:
def __init__(self, target_label, m, dis_gt, t_loader,N, hc=3, ncl=None, n_epochs=200,
weight_decay=1e-5, ckpt_dir='/'):
self.num_epochs = n_epochs
self.momentum = 0.9
self.weight_decay = weight_decay
self.checkpoint_dir = ckpt_dir
self.N=N
self.resume = True
self.checkpoint_dir = None
self.writer = None
# model stuff
self.hc = len(ncl)#10
self.K = ncl#3000
self.model = m
self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.L = [torch.LongTensor(target_label[i]).to(self.dev) for i in range(len(self.K))]
self.nmodel_gpus = 4#len()
self.pseudo_loader = t_loader#torch.utils.data.DataLoader(t_loader,batch_size=256)
# can also be DataLoader with less aug.
self.train_loader = t_loader
self.lamb = 25#args.lamb # the parameter lambda in the SK algorithm
self.cpu=True
self.dis_gt=dis_gt
dtype_='f64'
if dtype_ == 'f32':
self.dtype = torch.float32 if not self.cpu else np.float32
else:
self.dtype = torch.float64 if not self.cpu else np.float64
self.outs = self.K
        # activations of the second-to-last layer are saved when using multiple heads.
self.presize = 2048
def optimize_labels(self):
if self.cpu:
sk.cpu_sk(self)
else:
sk.gpu_sk(self)
self.PS = 0
return self.L
def print_cluster_acc(label_dict,target_label_tmp):
num_correct = 0
for pid in label_dict:
pid_index = np.asarray(label_dict[pid])
pred_label = np.argmax(np.bincount(target_label_tmp[pid_index]))
num_correct += (target_label_tmp[pid_index] == pred_label).astype(np.float32).sum()
cluster_accuracy = num_correct / len(target_label_tmp)
    print(f'cluster accuracy: {cluster_accuracy:.3f}')
def main_worker(args):
global start_epoch, best_mAP
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log{}.txt'.format(args.cluster_iter)))
print("==========\nArgs:{}\n==========".format(args))
iters = args.iters if (args.iters > 0) else None
ncs = [int(x) for x in args.ncs.split(',')]
if args.cluster_iter==10: args.epochs = 80
# Create data loaders
dataset_target, label_dict = get_data(args.dataset_target, args.data_dir, len(ncs),True)
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
tar_cluster_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=dataset_target.train)
dataset_source, _ = get_data(args.dataset_source, args.data_dir, len(ncs))
sour_cluster_loader = get_test_loader(dataset_source, args.height, args.width, args.batch_size, args.workers,
testset=dataset_source.train)
train_loader_source = get_train_loader(dataset_source, args.height, args.width, 0, args.batch_size, args.workers,
args.num_instances, args.iters, dataset_source.train)
model_1, model_1_ema = create_model(args, [fc_len for fc_len in ncs])
target_features_dict, _ = extract_features(model_1_ema, tar_cluster_loader, print_freq=100)
target_features = F.normalize(torch.stack(list(target_features_dict.values())), dim=1)
# Calculate distance
print('==> Create pseudo labels for unlabeled target domain')
cluster_name='kmeans'
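    # Two clustering back-ends: DBSCAN on the Jaccard (re-ranking) distance, or
    # k-means (faiss) on the normalized features, one run per granularity in ncs.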
if cluster_name=='dbscan':
rerank_dist = compute_jaccard_distance(target_features, k1=args.k1, k2=args.k2)
del target_features
# DBSCAN cluster
eps = 0.6 # 0.6
print('Clustering criterion: eps: {:.3f}'.format(eps))
cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=-1)
# select & cluster images as training set of this epochs
pseudo_labels = cluster.fit_predict(rerank_dist)
# num_ids = len(set(pseudo_labels)) - (1 if -1 in pseudo_labels else 0)
plabel=[]
new_dataset=[]
for i, (item, label) in enumerate(zip(dataset_target.train, pseudo_labels)):
if label == -1:
continue
plabel.append(label)
new_dataset.append((item[0], label, item[-1]))
target_label = [plabel]
ncs = [len(set(plabel)) +1]
        print('new classes: {}, length of new dataset: {}'.format(ncs, len(new_dataset)))
else:
prenc_i = -1
moving_avg_features = target_features.numpy()
target_label = []
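        # One k-means run per granularity; cached label files
        # (target_label{K}_{iter}.npy) are reused when present, and identical
        # consecutive K values reuse the previous assignment.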
for nc_i in ncs:
plabel_path = os.path.join(args.logs_dir,'target_label{}_{}.npy'.format(nc_i, args.cluster_iter))
if os.path.exists(plabel_path):
target_label_tmp = np.load(plabel_path)
print('\n {} existing\n'.format(plabel_path))
else:
if prenc_i == nc_i:
target_label.append(target_label_tmp)
print_cluster_acc(label_dict, target_label_tmp)
continue
# km = KMeans(n_clusters=nc_i, random_state=args.seed, n_jobs=args.n_jobs).fit(moving_avg_features)
# target_label_tmp = np.asarray(km.labels_)
# cluster_centers = np.asarray(km.cluster_centers_)
cluster = faiss.Kmeans(2048, nc_i, niter=300, verbose=True, gpu=True)
cluster.train(moving_avg_features)
_, labels = cluster.index.search(moving_avg_features, 1)
target_label_tmp = labels.reshape(-1)
target_label.append(target_label_tmp)
print_cluster_acc(label_dict, target_label_tmp)
prenc_i=nc_i
new_dataset = dataset_target.train
# Initialize source-domain class centroids
print("==> Initialize source-domain class centroids in the hybrid memory")
source_features, _ = extract_features(model_1, sour_cluster_loader, print_freq=50)
sour_fea_dict = collections.defaultdict(list)
print("==> Ending source-domain class centroids in the hybrid memory")
for item in sorted(dataset_source.train):
f=item[0]
pid=item[1]
sour_fea_dict[pid].append(source_features[f].unsqueeze(0))
source_centers = [torch.cat(sour_fea_dict[pid], 0).mean(0) for pid in sorted(sour_fea_dict.keys())]
source_centers = torch.stack(source_centers, 0)
source_centers = F.normalize(source_centers, dim=1)
del sour_fea_dict, source_features, sour_cluster_loader
# Evaluator
evaluator_1 = Evaluator(model_1)
evaluator_1_ema = Evaluator(model_1_ema)
source_classes = dataset_source.num_train_pids
k_memory=8192
contrast = onlinememory(2048, sour_numclass=source_classes,K=k_memory+source_classes,
index2label=target_label, choice_c=args.choice_c, T=0.07,
use_softmax=True).cuda()
contrast.index_memory = torch.cat((torch.arange(source_classes), -1*torch.ones(k_memory).long()), dim=0).cuda()
contrast.memory = torch.cat((source_centers, torch.rand(k_memory, 2048)), dim=0).cuda()
skin=True
if skin:
tar_selflabel_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,testset=new_dataset)
else:
tar_selflabel_loader=None
o = Optimizer(target_label, dis_gt=None, m=model_1_ema, ncl=ncs, t_loader=tar_selflabel_loader, N=len(new_dataset))
    print("Training beginning~~~~~~!!!!!!!!!")
for epoch in range(args.epochs):
iters_ = 300 if epoch % 1== 0 else iters
# if epoch % 6 == 0 and epoch != 0:
if epoch == args.epochs - 1:
prenc_i=-1
target_features_dict, _ = extract_features(model_1_ema, tar_cluster_loader, print_freq=50)
target_features = torch.stack(list(target_features_dict.values())) # torch.cat([target_features[f[0]].unsqueeze(0) for f in dataset_target.train], 0)
target_features = F.normalize(target_features, dim=1)
for in_, nc_i in enumerate(ncs):
if cluster_name == 'dbscan':
                    print('==> Create pseudo labels for unlabeled target domain with DBSCAN')
rerank_dist = compute_jaccard_distance(target_features, k1=args.k1, k2=args.k2)
# select & cluster images as training set of this epochs
pseudo_labels = cluster.fit_predict(rerank_dist)
plabel = []
new_dataset = []
for i, (item, label) in enumerate(zip(dataset_target.train, pseudo_labels)):
if label == -1: continue
plabel.append(label)
new_dataset.append((item[0], label, item[-1]))
target_label = [plabel]
ncs = [len(set(plabel)) + 1]
                    print('new classes: {}, length of new dataset: {}'.format(ncs, len(new_dataset)))
else:
if prenc_i == nc_i:
continue
print('\n Clustering into {} classes \n'.format(nc_i))
moving_avg_features = target_features.numpy()
km = KMeans(n_clusters=nc_i, random_state=args.seed, n_jobs=args.n_jobs).fit(moving_avg_features)
target_label_tmp = np.asarray(km.labels_)
cluster_centers = np.asarray(km.cluster_centers_)
# cluster = faiss.Kmeans(2048, nc_i, niter=300, verbose=True, gpu=True)
# cluster.train(moving_avg_features)
# _, labels = cluster.index.search(moving_avg_features, 1)
# target_label_tmp = labels.reshape(-1)
np.save("{}/target_label{}_{}.npy".format(args.logs_dir, nc_i, args.cluster_iter + 1), target_label_tmp)
# cluster_centers = cluster.centroids
print_cluster_acc(label_dict, target_label_tmp)
dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
o.L[in_] = torch.LongTensor(target_label_tmp).to(dev)
prenc_i = nc_i
break
# tar_selflabel_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
# testset=new_dataset)
# o = Optimizer(target_label, dis_gt=None, m=model_1, ncl=ncs,
# t_loader=tar_selflabel_loader, N=len(new_dataset),fc_len=fc_len)
contrast.index_memory = torch.cat((torch.arange(source_classes), -1 * torch.ones(k_memory).long()),
dim=0).cuda()
target_label_o = o.L
target_label = [np.asarray(target_label_o[i].data.cpu()) for i in range(len(ncs))]
target_label_mb = [list(np.asarray(target_label_o[i].data.cpu())+source_classes) for i in range(len(ncs))]
contrast.index2label = [[i for i in range(source_classes)] + target_label_mb[i] for i in range(len(ncs))]
for i in range(len(new_dataset)):
new_dataset[i] = list(new_dataset[i])
for j in range(len(ncs)):
new_dataset[i][j+1] = int(target_label[j][i])
new_dataset[i] = tuple(new_dataset[i])
#cc =(args.choice_c+1)%len(ncs)
train_loader_target = get_train_loader(dataset_target, args.height, args.width, args.choice_c,
args.batch_size, args.workers, args.num_instances, iters_, new_dataset)
# Optimizer
params = []
        if 40 < epoch <= 70:
            flag = 0.1
        elif 70 < epoch <= 80:
            flag = 0.01
        else:
            flag = 1.0
for key, value in model_1.named_parameters():
if not value.requires_grad:
print(key)
continue
params += [{"params": [value], "lr": args.lr*flag, "weight_decay": args.weight_decay}]
optimizer = torch.optim.Adam(params)
# Trainer
trainer = DbscanBaseTrainer(model_1, model_1_ema, contrast, num_cluster=ncs, alpha=args.alpha)
train_loader_target.new_epoch()
train_loader_source.new_epoch()
trainer.train(epoch, train_loader_target, train_loader_source, optimizer, args.choice_c,
print_freq=args.print_freq, train_iters=iters_)
o.optimize_labels()
def save_model(model_ema, is_best, best_mAP, mid):
save_checkpoint({
'state_dict': model_ema.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'model' + str(mid) + '_checkpoint.pth.tar'))
if epoch==20:
args.eval_step=2
elif epoch==50:
args.eval_step=1
if ((epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1)):
            # evaluator_1.evaluate(...) on the student model is skipped here;
            # only the EMA (teacher) model is scored.
            mAP_1 = 0
mAP_2 = evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery,
cmc_flag=False)
is_best = (mAP_1 > best_mAP) or (mAP_2 > best_mAP)
best_mAP = max(mAP_1, mAP_2, best_mAP)
save_model(model_1, (is_best), best_mAP, 1)
save_model(model_1_ema, (is_best and (mAP_1 <= mAP_2)), best_mAP, 2)
print('\n * Finished epoch {:3d} model no.1 mAP: {:5.1%} model no.2 mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP_1, mAP_2, best_mAP, ' *' if is_best else ''))
print('Test on the best model.')
checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
model_1_ema.load_state_dict(checkpoint['state_dict'])
evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="MMT Training")
# data
parser.add_argument('-st', '--dataset-source', type=str, default='market1501',
choices=datasets.names())
parser.add_argument('-tt', '--dataset-target', type=str, default='dukemtmc',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--choice_c', type=int, default=0)
parser.add_argument('--num-clusters', type=int, default=-1, help='discard')
parser.add_argument('--cluster-iter', type=int, default=10)
parser.add_argument('--ncs', type=str, default='600,700,800')
parser.add_argument('--k1', type=int, default=30, help="hyperparameter for jaccard distance")
parser.add_argument('--k2', type=int, default=6, help="hyperparameter for jaccard distance")
parser.add_argument('--height', type=int, default=256, help="input height")
parser.add_argument('--width', type=int, default=128, help="input width")
parser.add_argument('--num-instances', type=int, default=4,
                        help="each minibatch consists of "
                             "(batch_size // num_instances) identities, and "
                             "each identity has num_instances instances, "
                             "default: 0 (not used)")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50_multi',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate of new parameters, for pretrained "
"parameters it is 10 times smaller than this")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--alpha', type=float, default=0.999)
parser.add_argument('--moving-avg-momentum', type=float, default=0)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--soft-ce-weight', type=float, default=0.5)
parser.add_argument('--soft-tri-weight', type=float, default=0.8)
parser.add_argument('--epochs', type=int, default=400)
parser.add_argument('--iters', type=int, default=300)
parser.add_argument('--lambda-value', type=float, default=0)
# training configs
parser.add_argument('--rr-gpu', action='store_true',
help="use GPU for accelerating clustering")
parser.add_argument('--init-1', type=str,
default='logs/market1501TOdukemtmc/resnet50-pretrain-1005/model_best.pth.tar',
metavar='PATH')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=100)
parser.add_argument('--eval-step', type=int, default=5)
parser.add_argument('--n-jobs', type=int, default=8)
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs/d2m_baseline/tmp'))
parser.add_argument('--lambda-tri', type=float, default=1.0)
parser.add_argument('--lambda-reg', type=float, default=1.0)
parser.add_argument('--lambda-ct', type=float, default=1.0)
parser.add_argument('--uncer-mode', type=float, default=0, help='0 mean, 1 max, 2 min')
print("======mmt_train_dbscan_self-labeling=======")
main()
| 23,432 | 42.718284 | 162 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/evaluators.py | from __future__ import print_function, absolute_import
import time
from collections import OrderedDict
import numpy as np
import torch
from .evaluation_metrics import cmc, mean_ap
from .feature_extraction import extract_cnn_feature
from .utils.meters import AverageMeter
from .utils.rerank import re_ranking
def extract_features(model, data_loader, choice_c=0, adaibn=False, print_freq=100, metric=None):
# if adaibn==True:
# model.train()
# for i, item in enumerate(data_loader):
# imgs, fnames, pids = item[0], item[1], item[choice_c + 2]
# outputs = model(imgs)
# if (i + 1) % print_freq == 0:
# print('Extract Features: [{}/{}]\t'
# .format(i + 1, len(data_loader)))
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
features = OrderedDict()
labels = OrderedDict()
end = time.time()
with torch.no_grad():
for i, item in enumerate(data_loader):
imgs, fnames, pids =item[0], item[1], item[choice_c+2]
data_time.update(time.time() - end)
outputs = extract_cnn_feature(model, imgs)
for fname, output, pid in zip(fnames, outputs, pids):
features[fname] = output
labels[fname] = pid
batch_time.update(time.time() - end)
end = time.time()
if (i + 1) % print_freq == 0:
print('Extract Features: [{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
.format(i + 1, len(data_loader),
batch_time.val, batch_time.avg,
data_time.val, data_time.avg))
return features, labels
def pairwise_distance(features, query=None, gallery=None, metric=None):
if query is None and gallery is None:
n = len(features)
x = torch.cat(list(features.values()))
x = x.view(n, -1)
if metric is not None:
x = metric.transform(x)
dist_m = torch.pow(x, 2).sum(dim=1, keepdim=True) * 2
dist_m = dist_m.expand(n, n) - 2 * torch.mm(x, x.t())
return dist_m
x = torch.cat([features[item[0]].unsqueeze(0) for item in query], 0)
y = torch.cat([features[item[0]].unsqueeze(0) for item in gallery], 0)
m, n = x.size(0), y.size(0)
x = x.view(m, -1)
y = y.view(n, -1)
if metric is not None:
x = metric.transform(x)
y = metric.transform(y)
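    # squared Euclidean distances: ||x_i||^2 + ||y_j||^2 - 2 * x_i . y_j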
dist_m = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()
dist_m.addmm_(1, -2, x, y.t())
return dist_m, x.numpy(), y.numpy()
from .utils import to_numpy
def submission_visUDA(distmat,query_ids,gallery_ids,query,gallery):
#TODO
query_name2index={}
    with open("/home/zhengkecheng3/data/reid/challenge_datasets/index_validation_query.txt", 'r') as f:  # the with statement closes the file automatically
line = f.readline()
while line:
eachline = line.split()
query_name2index[eachline[0]]=eachline[-1]
line = f.readline()
gallery_name2index = {}
with open("/home/zhengkecheng3/data/reid/challenge_datasets/index_validation_gallery.txt",
'r') as f:
line = f.readline()
while line:
eachline = line.split()
gallery_name2index[eachline[0]] = eachline[-1]
line = f.readline()
distmat = to_numpy(distmat)
indices = np.argsort(distmat, axis=1)
result={}
for i,x in enumerate(query_ids):
result[str(x)]=indices[i,:100]
with open('result.txt','w') as f:
for i in range(len(query_ids)):
indexs=result[str(i)]
out_str=""
for j in indexs:
item_now=(4-len(str(j)))*'0'+str(j)
out_str=out_str+item_now+" "
f.write(out_str[:-1]+'\n')
print(result)
def evaluate_all(query_features, gallery_features, distmat, query=None, gallery=None,
query_ids=None, gallery_ids=None,
query_cams=None, gallery_cams=None,
cmc_topk=(1, 5, 10), cmc_flag=False):
if query is not None and gallery is not None:
query_ids = [item[1] for item in query]
gallery_ids = [item[1] for item in gallery]
query_cams = [item[-1] for item in query]
gallery_cams = [item[-1] for item in gallery]
else:
assert (query_ids is not None and gallery_ids is not None
and query_cams is not None and gallery_cams is not None)
# submission_visUDA(distmat, query_ids, gallery_ids,query,gallery)
# Compute mean AP
mAP = mean_ap(distmat, query_ids, gallery_ids, query_cams, gallery_cams)
print('Mean AP: {:4.1%}'.format(mAP))
cmc_configs = {
'market1501': dict(separate_camera_set=False,
single_gallery_shot=False,
first_match_break=True)
}
cmc_scores = {name: cmc(distmat, query_ids, gallery_ids,
query_cams, gallery_cams, **params)
for name, params in cmc_configs.items()}
print('CMC Scores:')
for k in cmc_topk:
print(' top-{:<4}{:12.1%}'
.format(k,
cmc_scores['market1501'][k-1]))
if (not cmc_flag):
return mAP
return cmc_scores['market1501'][0], mAP
class Evaluator(object):
def __init__(self, model):
super(Evaluator, self).__init__()
self.model = model
def evaluate(self, data_loader, query, gallery, metric=None, cmc_flag=False, rerank=False, pre_features=None):
if (pre_features is None):
features, _ = extract_features(self.model, data_loader)
else:
features = pre_features
distmat, query_features, gallery_features = pairwise_distance(features, query, gallery, metric=metric)
if (not rerank):
results = evaluate_all(query_features, gallery_features, distmat, query=query, gallery=gallery, cmc_flag=cmc_flag)
return results
print('Applying person re-ranking ...')
distmat_qq,_,_ = pairwise_distance(features, query, query, metric=metric)
distmat_gg,_,_ = pairwise_distance(features, gallery, gallery, metric=metric)
distmat = re_ranking(distmat.numpy(), distmat_qq.numpy(), distmat_gg.numpy())
return evaluate_all(query_features, gallery_features, distmat, query=query, gallery=gallery, cmc_flag=cmc_flag)
| 6,592 | 37.109827 | 126 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/trainers.py | from __future__ import print_function, absolute_import
import time
import torch
import torch.nn as nn
from torch.nn import functional as F
from .evaluation_metrics import accuracy
from .loss import SoftTripletLoss_vallia, CrossEntropyLabelSmooth, SoftTripletLoss, SoftEntropy
from .memorybank.NCECriterion import MultiSoftmaxLoss, NCECriterion, NCESoftmaxLoss
from .utils.meters import AverageMeter
class PreTrainer_multi(object):
def __init__(self, model, num_classes, margin=0.0):
super(PreTrainer_multi, self).__init__()
self.model = model
self.criterion_ce = CrossEntropyLabelSmooth(num_classes).cuda()
self.criterion_triple = SoftTripletLoss_vallia(margin=margin).cuda()
def train(self, epoch, data_loader_source, data_loader_target, optimizer, train_iters=200, print_freq=1):
self.model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_ce = AverageMeter()
losses_tr = AverageMeter()
precisions = AverageMeter()
losses_ce_3 = AverageMeter()
losses_tr_3 = AverageMeter()
precisions_3 = AverageMeter()
end = time.time()
for i in range(train_iters):
# import ipdb
# ipdb.set_trace()
source_inputs = data_loader_source.next()
target_inputs = data_loader_target.next()
data_time.update(time.time() - end)
s_inputs, targets = self._parse_data(source_inputs)
t_inputs, _ = self._parse_data(target_inputs)
s_features, s_cls_out,_,_,s_cls_out_3,s_features_3 = self.model(s_inputs,training=True)
# target samples: only forward
self.model(t_inputs,training=True)
# backward main #
loss_ce, loss_tr, prec1 = self._forward(s_features, s_cls_out[0], targets)
loss_ce_3, loss_tr_3, prec1_3 = self._forward(s_features_3, s_cls_out_3[0], targets)
loss = loss_ce + loss_tr + loss_ce_3 + loss_tr_3
losses_ce.update(loss_ce.item())
losses_tr.update(loss_tr.item())
precisions.update(prec1)
losses_ce_3.update(loss_ce_3.item())
losses_tr_3.update(loss_tr_3.item())
precisions_3.update(prec1_3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % print_freq == 0):
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_ce {:.3f} ({:.3f})\t'
'Loss_tr {:.3f} ({:.3f})\t'
'Prec {:.2%} ({:.2%})\t'
'Loss_ce_3 {:.3f} ({:.3f})\t'
'Loss_tr_3 {:.3f} ({:.3f})\t'
'Prec_3 {:.2%} ({:.2%})'
.format(epoch, i + 1, train_iters,
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_ce.val, losses_ce.avg,
losses_tr.val, losses_tr.avg,
precisions.val, precisions.avg,
losses_ce_3.val, losses_ce_3.avg,
losses_tr_3.val, losses_tr_3.avg,
precisions_3.val, precisions_3.avg))
def _parse_data(self, inputs):
imgs, _, pids,_, _ = inputs#, pids, index
inputs = imgs.cuda()
targets = pids.cuda()
return inputs, targets
def _forward(self, s_features, s_outputs, targets):
loss_ce = self.criterion_ce(s_outputs, targets)
loss_tr = self.criterion_triple(s_features, s_features, targets)
prec, = accuracy(s_outputs.data, targets.data)
prec = prec[0]
return loss_ce, loss_tr, prec
class PreTrainer(object):
def __init__(self, model, num_classes, margin=0.0):
super(PreTrainer, self).__init__()
self.model = model
self.criterion_ce = CrossEntropyLabelSmooth(num_classes).cuda()
self.criterion_triple = SoftTripletLoss_vallia(margin=margin).cuda()
def train(self, epoch, data_loader_source, data_loader_target, optimizer, train_iters=200, print_freq=1):
self.model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_ce = AverageMeter()
losses_tr = AverageMeter()
precisions = AverageMeter()
end = time.time()
for i in range(train_iters):
# import ipdb
# ipdb.set_trace()
source_inputs = data_loader_source.next()
target_inputs = data_loader_target.next()
data_time.update(time.time() - end)
s_inputs, targets = self._parse_data(source_inputs)
t_inputs, _ = self._parse_data(target_inputs)
s_features, s_cls_out,_,_ = self.model(s_inputs,training=True)
# target samples: only forward
_,_,_,_= self.model(t_inputs,training=True)
# backward main #
loss_ce, loss_tr, prec1 = self._forward(s_features, s_cls_out[0], targets)
loss = loss_ce + loss_tr
losses_ce.update(loss_ce.item())
losses_tr.update(loss_tr.item())
precisions.update(prec1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % print_freq == 0):
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_ce {:.3f} ({:.3f})\t'
'Loss_tr {:.3f} ({:.3f})\t'
'Prec {:.2%} ({:.2%})'
.format(epoch, i + 1, train_iters,
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_ce.val, losses_ce.avg,
losses_tr.val, losses_tr.avg,
precisions.val, precisions.avg))
def _parse_data(self, inputs):
imgs, _, pids,_, _ = inputs#, pids, index
inputs = imgs.cuda()
targets = pids.cuda()
return inputs, targets
def _forward(self, s_features, s_outputs, targets):
loss_ce = self.criterion_ce(s_outputs, targets)
loss_tr = self.criterion_triple(s_features, s_features, targets)
prec, = accuracy(s_outputs.data, targets.data)
prec = prec[0]
return loss_ce, loss_tr, prec
class DbscanBaseTrainer_unc_ema(object):
def __init__(self, model_1, model_1_ema, contrast, contrast_center, contrast_center_sour, num_cluster=None,
c_name=None, alpha=0.999, fc_len=3000,source_classes=702,uncer_mode=0):
super(DbscanBaseTrainer_unc_ema, self).__init__()
self.model_1 = model_1
self.num_cluster = num_cluster
self.c_name = [fc_len for _ in range(len(num_cluster))]
self.model_1_ema = model_1_ema
self.uncer_mode=uncer_mode
self.alpha = alpha
self.criterion_ce = CrossEntropyLabelSmooth(self.num_cluster[0],False).cuda()
# self.criterion_tri = SoftTripletLoss(margin=0.0).cuda()
self.criterion_tri_uncer = SoftTripletLoss(margin=None,uncer_mode=self.uncer_mode).cuda()
self.source_classes = source_classes
self.contrast = contrast
# self.kl = nn.KLDivLoss()
self.sm = torch.nn.Softmax(dim=1)
self.log_sm = torch.nn.LogSoftmax(dim=1)
# self.cross_batch=CrossBatchMemory()
self.kl_distance = nn.KLDivLoss(reduction='none')
def train(self, epoch, data_loader_target, data_loader_source, optimizer, choice_c, lambda_tri=1.0
, lambda_ct=1.0, lambda_reg=0.06, print_freq=100, train_iters=200, uncertainty_d=None):
self.model_1.train()
self.model_1_ema.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_ce = [AverageMeter(), AverageMeter()]
losses_tri = [AverageMeter(), AverageMeter()]
loss_kldiv = AverageMeter()
loss_s = AverageMeter()
losses_tri_unc = AverageMeter()
contra_loss = AverageMeter()
precisions = [AverageMeter(), AverageMeter()]
end = time.time()
for i in range(train_iters):
target_inputs = data_loader_target.next()
source_inputs = data_loader_source.next()
data_time.update(time.time() - end)
# process inputs
items = self._parse_data(target_inputs)
items_source = self._parse_data(source_inputs)
inputs_1_t, inputs_2_t, index_t = items[0], items[1], items[-1]
inputs_1_s, inputs_2_s, index_s = items_source[0], items_source[1], items_source[-1]
inputs = self.range_spbn(inputs_1_s, inputs_1_t)
f_out, p_out, memory_f, _, p_out_3, f_out_3 = self.model_1(inputs, training=True)
f_out_s1, f_out_t1 = self.derange_spbn(f_out)
_, p_out_t1 = self.derange_spbn(p_out[0])
_, memory_f_t1 = self.derange_spbn(memory_f)
_, p_out_3_t1 = self.derange_spbn(p_out_3[0])
_, f_out_3_t1 = self.derange_spbn(f_out_3)
with torch.no_grad():
f_out_ema, p_out_ema, memory_f_ema, _, p_out_3_ema, f_out_3_ema \
= self.model_1_ema(inputs, training=True)
f_out_s1_ema, f_out_t1_ema = self.derange_spbn(f_out_ema)
_, p_out_t1_ema = self.derange_spbn(p_out_ema[0])
_, memory_f_t1_ema = self.derange_spbn(memory_f_ema)
_, p_out_3_t1_ema = self.derange_spbn(p_out_3_ema[0])
_, f_out_3_t1_ema = self.derange_spbn(f_out_3_ema)
with torch.no_grad():
queue = self.contrast.memory[:self.contrast.sour_numclass, :].clone()
ml_sour = torch.matmul(f_out_t1, queue.transpose(1, 0).detach())
ml_sour_ema = torch.matmul(f_out_t1_ema, queue.transpose(1, 0).detach())
########## [memory center]-level uncertainty
loss_ce_1, loss_reg, exp_variance = self.update_variance(items[2], p_out_t1, p_out_3_t1,
p_out_t1_ema, p_out_3_t1_ema, ml_sour, ml_sour_ema, f_out_t1, f_out_t1_ema)
loss_ce_1 = loss_ce_1#(loss_ce_1+loss_ce_1_3)/2.0
exp_variance_np=exp_variance.data.cpu().numpy()
for i_num,i_un in enumerate(index_t.data.cpu().numpy()):
uncertainty_d[i_un].append(exp_variance_np[i_num])
# exp_variance=torch.tensor(0)
loss_kl = exp_variance.mean()
contra_loss_instance, contra_loss_center, _, _ = \
self.contrast(memory_f_t1, f_out_s1, f_out_t1, f_out_t1_ema, index_t, items_source[2], exp_variance, epoch=epoch)
########## feature-level uncertainty
# loss_ce_1, exp_variance = self.update_variance_self(items[2], p_out_t1, f_out_t1, f_out_t1_ema )
########## normal ce loss
loss_ce_1_norm = torch.tensor(0)#(self.criterion_ce(p_out_t1, items[2]) +self.criterion_ce(p_out_3_t1, items[2])) / 2.0
########## uncertainty hard triplet loss
loss_tri_unc = self.criterion_tri_uncer(f_out_t1, f_out_t1_ema, items[2], exp_variance)
if epoch % 6 != 0:
loss = loss_ce_1 + lambda_tri*loss_tri_unc + lambda_reg*loss_reg + lambda_ct*contra_loss_instance + contra_loss_center
else:
loss = loss_ce_1 + lambda_tri*loss_tri_unc + lambda_reg*loss_reg + contra_loss_center
optimizer.zero_grad()
loss.backward()
optimizer.step()
self._update_ema_variables(self.model_1, self.model_1_ema, self.alpha, epoch * len(data_loader_target) + i)
prec_1, = accuracy(p_out_t1.data, items[choice_c + 2].data)
losses_ce[0].update(loss_ce_1.item())
losses_ce[1].update(loss_ce_1_norm.item())
# losses_tri[0].update(loss_tri_1.item())
loss_s.update(contra_loss_center.item())
loss_kldiv.update(loss_kl.item())
losses_tri_unc.update(loss_tri_unc.item())
contra_loss.update(contra_loss_instance.item())
precisions[0].update(prec_1[0])
# print log #
batch_time.update(time.time() - end)
end = time.time()
if (i + 1) % print_freq == 1:
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_ce {:.3f} / {:.3f}\t'
'loss_kldiv {:.3f}\t'
'Loss_tri {:.3f} / Loss_tri_soft {:.3f} \t'
'contra_loss_center {:.3f}\t'
'contra_loss {:.3f}\t'
'Prec {:.2%} / {:.2%}\t'
.format(epoch, i, len(data_loader_target),
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_ce[0].avg, losses_ce[1].avg, loss_kldiv.avg,
losses_tri[0].avg, losses_tri_unc.avg, loss_s.avg, contra_loss.avg,
precisions[0].avg, precisions[1].avg))
return uncertainty_d
def update_variance(self, labels, pred1, pred2, pred_ema, pred2_ema, ml_sour, ml_sour_ema,f_out_t1,f_out_t1_ema):
#items[2], p_out_t1, p_out_3_t1, p_out_t1_ema, ml_sour,ml_sour_ema,f_out_t1,f_out_t1_ema)
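        # per-sample uncertainty: KL divergence between the student's and the EMA
        # teacher's predictions over the concatenated [cluster logits, source-prototype
        # similarities]; exp(-KL) down-weights the cross-entropy of unreliable samples
        # below, while mean(KL) is returned as the regulariser loss_reg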
loss_4layer = self.criterion_ce(pred1, labels)
loss_3layer = self.criterion_ce(pred2, labels)
only_sour=False
if only_sour:
variance = torch.sum(self.kl_distance(self.log_sm(ml_sour), self.sm(ml_sour_ema.detach())), dim=1)
else:
# variance = torch.sum(self.kl_distance(self.log_sm(pred2), self.sm(pred_ema.detach())), dim=1)
# variance = (torch.sum(self.kl_distance(self.log_sm(ml_sour), self.sm(ml_sour_ema.detach())), dim=1) +
# torch.sum(self.kl_distance(self.log_sm(pred1), self.sm(pred2_ema.detach())), dim=1)) / 2.0
variance = torch.sum(self.kl_distance(self.log_sm(torch.cat((pred2,ml_sour),1)), self.sm(torch.cat((pred2_ema,ml_sour_ema),1).detach())), dim=1)
# variance = ( torch.sum(self.kl_distance(self.log_sm(torch.cat((pred1,ml_sour),1)), self.sm(torch.cat((pred2,ml_sour_ema),1).detach())), dim=1)
# +torch.sum(self.kl_distance(self.log_sm(f_out_t1),self.sm(f_out_t1_ema.detach())), dim=1) )/2.0
# variance = (torch.sum(self.kl_distance(self.log_sm(torch.cat((pred1,ml_sour),1)), self.sm(torch.cat((pred2 ,ml_sour_ema),1).detach())), dim=1)+\
# torch.sum(self.kl_distance(self.log_sm(torch.cat((pred1,ml_sour),1)), self.sm(torch.cat((pred_ema,ml_sour_ema),1).detach())), dim=1))/2.0
# variance = (torch.sum(self.kl_distance(self.log_sm(pred1),self.sm(pred2.detach())), dim=1) + \
# torch.sum(self.kl_distance(self.log_sm(pred1),self.sm(pred_ema.detach())), dim=1)) / 2.0
exp_variance = torch.exp(-variance)
loss = torch.mean(loss_4layer * exp_variance) + torch.mean(loss_3layer* exp_variance)
loss_reg = torch.mean(variance)
return loss,loss_reg,exp_variance
def update_variance_self(self, labels, pred1, tri_t, tri_t_ema):
loss = self.criterion_ce(pred1, labels)
variance = torch.sum(self.kl_distance(self.log_sm(tri_t),self.sm(tri_t_ema)), dim=1)
exp_variance = torch.exp(-variance)
loss = torch.mean(loss * exp_variance) + torch.mean(variance)
return loss, exp_variance
def softmax_kl_loss(self, input_logits, target_logits):
"""Takes softmax on both sides and returns KL divergence
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
input_log_softmax = F.log_softmax(input_logits, dim=1)
target_softmax = F.softmax(target_logits / 0.2, dim=1)
return F.kl_div(input_log_softmax, target_softmax, size_average=False)
def range_spbn(self, inputs_1_s, inputs_1_t):
# arrange batch for domain-specific BN
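        # interleave the two domains so every GPU replica receives an equal share of
        # source and target images: [S_chunk_0, T_chunk_0, S_chunk_1, T_chunk_1, ...]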
device_num = torch.cuda.device_count()
B, C, H, W = inputs_1_s.size()
def reshape(inputs):
return inputs.view(device_num, -1, C, H, W)
inputs_1_s, inputs_1_t = reshape(inputs_1_s), reshape(inputs_1_t)
inputs = torch.cat((inputs_1_s, inputs_1_t), 1).view(-1, C, H, W)
return inputs
def derange_spbn(self, f_out):
device_num = torch.cuda.device_count()
# de-arrange batch
f_out = f_out.view(device_num, -1, f_out.size(-1))
f_out_s, f_out_t = f_out.split(f_out.size(1) // 2, dim=1)
f_out_s, f_out_t = f_out_s.contiguous().view(-1, f_out.size(-1)), f_out_t.contiguous().view(-1, f_out.size(-1))
return f_out_s, f_out_t
def get_shuffle_ids(self, bsz):
"""generate shuffle ids for shufflebn"""
forward_inds = torch.randperm(bsz).long().cuda()
backward_inds = torch.zeros(bsz).long().cuda()
value = torch.arange(bsz).long().cuda()
backward_inds.index_copy_(0, forward_inds, value)
return forward_inds, backward_inds
def _update_ema_variables(self, model, ema_model, alpha, global_step):
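        # mean-teacher style update: ema = alpha * ema + (1 - alpha) * param, with
        # alpha ramped up from 0 so the teacher tracks the student closely at first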
alpha = min(1 - 1 / (global_step + 1), alpha)
for (ema_name, ema_param), (model_name, param) in zip(ema_model.named_parameters(), model.named_parameters()):
ema_param.data.mul_(alpha).add_(1 - alpha, param.data)
def _parse_data(self, inputs):
# imgs_1, imgs_2, pids,...,pids2, index = inputs
inputs_1 = inputs[0].cuda()
inputs_2 = inputs[1].cuda()
pids = []
for i, pid in enumerate(inputs[3:-2]):
pids.append(pid.cuda())
index = inputs[-1].cuda()
pids.append(pid.cuda())
return [inputs_1, inputs_2] + pids + [index]
class DbscanBaseTrainer(object):
def __init__(self, model_1, model_1_ema, contrast, num_cluster=None, alpha=0.999, fc_len=3000):
super(DbscanBaseTrainer, self).__init__()
self.model_1 = model_1
self.num_cluster = num_cluster
self.c_name = [fc_len for _ in range(len(num_cluster))]
self.model_1_ema = model_1_ema
self.alpha = alpha
self.criterion_ce = CrossEntropyLabelSmooth(self.num_cluster[0],False).cuda()
self.criterion_tri = SoftTripletLoss_vallia(margin=0.0).cuda()
self.source_classes = 751
self.contrast = contrast
def train(self, epoch, data_loader_target, data_loader_source, optimizer, choice_c,
print_freq=100, train_iters=200):
self.model_1.train()
self.model_1_ema.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_ce = [AverageMeter(), AverageMeter()]
losses_tri = [AverageMeter(), AverageMeter()]
loss_kldiv = AverageMeter()
loss_s = AverageMeter()
losses_tri_unc = AverageMeter()
contra_loss = AverageMeter()
precisions = [AverageMeter(), AverageMeter()]
end = time.time()
for i in range(train_iters):
target_inputs = data_loader_target.next()
source_inputs = data_loader_source.next()
data_time.update(time.time() - end)
# process inputs
items = self._parse_data(target_inputs)
items_source = self._parse_data(source_inputs)
inputs_1_t, inputs_2_t, index_t = items[0], items[1], items[-1]
inputs_1_s, inputs_2_s, index_s = items_source[0], items_source[1], items_source[-1]
inputs = self.range_spbn(inputs_1_s, inputs_1_t)
f_out, p_out, memory_f, _ = self.model_1(inputs, training=True)
f_out_s1, f_out_t1 = self.derange_spbn(f_out)
_, p_out_t1 = self.derange_spbn(p_out[0])
_, memory_f_t1 = self.derange_spbn(memory_f)
with torch.no_grad():
f_out_ema, p_out_ema, memory_f_ema, _ = self.model_1_ema(inputs, training=True)
f_out_s1_ema, f_out_t1_ema = self.derange_spbn(f_out_ema)
_, p_out_t1_ema = self.derange_spbn(p_out_ema[0])
_, memory_f_t1_ema = self.derange_spbn(memory_f_ema)
loss_tri_1 = self.criterion_tri(f_out_t1, f_out_t1, items[choice_c + 2])
loss_ce_1=self.criterion_ce(p_out_t1, items[2])
contra_loss_instance, contra_loss_center, ml_sour, ml_sour_ema = torch.tensor(0),torch.tensor(0),torch.tensor(0),torch.tensor(0)
#self.contrast(memory_f_t1, f_out_s1, f_out_t1, f_out_t1_ema, index_t, items_source[2], epoch=epoch)
loss_kl =loss_tri_unc= torch.tensor(0)
loss = loss_ce_1 + loss_tri_1
# if epoch % 6 != 0:
# loss = loss_ce_1 + loss_tri_1 + contra_loss_center + contra_loss_instance
# else:
# loss = loss_ce_1 + loss_tri_1 + contra_loss_center
optimizer.zero_grad()
loss.backward()
optimizer.step()
self._update_ema_variables(self.model_1, self.model_1_ema, self.alpha, epoch * len(data_loader_target) + i)
prec_1, = accuracy(p_out_t1.data, items[choice_c + 2].data)
losses_ce[0].update(loss_ce_1.item())
losses_tri[0].update(loss_tri_1.item())
loss_s.update(contra_loss_center.item())
loss_kldiv.update(loss_kl.item())
losses_tri_unc.update(loss_tri_unc.item())
contra_loss.update(contra_loss_instance.item())
precisions[0].update(prec_1[0])
# print log #
batch_time.update(time.time() - end)
end = time.time()
if (i + 1) % print_freq == 1:
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_ce {:.3f} / loss_kldiv {:.3f}\t'
'Loss_tri {:.3f} / Loss_tri_soft {:.3f} \t'
'contra_loss_center {:.3f}\t'
'contra_loss {:.3f}\t'
'Prec {:.2%} / {:.2%}\t'
.format(epoch, i, len(data_loader_target),
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_ce[0].avg, loss_kldiv.avg,
losses_tri[0].avg,losses_tri_unc.avg, loss_s.avg, contra_loss.avg,
precisions[0].avg, precisions[1].avg))
def range_spbn(self, inputs_1_s, inputs_1_t):
# arrange batch for domain-specific BN
device_num = torch.cuda.device_count()
B, C, H, W = inputs_1_s.size()
def reshape(inputs):
return inputs.view(device_num, -1, C, H, W)
inputs_1_s, inputs_1_t = reshape(inputs_1_s), reshape(inputs_1_t)
inputs = torch.cat((inputs_1_s, inputs_1_t), 1).view(-1, C, H, W)
return inputs
def derange_spbn(self, f_out):
device_num = torch.cuda.device_count()
# de-arrange batch
f_out = f_out.view(device_num, -1, f_out.size(-1))
f_out_s, f_out_t = f_out.split(f_out.size(1) // 2, dim=1)
f_out_s, f_out_t = f_out_s.contiguous().view(-1, f_out.size(-1)), f_out_t.contiguous().view(-1, f_out.size(-1))
return f_out_s, f_out_t
def _update_ema_variables(self, model, ema_model, alpha, global_step):
alpha = min(1 - 1 / (global_step + 1), alpha)
for (ema_name, ema_param), (model_name, param) in zip(ema_model.named_parameters(), model.named_parameters()):
ema_param.data.mul_(alpha).add_(1 - alpha, param.data)
def _parse_data(self, inputs):
# imgs_1, imgs_2, pids,...,pids2, index = inputs
inputs_1 = inputs[0].cuda()
inputs_2 = inputs[1].cuda()
pids = []
for i, pid in enumerate(inputs[3:-2]):
pids.append(pid.cuda())
index = inputs[-1].cuda()
pids.append(pid.cuda())
return [inputs_1, inputs_2] + pids + [index]
| 24,831 | 41.01692 | 163 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/__init__.py | from __future__ import absolute_import
from . import datasets
from . import evaluation_metrics
from . import feature_extraction
from . import loss
from . import metric_learning
from . import models
from . import utils
from . import dist_metric
from . import evaluators
from . import trainers
__version__ = '1.0.0'
| 316 | 20.133333 | 38 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/dist_metric.py | from __future__ import absolute_import
import torch
from .evaluators import extract_features
from .metric_learning import get_metric
class DistanceMetric(object):
def __init__(self, algorithm='euclidean', *args, **kwargs):
super(DistanceMetric, self).__init__()
self.algorithm = algorithm
self.metric = get_metric(algorithm, *args, **kwargs)
def train(self, model, data_loader):
if self.algorithm == 'euclidean': return
features, labels = extract_features(model, data_loader)
        features = torch.stack(list(features.values())).numpy()
labels = torch.Tensor(list(labels.values())).numpy()
self.metric.fit(features, labels)
def transform(self, X):
if torch.is_tensor(X):
X = X.numpy()
X = self.metric.transform(X)
X = torch.from_numpy(X)
else:
X = self.metric.transform(X)
return X
| 926 | 28.903226 | 63 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/multigpu.py | import time
import torch
# from util import MovingAverage
def aggreg_multi_gpu(model, dataloader, hc, dim, TYPE=torch.float64, model_gpus=1):
    """Accumulate activations and save them on multiple GPUs
* this function assumes the model is on the first `model_gpus` GPUs
so that it can write the activations on the remaining ones
* it splits the activations evenly between the remaining GPUs
"""
# number of gpus to store
ngpu_store = torch.cuda.device_count() - model_gpus#3-1
# number of batches in DL
l_dl = len(dataloader)#50000
# number of batches each gpu gets
batches_per_gpu = l_dl // ngpu_store#16666
# number of data each gpu gets
points_per_gpu = batches_per_gpu*dataloader.batch_size
# empty array of indices that we need to keep track of
indices = torch.empty(len(dataloader.dataset), dtype=torch.long)
# set up matrix PS: (N x K) when using one head, otherwise N x D, where D is the dim before the last FC layer.
PS = [torch.empty(points_per_gpu, dim,
device='cuda:' + str(i), dtype=TYPE)
for i in range(model_gpus, model_gpus + ngpu_store-1)]
    # accommodate the remainder
    PS.append(torch.empty(len(dataloader.dataset) - (ngpu_store-1)*points_per_gpu,
                          dim, device='cuda:' + str(model_gpus + ngpu_store - 1), dtype=TYPE))  # include the leftover samples that do not divide evenly across GPUs
# slice sizes, i.e. how many activations will be on the gpus
slices = [qq.shape[0] for qq in PS]
print("slice sizes: ", slices, flush=True)
# batch_time = MovingAverage(intertia=0.9)
now = time.time()
st = 0
softmax = torch.nn.Softmax(dim=1).to('cuda:0')
    # switch the model output: last-FC logits when using a single head, pre-last activations when using multiple heads
model.headcount = 1
for batch_idx, (data, _, _,_,_selected) in enumerate(dataloader):
data = data.to(torch.device('cuda:0'))
mass = data.size(0)
en = st + mass
# j keeps track of which part of PS we're writing to
j = min((batch_idx // batches_per_gpu), ngpu_store - 1)
subs = j*points_per_gpu
if hc == 1:
_,predicted_,_=model(data)
p = softmax(predicted_).detach().to(TYPE)
# when using one head: save softmax (N x K) matrix:
PS[j][st-subs:en-subs, :].copy_(p)
else:
# when using multiple heads: save softmax (N x D) matrix
PS[j][st-subs:en-subs, :].copy_(model(data).detach())
indices[st:en].copy_(_selected)
st = en
        # batch_time.update(time.time() - now)
        batch_elapsed = time.time() - now  # seconds spent on this batch (the MovingAverage helper is commented out above)
        now = time.time()
        if batch_idx % 50 == 0:
            print(f"Aggregating batch {batch_idx:03}/{l_dl}, speed: {mass / max(batch_elapsed, 1e-6):04.1f}Hz. To rGPU {j+1}",
                  end='\r', flush=True)
torch.cuda.synchronize() # just in case
return PS, indices
def gpu_mul_Ax(A, b, ngpu, splits, TYPE=torch.float64,model_gpus=1):
""" multiplies matrix A (stored on multiple GPUs) with vector x
* returns vector on GPU 0
"""
# Step 1: make a copy of B on each GPU
N = splits[-1]
b_ = []
for i in range(model_gpus, ngpu):
b_.append(b.to('cuda:' + str(i)))
# Step 2: issue the matmul on each GPU
c = torch.empty(N, 1, device='cuda:0', dtype=TYPE)
for a,i in enumerate(range(model_gpus, ngpu)):
c[splits[a]:splits[a+1], :].copy_(torch.matmul(A[a], b_[a]))
return c
def gpu_mul_AB(A, B, c, dim, TYPE=torch.float64, model_gpus=1):
    """Multiplies two matrices A, B on GPU, adds vector c and applies softmax at the end
* used to compute the effect of a linear FC layer followed by softmax
* return (N x K) matrix spread over the same GPUs as the PS matrix
"""
# Step 1: make a copy of B on each GPU
ngpu = torch.cuda.device_count() # one for the model
b_ = []
for i in range(model_gpus, ngpu):
b_.append(B.to('cuda:' + str(i)))
# Step 2: issue the matmul on each GPU
PS = []
for a, i in enumerate(range(model_gpus, ngpu)):
PS.append((torch.matmul(A[a], b_[a]) + c.to('cuda:'+str(i))).to(TYPE))
# the softmax
torch.exp(PS[a], out=PS[a])
summed = torch.sum(PS[a], dim=1, keepdim=True)
PS[a] /= summed
return PS
def gpu_mul_xA(b, A, ngpu, splits, TYPE=torch.float64, model_gpus=1):
""" multiplies vector x with matrix A (stored on multiple GPUs)
* returns vector on GPU 0
"""
# Step 1: make a copy of B on each GPU
b_ = []
for a, i in enumerate(range(model_gpus, ngpu)):
b_.append(b[:, splits[a]:splits[a+1]].to('cuda:' + str(i)))
# Step 2: issue the matmul on each GPU
c = torch.empty(ngpu-model_gpus, A[0].size(1), device='cuda:0', dtype=TYPE)
for a, i in enumerate(range(model_gpus, ngpu)):
c[a:a+1, :].copy_(torch.matmul(b_[a], A[a]))
# Step 3: need to sum these up
torch.cuda.synchronize() # just in case
c = torch.sum(c, 0, keepdim=True)
return c
| 5,048 | 40.04878 | 123 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/sinkhornknopp.py | import torch
import torch.nn as nn
import time
import numpy as np
from UDAsbs.multigpu import gpu_mul_Ax, gpu_mul_xA, aggreg_multi_gpu, gpu_mul_AB
from scipy.special import logsumexp
def py_softmax(x, axis=None):
"""stable softmax"""
return np.exp(x - logsumexp(x, axis=axis, keepdims=True))
def cpu_sk(self):
""" Sinkhorn Knopp optimization on CPU
* stores activations to RAM
* does matrix-vector multiplies on CPU
* slower than GPU
"""
# 1. aggregate inputs:
self.model.eval()
N = len(self.pseudo_loader.dataset)
if self.hc == 1:
self.PS = np.zeros((N, self.K[0]), dtype=self.dtype)
else:
self.PS_pre = np.zeros((N, self.presize), dtype=self.dtype)
now = time.time()
for batch_idx, item in enumerate(self.pseudo_loader):
data = item[0].to(self.dev)
if self.hc == 1:
_,predicted,_=self.model(data,training=True)# predicted=self.model(data,cluster=True)
p = nn.functional.softmax(predicted[0], 1)
self.PS[item[-1], :] = p.detach().cpu().numpy().astype(self.dtype)
else:
self.model.headcount = self.hc
p = self.model(data)
self.PS_pre[item[-1], :] = p.detach().cpu().numpy().astype(self.dtype)
print("Aggreg of outputs took {0:.2f} min".format((time.time() - now) / 60.), flush=True)
# 2. solve label assignment via sinkhorn-knopp:
if self.hc == 1:
optimize_L_sk(self, nh=0)
else:
for nh in range(self.hc):
print("computing head %s " % nh, end="\r", flush=True)
tl = getattr(self.model.module, "classifier{}_{}".format(nh,self.K[nh]))
time_mat = time.time()
# clear memory
try:
del self.PS
except:
pass
# apply last FC layer (a matmul and adding of bias)
self.PS = self.PS_pre @ tl.weight.cpu().detach().numpy().T.astype(self.dtype)
# + tl.bias.cpu().detach().numpy().astype(self.dtype))
print("matmul took %smin" % ((time.time() - time_mat) / 60.), flush=True)
self.PS = py_softmax(self.PS, 1)
optimize_L_sk(self, nh=nh)
return
def gpu_sk(self):
""" Sinkhorn Knopp optimization on GPU
* stores activations on multiple GPUs (needed when dataset is large)
* does matrix-vector multiplies on GPU (extremely fast)
* recommended variant
* due to multi-GPU use, it's a bit harder to understand what's happening -> see CPU variant to understand
"""
# 1. aggregate inputs:
start_t = time.time()
if self.hc == 1:
self.PS, indices = aggreg_multi_gpu(self.model, self.pseudo_loader,
hc=self.hc, dim=self.outs[0], TYPE=self.dtype)
else:
try: # just in case stuff
del self.PS_pre
except:
pass
torch.cuda.empty_cache()
time.sleep(1)
self.PS_pre, indices = aggreg_multi_gpu(self.model, self.pseudo_loader,
hc=self.hc, dim=self.presize, TYPE=torch.float32)
self.model.headcount = self.hc
print("Aggreg of outputs took {0:.2f} min".format((time.time() - start_t) / 60.), flush=True)
# 2. solve label assignment via sinkhorn-knopp:
if self.hc == 1:
optimize_L_sk_multi(self, nh=0)
self.L[0,indices] = self.L[0,:]
else:
for nh in range(self.hc):
tl = getattr(self.model, "top_layer%d" % nh)
time_mat = time.time()
try:
del self.PS
torch.cuda.empty_cache()
except:
pass
# apply last FC layer (a matmul and adding of bias)
self.PS = gpu_mul_AB(self.PS_pre, tl.weight.t(),
c=tl.bias, dim=self.outs[nh], TYPE=self.dtype)
print("matmul took %smin" % ((time.time() - time_mat) / 60.), flush=True)
optimize_L_sk_multi(self, nh=nh)
self.L[nh][indices] = self.L[nh]
return
import collections
def optimize_L_sk(self, nh=0):
N = max(self.L[nh].size())
tt = time.time()
self.PS = self.PS.T # now it is K x N
if not self.dis_gt:
r = np.ones((self.outs[nh], 1), dtype=self.dtype) / self.outs[nh]
else:
b_pesud_label = np.nanargmax(self.PS, 0)
plabel2number=dict(collections.Counter(b_pesud_label)).items()
plabel2number=sorted(plabel2number,key=lambda plabel2number:plabel2number[1])
sort_label=[label[0] for label in plabel2number]
origin_dis=self.dis_gt
deta=len(origin_dis)/ self.outs[nh]
r = np.ones((self.outs[nh], 1), dtype=self.dtype) / N
for i,sl in enumerate(sort_label[::-1]):
nn=origin_dis[0 + int(round(i * deta))]
r[sl,:] = nn
r=py_softmax(r,axis=0)
c = np.ones((N, 1), dtype=self.dtype) / N
self.PS **= self.lamb # K x N
inv_K = self.dtype(1./self.outs[nh])
inv_N = self.dtype(1./N)
err = 1e6
_counter = 0
while err > 1e-2:
r = inv_K / (self.PS @ c) # (KxN)@(N,1) = K x 1
c_new = inv_N / (r.T @ self.PS).T # ((1,K)@(KxN)).t() = N x 1
if _counter % 10 == 0:
err = np.nansum(np.abs(c / c_new - 1))
c = c_new
_counter += 1
print("error: ", err, 'step ', _counter, flush=True) # " nonneg: ", sum(I), flush=True)
# inplace calculations.
self.PS *= np.squeeze(c)
self.PS = self.PS.T
self.PS *= np.squeeze(r)
self.PS = self.PS.T
argmaxes = np.nanargmax(self.PS, 0) # size N
newL = torch.LongTensor(argmaxes)
self.L[nh] = newL.to(self.dev)
print('opt took {0:.2f}min, {1:4d}iters'.format(((time.time() - tt) / 60.), _counter), flush=True)
def optimize_L_sk_multi(self, nh=0):
""" optimizes label assignment via Sinkhorn-Knopp.
this implementation uses multiple GPUs to store the activations which allow fast matrix multiplies
Parameters:
nh (int) number of the head that is being optimized.
"""
N = max(self.L.size())
tt = time.time()
r = torch.ones((self.outs[nh], 1), device='cuda:0', dtype=self.dtype) / self.outs[nh]
c = torch.ones((N, 1), device='cuda:0', dtype=self.dtype) / N
ones = torch.ones(N, device='cuda:0', dtype=self.dtype)
inv_K = 1. / self.outs[nh]
inv_N = 1. / N
# inplace power of softmax activations:
[qq.pow_(self.lamb) for qq in self.PS] # K x N
err = 1e6
_counter = 0
ngpu = torch.cuda.device_count()
splits = np.cumsum([0] + [a.size(0) for a in self.PS])
while err > 1e-1:
r = inv_K / (gpu_mul_xA(c.t(), self.PS,
ngpu=ngpu, splits=splits, TYPE=self.dtype)).t() # ((1xN)@(NxK)).T = Kx1
c_new = inv_N / (gpu_mul_Ax(self.PS, r,
ngpu=ngpu, splits=splits, TYPE=self.dtype)) # (NxK)@(K,1) = N x 1
torch.cuda.synchronize() # just in case
if _counter % 10 == 0:
err = torch.sum(torch.abs((c.squeeze() / c_new.squeeze()) - ones)).cpu().item()
c = c_new
_counter += 1
print("error: ", err, 'step ', _counter, flush=True)
# getting the final tranportation matrix #####################
for i, qq in enumerate(self.PS):
torch.mul(qq, c[splits[i]:splits[i + 1], :].to('cuda:' + str(i + 1)), out=qq)
[torch.mul(r.to('cuda:' + str(i + 1)).t(), qq, out=qq) for i, qq in enumerate(self.PS)]
argmaxes = torch.empty(N, dtype=torch.int64, device='cuda:0')
start_idx = 0
for i, qq in enumerate(self.PS):
amax = torch.argmax(qq, 1)
argmaxes[start_idx:start_idx + len(qq)].copy_(amax)
start_idx += len(qq)
newL = argmaxes
print('opt took {0:.2f}min, {1:4d}iters'.format(((time.time() - tt) / 60.), _counter), flush=True)
# finally, assign the new labels ########################
self.L[nh] = newL
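# Minimal, self-contained sketch of the Sinkhorn-Knopp balancing performed above
# (illustrative only; the *_demo names are assumptions and are not used elsewhere
# in this repo): alternately rescale rows and columns of a K x N score matrix so
# that every cluster receives roughly N/K samples, then read the pseudo labels
# off the per-sample argmax.
if __name__ == '__main__':
    K_demo, N_demo = 5, 100
    PS_demo = py_softmax(np.random.rand(K_demo, N_demo), axis=0)   # K x N scores
    r_demo = np.ones((K_demo, 1)) / K_demo                         # cluster marginal
    c_demo = np.ones((N_demo, 1)) / N_demo                         # sample marginal
    for _ in range(100):
        r_demo = (1. / K_demo) / (PS_demo @ c_demo)                # row scaling
        c_demo = (1. / N_demo) / (r_demo.T @ PS_demo).T            # column scaling
    Q_demo = PS_demo * c_demo.T * r_demo                           # balanced plan
    print(Q_demo.sum(1))              # each row sums to ~1/K -> equal-sized clusters
    print(np.argmax(Q_demo, 0)[:10])  # balanced pseudo labels for the first samples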
| 8,028 | 36.872642 | 117 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/memorybank/alias_multinomial.py | import torch
class AliasMethod(object):
"""
From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
"""
def __init__(self, probs):
if probs.sum() > 1:
probs.div_(probs.sum())
K = len(probs)
self.prob = torch.zeros(K)
self.alias = torch.LongTensor([0]*K)
# Sort the data into the outcomes with probabilities
# that are larger and smaller than 1/K.
smaller = []
larger = []
for kk, prob in enumerate(probs):
self.prob[kk] = K*prob
if self.prob[kk] < 1.0:
smaller.append(kk)
else:
larger.append(kk)
# Loop though and create little binary mixtures that
# appropriately allocate the larger outcomes over the
# overall uniform mixture.
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
self.alias[small] = large
self.prob[large] = (self.prob[large] - 1.0) + self.prob[small]
if self.prob[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
for last_one in smaller+larger:
self.prob[last_one] = 1
def cuda(self):
self.prob = self.prob.cuda()
self.alias = self.alias.cuda()
def draw(self, N):
"""
Draw N samples from multinomial
:param N: number of samples
:return: samples
"""
K = self.alias.size(0)
kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K)
prob = self.prob.index_select(0, kk)
alias = self.alias.index_select(0, kk)
# b is whether a random number is greater than q
b = torch.bernoulli(prob)
oq = kk.mul(b.long())
oj = alias.mul((1-b).long())
return oq + oj
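# Minimal usage sketch of the alias method above (illustrative only; the names
# below are assumptions and are not used elsewhere in this repo). Building the
# two tables costs O(K); afterwards every draw is O(1) per sample.
if __name__ == '__main__':
    probs = torch.tensor([0.1, 0.2, 0.3, 0.4])
    sampler = AliasMethod(probs)
    samples = sampler.draw(10000)  # LongTensor of indices in [0, 4)
    # the empirical frequencies should be close to the requested probabilities
    print(torch.bincount(samples, minlength=4).float() / 10000)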
| 1,968 | 28.833333 | 120 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/memorybank/NCEAverage.py | import torch
from torch import nn
from torch.nn import functional as F
import math
from numpy.testing import assert_almost_equal
def normalize(x, axis=-1):
"""Normalizing to unit length along the specified dimension.
Args:
x: pytorch Variable
Returns:
x: pytorch Variable, same shape as input
"""
x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
return x
def sigmoid(tensor, temp=1.0):
exponent = -tensor / temp
exponent = torch.clamp(exponent, min=-50, max=50)
y = 1.0 / (1.0 + torch.exp(exponent))
return y
def logsumexp(value, weight=1, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
# TODO: torch.max(value, dim=None) threw an error at time of writing
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(weight*torch.exp(value0),
dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(weight*torch.exp(value - m))
return m + torch.log(sum_exp)
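# With weight == 1 this is the standard numerically stable log-sum-exp; the weight
# argument lets callers mask or re-weight individual terms inside the sum.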
class MemoryMoCo_id(nn.Module):
"""Fixed-size queue with momentum encoder"""
def __init__(self, inputSize, outputSize, K, index2label, choice_c=1, T=0.07, use_softmax=False, cluster_num=0):
super(MemoryMoCo_id, self).__init__()
self.outputSize = outputSize
self.inputSize = inputSize
self.queueSize = K
self.T = T
self.index = 0
self.use_softmax = use_softmax
self.register_buffer('params', torch.tensor([-1]))
stdv = 1. / math.sqrt(inputSize / 3)
self.register_buffer('memory', torch.rand(self.queueSize, inputSize).mul_(2 * stdv).add_(-stdv))
self.register_buffer('index_memory', torch.ones(self.queueSize, dtype=torch.long).fill_(-1))
print('Using queue shape: ({},{})'.format(self.queueSize, inputSize))
self.choice_c=choice_c
self.index_pl = -1#3-cluster_num if cluster_num<=3 else 0
self.index2label = index2label
self.m = 0.25
self.gamma = 128
def posandneg(self, index, batchSize, index_choice):
# pseudo logit
# pseudo_label = [torch.tensor([self.index2label[j][i.item()] for i in index], dtype=torch.long).cuda()
# for j in range(4)]
# pseudo_label = sum(pseudo_label) / 4.0
# pseudo_label=reduce(lambda x, y: x * y, pseudo_label)
pseudo_label = torch.tensor([self.index2label[index_choice][i.item()] for i in index], dtype=torch.long).cuda()
pseudo_label = pseudo_label.unsqueeze(1).expand(batchSize, self.queueSize)
# memory_label = [
# torch.tensor([self.index2label[j][i.item()] if i.item() != -1 else -1 for i in self.index_memory],
# dtype=torch.long).cuda()
# for j in range(4)]
# memory_label = sum(memory_label) / 4.0
# memory_label = reduce(lambda x, y: x * y, memory_label)
memory_label = torch.tensor([self.index2label[index_choice][i.item()] if i.item() != -1 else -1
for i in self.index_memory], dtype=torch.long).cuda()
memory_label = memory_label.unsqueeze(0).expand(batchSize, self.queueSize)
is_pos = pseudo_label.eq(memory_label).float()
# is_pos_weight = torch.cat((torch.ones([batchSize, 1], dtype=torch.float).cuda(), is_pos), dim=1)
# weight = torch.cat(
# (torch.ones([batchSize, 1], dtype=torch.float).cuda(), is_pos / is_pos_weight.sum(1, keepdim=True)), dim=1)
# is_pos = is_pos_weight
is_pos = torch.cat((torch.ones([batchSize, 1], dtype=torch.float).cuda(), is_pos), dim=1)
is_neg = pseudo_label.ne(memory_label).float()
is_neg = torch.cat((torch.zeros([batchSize, 1], dtype=torch.float).cuda(), is_neg), dim=1)
# is_neg = torch.cat((torch.zeros([batchSize, 1], dtype=torch.float).cuda(), is_neg), dim=1)
return is_pos, is_neg
def update(self,q1, q2, index):
batchSize = q1.shape[0]
with torch.no_grad():
q1 = q1.detach()
q2 = q2.detach()
out_ids = torch.arange(batchSize).cuda()
out_ids += self.index
out_ids = torch.fmod(out_ids, self.queueSize)
out_ids = out_ids.long()
self.memory.index_copy_(0, out_ids, (q1+q2)/2.0)
self.index_memory.index_copy_(0, out_ids, index)
self.index = (self.index + batchSize) % self.queueSize
def forward(self, q1, q2, index, epoch=0):
batchSize = q1.shape[0]
q1 = normalize(q1, axis=-1)
q2 = normalize(q2, axis=-1)
#is_pos0, is_neg0 = self.posandneg(index, batchSize, 0)
is_pos1, is_neg1 = self.posandneg(index, batchSize, self.choice_c)
#is_pos2, is_neg2 = self.posandneg(index, batchSize, 2)
#is_pos3, is_neg3 = self.posandneg(index, batchSize, 3)
is_pos =is_pos1# (is_pos0 + is_pos1 + is_pos2 + is_pos3)/4.0
is_neg =is_neg1# (is_neg0 + is_neg1 + is_neg2 + is_neg3)/4.0
queue = self.memory.clone()
l_logist = torch.matmul(queue.detach(), ((q1+q2)/2.0).transpose(1, 0))
l_logist = l_logist.transpose(0, 1).contiguous() # (bs, queue_size)
# pos logit for self
l_pos = torch.bmm(q1.view(batchSize, 1, -1), q2.view(batchSize, -1, 1))
l_pos_self = l_pos.contiguous().view(batchSize, 1)
sim_mat = torch.cat((l_pos_self, l_logist), dim=1)
s_p = sim_mat * is_pos#0#[is_pos].contiguous()#.view(batchSize, -1)
# s_p = torch.div(s_p, self.T)
s_n = sim_mat * is_neg#3#[is_neg].contiguous()#.view(batchSize, -1)
alpha_p = F.relu(-s_p.detach() + 1 + self.m)
alpha_n = F.relu(s_n.detach() + self.m)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - self.gamma * alpha_p * (s_p - delta_p)
logit_n = self.gamma * alpha_n * (s_n - delta_n)
# logit_p_1 = torch.exp(logit_p * is_pos) * is_pos
# logit_p_2 = logit_p_1.sum(1)
# logit_p_3 = torch.log(logit_p_2+ 1e-16)
# logit_n = torch.log((torch.exp(logit_n) * is_neg).sum(1) + 1e-16)
# loss = F.softplus(logit_p+logit_n).mean()
loss = F.softplus(logsumexp(logit_p - 99999.0 * is_neg, dim=1) +
logsumexp(logit_n - 99999.0 * is_pos, dim=1)).mean() / 18.0 # weight,
#loss= F.softplus(logsumexp(logit_p - 99999.0 * is_neg0,is_pos, dim=1) +
# logsumexp(logit_n - 99999.0 * is_pos3,is_neg, dim=1)).mean()/18.0#weight,
# update memory
with torch.no_grad():
q1 = q1.detach()
q2 = q2.detach()
out_ids = torch.arange(batchSize).cuda()
out_ids += self.index
out_ids = torch.fmod(out_ids, self.queueSize)
out_ids = out_ids.long()
self.memory.index_copy_(0, out_ids, (q1+q2)/2.0)
self.index_memory.index_copy_(0, out_ids, index)
self.index = (self.index + batchSize) % self.queueSize
return loss
class onlinememory(nn.Module):
"""Fixed-size queue with momentum encoder"""
def __init__(self, inputSize, outputSize, sour_numclass, K, index2label, choice_c=1, T=0.07, use_softmax=False, cluster_num=0):
super(onlinememory, self).__init__()
self.outputSize = outputSize
self.inputSize = inputSize
self.sour_numclass = sour_numclass
self.queueSize = K
self.T = T
self.index = 0
self.use_softmax = use_softmax
self.register_buffer('params', torch.tensor([-1]))
stdv = 1. / math.sqrt(inputSize / 3)
self.register_buffer('memory', torch.rand(self.queueSize, inputSize).mul_(2 * stdv).add_(-stdv))
self.register_buffer('index_memory', torch.ones(self.queueSize, dtype=torch.long).fill_(-1))
self.register_buffer('uncer', torch.ones(self.queueSize, dtype=torch.float).fill_(1))
print('Using queue shape: ({},{})'.format(self.queueSize, inputSize))
# self.register_buffer('sour_memory', torch.rand(self.sour_numclass, inputSize).mul_(2 * stdv).add_(-stdv))
# self.register_buffer('sour_index_memory', torch.ones(self.sour_numclass, dtype=torch.long).fill_(-1))
# print('Using queue shape: ({},{})'.format(self.sour_numclass, inputSize))
self.choice_c = choice_c
self.index_pl = -1 # 3-cluster_num if cluster_num<=3 else 0
self.index2label = index2label
self.m = 0.25
self.gamma = 128
self.momentum=0.2
################
#his loss
num_steps=151
self.step = 2 / (num_steps - 1)
self.eps = 1 / num_steps
self.t = torch.arange(-1, 1 + self.step, self.step).view(-1, 1).cuda()
self.tsize = self.t.size()[0]
###############
# smooth ap loss
self.anneal = 0.01
self.num_id=16
def memo_contr_loss(self,index,q1):
batchSize = q1.shape[0]
# import ipdb;ipdb.set_trace()
pseudo_label = torch.tensor([self.index2label[self.choice_c][i.item()] for i in index],
dtype=torch.long).cuda()
pseudo_label = pseudo_label.unsqueeze(1).expand(batchSize, self.queueSize)
memory_label = torch.tensor(
[self.index2label[self.choice_c][i.item()] if i.item() != -1 else -1 for i in self.index_memory],
dtype=torch.long).cuda()
memory_label = memory_label.unsqueeze(0).expand(batchSize, self.queueSize)
is_pos = pseudo_label.eq(memory_label).float()
is_neg = pseudo_label.ne(memory_label).float()
queue = self.memory.clone()
l_logist = torch.matmul(queue.detach(), (q1).transpose(1, 0))
l_logist = l_logist.transpose(0, 1).contiguous()
sim_mat = l_logist
outputs = F.log_softmax(sim_mat, dim=1)
loss = - (is_pos * outputs)
loss = loss.sum(dim=1)
loss = loss.mean(dim=0)
return loss
def memo_circle_loss(self,index,q1,uncer):
batchSize = q1.shape[0]
# import ipdb;ipdb.set_trace()
pseudo_label = torch.tensor([self.index2label[self.choice_c][i.item()] for i in index],
dtype=torch.long).cuda()
pseudo_label = pseudo_label.unsqueeze(1).expand(batchSize, self.queueSize)
memory_label = torch.tensor(
[self.index2label[self.choice_c][i.item()] if i.item() != -1 else -1 for i in self.index_memory],
dtype=torch.long).cuda()
memory_label = memory_label.unsqueeze(0).expand(batchSize, self.queueSize)
is_pos = pseudo_label.eq(memory_label).float()
is_neg = pseudo_label.ne(memory_label).float()
queue = self.memory.clone()
l_logist = torch.matmul(queue.detach(), (q1).transpose(1, 0))
l_logist = l_logist.transpose(0, 1).contiguous()
sim_mat = l_logist
# exp_variance = exp_variance.detach()
# exp_variance = exp_variance.unsqueeze(1).expand(batchSize, self.queueSize)
s_p = sim_mat * is_pos
#s_p = torch.div(s_p, self.T)
s_n = sim_mat * is_neg #* exp_variance
exp_variance = 1#(uncer.unsqueeze(1).expand(batchSize, self.queueSize) + self.uncer.clone().unsqueeze(0).expand(batchSize, self.queueSize))/2.0
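        # Circle-loss style margins: alpha_p / alpha_n grow for positives / negatives
        # that are far from their optima (delta_p = 1 - m, delta_n = m), so harder
        # pairs dominate the softplus(logsumexp(...)) objective below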
alpha_p = F.relu(-s_p.detach() + 1 + self.m)
alpha_n = F.relu(s_n.detach() + self.m)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - self.gamma * alpha_p * (s_p - delta_p)
logit_n = self.gamma * alpha_n * (s_n - delta_n)
# ,weight=exp_variance
loss = (F.softplus(logsumexp(logit_p - 99999.0 * is_neg,weight=exp_variance, dim=1) +
logsumexp(logit_n - 99999.0 * is_pos,weight=exp_variance, dim=1))).mean()
return loss
def memo_center_circle_loss(self,index,q1):
batchSize = q1.shape[0]
pseudo_label = torch.tensor([self.index2label[self.choice_c][i.item()] for i in index],
dtype=torch.long).cuda()
pseudo_label = pseudo_label.unsqueeze(1).expand(batchSize, self.sour_numclass)
# pseudo_label = index.expand(batchSize, self.sour_numclass)
memory_label = torch.tensor(
[self.index2label[self.choice_c][i] for i in range(self.sour_numclass)],
dtype=torch.long).cuda()
memory_label = memory_label.unsqueeze(0).expand(batchSize, self.sour_numclass)
is_pos = pseudo_label.eq(memory_label).float()
is_neg = pseudo_label.ne(memory_label).float()
queue = self.memory[:self.sour_numclass,:].clone()
l_logist = torch.matmul(queue.detach(), (q1).transpose(1, 0))
l_logist = l_logist.transpose(0, 1).contiguous()
sim_mat = l_logist
s_p = sim_mat * is_pos
s_n = sim_mat * is_neg
alpha_p = F.relu(-s_p.detach() + 1 + self.m)
alpha_n = F.relu(s_n.detach() + self.m)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - self.gamma * alpha_p * (s_p - delta_p)
logit_n = self.gamma * alpha_n * (s_n - delta_n)
loss = F.softplus(logsumexp(logit_p - 99999.0 * is_neg, dim=1) +
logsumexp(logit_n - 99999.0 * is_pos, dim=1)).mean() / 18.0
return loss
def his_loss(self,classes,features):
classes = torch.tensor([self.index2label[0][i.item()] for i in classes],
dtype=torch.long).cuda()
def histogram(inds, size):
s_repeat_ = s_repeat.clone()
indsa = (s_repeat_floor - (self.t - self.step) > -self.eps) & (
s_repeat_floor - (self.t - self.step) < self.eps) & inds
            assert indsa.nonzero().size()[0] == size, 'A different number of bins should be used'
zeros = torch.zeros((1, indsa.size()[1])).byte()
if self.cuda:
zeros = zeros.cuda()
indsb = torch.cat((indsa, zeros))[1:, :]
s_repeat_[~(indsb | indsa)] = 0
# indsa corresponds to the first condition of the second equation of the paper
s_repeat_[indsa] = (s_repeat_ - self.t + self.step)[indsa] / self.step
# indsb corresponds to the second condition of the second equation of the paper
s_repeat_[indsb] = (-s_repeat_ + self.t + self.step)[indsb] / self.step
return s_repeat_.sum(1) / size
classes_size = classes.size()[0]
classes_eq = (classes.repeat(classes_size, 1) == classes.view(-1, 1).repeat(1, classes_size)).data
dists = torch.mm(features, features.transpose(0, 1))
assert ((dists > 1 + self.eps).sum().item() + (
dists < -1 - self.eps).sum().item()) == 0, 'L2 normalization should be used'
s_inds = torch.triu(torch.ones(classes_eq.size()), 1).byte()
if self.cuda: s_inds = s_inds.cuda()
pos_inds = classes_eq[s_inds].repeat(self.tsize, 1)#18001,2016
neg_inds = ~classes_eq[s_inds].repeat(self.tsize, 1)#18001,2016
pos_size = classes_eq[s_inds].sum().item()
neg_size = (~classes_eq[s_inds]).sum().item()
s = dists[s_inds].view(1, -1)
s_repeat = s.repeat(self.tsize, 1)
s_repeat_floor = (torch.floor(s_repeat.data / self.step) * self.step).float()
histogram_pos = histogram(pos_inds, pos_size)
assert_almost_equal(histogram_pos.sum().item(), 1, decimal=1,
err_msg='Not good positive histogram', verbose=True)
histogram_neg = histogram(neg_inds, neg_size)
assert_almost_equal(histogram_neg.sum().item(), 1, decimal=1,
err_msg='Not good negative histogram', verbose=True)
histogram_pos_repeat = histogram_pos.view(-1, 1).repeat(1, histogram_pos.size()[0])
histogram_pos_inds = torch.tril(torch.ones(histogram_pos_repeat.size()), -1).byte()
if self.cuda:
histogram_pos_inds = histogram_pos_inds.cuda()
histogram_pos_repeat[histogram_pos_inds] = 0
histogram_pos_cdf = histogram_pos_repeat.sum(0)
loss = torch.sum(histogram_neg * histogram_pos_cdf)
return loss
def smooth_ap(self, targets,embedding):
targets= torch.tensor([self.index2label[0][i.item()] for i in targets],
dtype=torch.long).cuda()
        # For distributed training, gather all features from different processes.
all_embedding = self.memory.clone().detach()
all_targets = torch.tensor(
[self.index2label[0][i.item()] if i.item() != -1 else -1 for i in self.index_memory],
dtype=torch.long).cuda()
sim_dist = torch.matmul(embedding, all_embedding.t())
N, M = sim_dist.size()
# Compute the mask which ignores the relevance score of the query to itself
mask_indx = 1.0 - torch.eye(M, device=sim_dist.device)
mask_indx = mask_indx.unsqueeze(dim=0).repeat(N, 1, 1) # (N, M, M)
# sim_dist -> N, 1, M -> N, M, N
sim_dist_repeat = sim_dist.unsqueeze(dim=1).repeat(1, M, 1) # (N, M, M)
# sim_dist_repeat_t = sim_dist.t().unsqueeze(dim=1).repeat(1, N, 1) # (N, N, M)
# Compute the difference matrix
sim_diff = sim_dist_repeat - sim_dist_repeat.permute(0, 2, 1) # (N, M, M)
# Pass through the sigmoid
sim_sg = sigmoid(sim_diff, temp=self.anneal) * mask_indx
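        # the sigmoid of pairwise score differences is a smooth surrogate for the
        # Heaviside step used in ranking, which makes the AP estimate differentiable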
# Compute all the rankings
sim_all_rk = torch.sum(sim_sg, dim=-1) + 1 # (N, N)
pos_mask = targets.view(N, 1).expand(N, M).eq(all_targets.view(M, 1).expand(M, N).t()).float() # (N, M)
pos_mask_repeat = pos_mask.unsqueeze(1).repeat(1, M, 1) # (N, M, M)
# Compute positive rankings
pos_sim_sg = sim_sg * pos_mask_repeat
sim_pos_rk = torch.sum(pos_sim_sg, dim=-1) + 1 # (N, N)
# sum the values of the Smooth-AP for all instances in the mini-batch
ap = 0
group = N // self.num_id
for ind in range(self.num_id):
pos_divide = torch.sum(
sim_pos_rk[(ind * group):((ind + 1) * group), (ind * group):((ind + 1) * group)] / (
sim_all_rk[(ind * group):((ind + 1) * group), (ind * group):((ind + 1) * group)]))
ap += pos_divide / torch.sum(pos_mask[ind * group]) / N
return 1 - ap
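    # (Editorial note) Both smooth_ap (above) and _smooth_ap (below) follow the
    # Smooth-AP idea: the hard ranking indicator 1[s_j > s_i] is relaxed to
    # sigmoid((s_j - s_i) / anneal), which makes Average Precision
    # differentiable, and 1 - AP is returned as the loss.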
def _smooth_ap(self, targets,embedding):
"""Forward pass for all input predictions: preds - (batch_size x feat_dims) """
# ------ differentiable ranking of all retrieval set ------
embedding = F.normalize(embedding, dim=1)
# For distributed training, gather all features from different process.
sim_dist = torch.matmul(embedding, self.memory[:self.queueSize-self.sour_numclass,:].t().detach())
N, M = sim_dist.size()
# Compute the mask which ignores the relevance score of the query to itself
mask_indx = 1.0 - torch.eye(M, device=sim_dist.device)
mask_indx = mask_indx.unsqueeze(dim=0).repeat(N, 1, 1) # (N, M, M)
# sim_dist -> N, 1, M -> N, M, N
sim_dist_repeat = sim_dist.unsqueeze(dim=1).repeat(1, M, 1) # (N, M, M)
# Compute the difference matrix
sim_diff = sim_dist_repeat - sim_dist_repeat.permute(0, 2, 1) # (N, M, M)
# Pass through the sigmoid
sim_sg = sigmoid(sim_diff, temp=self.anneal) * mask_indx
# Compute all the rankings
        sim_all_rk = torch.sum(sim_sg, dim=-1) + 1  # (N, M)
targets = torch.tensor([self.index2label[0][i.item()] for i in targets],
dtype=torch.long).cuda()
queue_label = torch.tensor([self.index2label[0][i.item()] if i.item() != -1
else -1 for i in self.index_memory],
dtype=torch.long).cuda()[self.sour_numclass:]
pos_mask = targets.view(N, 1).expand(N, M).eq(queue_label.view(M, 1).expand(M, N).t()).float() # (N, M)
pos_mask_repeat = pos_mask.unsqueeze(1).repeat(1, M, 1) # (N, M, M)
# Compute positive rankings
pos_sim_sg = sim_sg * pos_mask_repeat
        sim_pos_rk = torch.sum(pos_sim_sg, dim=-1) + 1  # (N, M)
# sum the values of the Smooth-AP for all instances in the mini-batch
ap = 0
group = N // self.num_id
for ind in range(self.num_id):
pos_divide = torch.sum(
sim_pos_rk[(ind * group):((ind + 1) * group), (ind * group):((ind + 1) * group)] / (
sim_all_rk[(ind * group):((ind + 1) * group), (ind * group):((ind + 1) * group)]))
ap += pos_divide / torch.sum(pos_mask[ind * group]) / N
return 1 - ap
def forward(self, q1, q2, tar_tri, tar_tri_ema, index, sour_labels, uncer=None, epoch=0):
batchSize = q1.shape[0]
# tar_tri = normalize(tar_tri, axis=-1)
q1 = normalize(q1, axis=-1)
q2 = normalize(q2, axis=-1)
# loss_q1 = self.memo_contr_loss(index+self.sour_numclass, q1)
loss_q1 = self.memo_circle_loss(index + self.sour_numclass, q1, uncer)
# loss_q1 = self._smooth_ap(index + self.sour_numclass, q1)
loss_q2 = self.memo_center_circle_loss(sour_labels, q2)
# with torch.no_grad():
# queue = self.memory[:self.sour_numclass, :].clone()
# ml_sour = torch.matmul(tar_tri,queue.transpose(1, 0).detach())
# ml_sour_ema = torch.matmul(tar_tri_ema, queue.transpose(1, 0).detach())
# update memory
with torch.no_grad():
q1 = q1.detach()
out_ids = torch.arange(batchSize).cuda()
out_ids += self.index
out_ids = torch.fmod(out_ids, self.queueSize-self.sour_numclass)
out_ids = (out_ids+self.sour_numclass).long()
self.memory.index_copy_(0, out_ids, q1)
self.index_memory.index_copy_(0, out_ids, index + self.sour_numclass)
self.uncer.index_copy_(0, out_ids, uncer)
self.index = (self.index + batchSize) % (self.queueSize-self.sour_numclass)
for x, y in zip(q2, sour_labels):
self.memory[y] = self.momentum * self.memory[y] + (1. - self.momentum) * x
self.memory[y] /= self.memory[y].norm()
return loss_q1, loss_q2, None, None
| 22,548 | 44.370221 | 151 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/memorybank/NCECriterion.py | import torch
from torch import nn
import torch.nn.functional as F
eps = 1e-7
class NCECriterion(nn.Module):
"""
Eq. (12): L_{memorybank}
"""
def __init__(self, n_data):
super(NCECriterion, self).__init__()
self.n_data = n_data
def forward(self, x):
bsz = x.shape[0]
m = x.size(1) - 1
# noise distribution
Pn = 1 / float(self.n_data)
# loss for positive pair
P_pos = x.select(1, 0)
log_D1 = torch.div(P_pos, P_pos.add(m * Pn + eps)).log_()
# loss for K negative pair
P_neg = x.narrow(1, 1, m)
log_D0 = torch.div(P_neg.clone().fill_(m * Pn), P_neg.add(m * Pn + eps)).log_()
loss = - (log_D1.sum(0) + log_D0.view(-1, 1).sum(0)) / bsz
return loss
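    # (Editorial note, assumed reading of the code above): column 0 of `x` holds
    # the positive similarity and columns 1..m hold noise scores; with noise
    # prior Pn = 1 / n_data the loss is the standard NCE objective
    #   L = -( log D(pos) + sum_neg log(1 - D(neg)) ) / bsz,  where D(s) = s / (s + m*Pn).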
class NCESoftmaxLoss(nn.Module):
"""Softmax cross-entropy loss (a.k.a., info-memorybank loss in CPC paper)"""
def __init__(self):
super(NCESoftmaxLoss, self).__init__()
self.criterion = nn.CrossEntropyLoss()
def forward(self, x, is_pos=None):
bsz = x.shape[0]
x = x.squeeze()
label = torch.zeros([bsz]).cuda().long()
loss = self.criterion(x, label)
return loss
class MultiSoftmaxLoss(nn.Module):
def __init__(self):
super().__init__()
# self.criterion = nn.KLDivLoss(reduction='batchmean')
# self.criterion = nn.CrossEntropyLoss()
# self.criterion = nn.NLLLoss(reduction='mean')
def forward(self, x, is_neg):
bsz = x.shape[0]
# ce_loss = self.criterion(x, torch.zeros([bsz]).cuda().long())
x = x.squeeze()
x = torch.exp(x)
is_neg = is_neg.float()
is_need = torch.cat((torch.ones([bsz, 1], dtype=torch.float).cuda(), is_neg), dim=1)
neg_div = (x * is_need).sum(dim=1, keepdim=True)
        x_logit = x[:, 0] / neg_div.squeeze(1)  # per-sample ratio; a bare neg_div would broadcast to (bsz, bsz)
x_logit = -torch.log(x_logit)
loss = x_logit.mean()
# x_mask = x_logit * is_pos.float()
# num_pos = is_pos.sum(dim=1, keepdim=True).float()
# x_mask = x_mask / num_pos
# loss = x_logit.sum(dim=1).mean(dim=0)
return loss
# loss = 0
# for i in range(bsz):
# tmp_loss = 0
# pos_inds = torch.where(is_pos[i] == 1)[0].tolist()
# num_pos = len(pos_inds)
# for j in pos_inds:
# tmp_loss -= torch.log(x[i, j] / (neg_div[i][0] + x[i, j]))
# loss += (tmp_loss / num_pos)
# loss = loss / bsz
#
# print(loss)
# print(fast_loss)
# from ipdb import set_trace; set_trace()
# print(ce_loss)
# print(loss)
# def forward(self, x, is_pos):
# is_pos = is_pos.float()
# bsz = x.shape[0]
# x = x.squeeze()
#
# label = torch.zeros([bsz]).cuda().long()
# # loss = self.criterion1(x, ce_label)
#
# # from ipdb import set_trace; set_trace()
# # is_neg = 1 - is_pos[:, 1:]
# x = F.softmax(x, dim=1)
# x = (x * is_pos).sum(dim=1, keepdim=True)
# # neg_logit = (x * is_neg)
# # x = torch.cat((pos_logit, x[:, 1:]), dim=1) # [bsz, 16385]
# # x = torch.log(x)
#
# loss = self.criterion(x.log(), label)
# return loss
# x = F.softmax(x, dim=1)
# label = torch.cat((torch.ones([bsz, 1], dtype=torch.float32).cuda(), is_pos), dim=1) # (bsz, dim)
# label = F.softmax(label, dim=1)
# label = label / label.sum(dim=1, keepdim=True)
# loss = torch.sum(x * torch.log(1e-9 + x / (label + 1e-9)), dim=1).mean(dim=0)
# loss = torch.sum(x * (1e-9 + torch.log(x) - torch.log(label + 1e-9)), dim=1).mean(dim=0)
# from ipdb import set_trace; set_trace()
# loss = self.criterion(x, label)
# return loss
| 3,840 | 29.975806 | 108 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/models/resnet_multi.py | from __future__ import absolute_import
from torch import nn
from torch.nn import functional as F
from torch.nn import Parameter
from torch.nn import init
import torchvision
import torch
from ..layers import (
IBN,
Non_local,
get_norm,
)
from .gem_pooling import GeneralizedMeanPoolingP
__all__ = ['ResNet', 'resnet50_multi']
class ResNet(nn.Module):
__factory = {
18: torchvision.models.resnet18,
34: torchvision.models.resnet34,
50: torchvision.models.resnet50,
101: torchvision.models.resnet101,
152: torchvision.models.resnet152,
}
def __init__(self, depth, mb_h=2048, NL=False, pretrained=True, cut_at_pooling=False,
num_features=0, norm=False, dropout=0, num_classes=None,sour_class=751):
super(ResNet, self).__init__()
self.pretrained = pretrained
self.depth = depth
self.cut_at_pooling = cut_at_pooling
# Construct base (pretrained) resnet
if depth not in ResNet.__factory:
raise KeyError("Unsupported depth:", depth)
resnet = ResNet.__factory[depth](pretrained=pretrained)
resnet.layer4[0].conv2.stride = (1,1)
resnet.layer4[0].downsample[0].stride = (1,1)
self.base = nn.Sequential(
resnet.conv1, resnet.bn1, resnet.maxpool) # no relu
self.layer1=resnet.layer1
self.layer2=resnet.layer2
self.layer3=resnet.layer3
self.layer4=resnet.layer4
layers= {34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], }[depth]
non_layers = {34: [3, 4, 6, 3], 50: [0, 2, 3, 0], 101: [0, 2, 9, 0]}[depth]
num_splits=1
if NL:
self._build_nonlocal(layers, non_layers, 'BN', num_splits)
else:
self.NL_1_idx = self.NL_2_idx = self.NL_3_idx = self.NL_4_idx = []
# print("w/o GeneralizedMeanPoolingP")
# self.gap = nn.AdaptiveAvgPool2d(1)
print("GeneralizedMeanPoolingP")
self.gap = GeneralizedMeanPoolingP(3)
self.memorybank_fc = nn.Linear(2048, mb_h)
self.mbn=nn.BatchNorm1d(mb_h)
init.kaiming_normal_(self.memorybank_fc.weight, mode='fan_out')
init.constant_(self.memorybank_fc.bias, 0)
# self.memorybank_fc = nn.Sequential(
# nn.Linear(2048, 512, bias=True),
# nn.BatchNorm1d(512),
# nn.LeakyReLU(),
# nn.Linear(512, 128, bias=False),
# nn.BatchNorm1d(128)
# )
if not self.cut_at_pooling:
self.num_features = num_features
self.norm = norm
self.dropout = dropout
self.has_embedding = num_features > 0
self.num_classes = num_classes
out_planes = resnet.fc.in_features
# Append new layers
if self.has_embedding:
self.feat = nn.Linear(out_planes, self.num_features)
self.feat_bn = nn.BatchNorm1d(self.num_features)
init.kaiming_normal_(self.feat.weight, mode='fan_out')
init.constant_(self.feat.bias, 0)
else:
# Change the num_features to CNN output channels
self.num_features = out_planes
self.feat_bn = nn.BatchNorm1d(self.num_features)
self.feat_bn_3 = nn.BatchNorm1d(1024)
self.feat_bn.bias.requires_grad_(False)
self.feat_bn_3.bias.requires_grad_(False)
if self.dropout > 0:
self.drop = nn.Dropout(self.dropout)
if self.num_classes is not None:
for i,num_cluster in enumerate(self.num_classes):
exec("self.classifier{}_{} = nn.Linear(self.num_features, {}, bias=False)".format(i,num_cluster,num_cluster))
exec("init.normal_(self.classifier{}_{}.weight, std=0.001)".format(i,num_cluster))
for i,num_cluster in enumerate(self.num_classes):
exec("self.classifier3_{}_{} = nn.Linear(1024, {}, bias=False)".format(i,num_cluster,num_cluster))
exec("init.normal_(self.classifier3_{}_{}.weight, std=0.001)".format(i,num_cluster))
# self.weight = Parameter(torch.FloatTensor(self.num_classes[0],self.num_features))
# nn.init.xavier_uniform_(self.weight)
# self.weight3 = Parameter(torch.FloatTensor(self.num_classes[0],1024))
# nn.init.xavier_uniform_(self.weight3)
# sour_class=751
# self.classifier_ml = nn.Sequential(
# nn.Linear(self.num_features, 512, bias=True),
# nn.BatchNorm1d(512),
# nn.LeakyReLU(),
# nn.Linear(512, sour_class, bias=False),
# nn.BatchNorm1d(sour_class)
# )
if not pretrained:
self.reset_params()
def _build_nonlocal(self, layers, non_layers, bn_norm, num_splits):
self.NL_1 = nn.ModuleList(
[Non_local(256, bn_norm, num_splits) for _ in range(non_layers[0])])
self.NL_1_idx = sorted([layers[0] - (i + 1) for i in range(non_layers[0])])
self.NL_2 = nn.ModuleList(
[Non_local(512, bn_norm, num_splits) for _ in range(non_layers[1])])
self.NL_2_idx = sorted([layers[1] - (i + 1) for i in range(non_layers[1])])
self.NL_3 = nn.ModuleList(
[Non_local(1024, bn_norm, num_splits) for _ in range(non_layers[2])])
self.NL_3_idx = sorted([layers[2] - (i + 1) for i in range(non_layers[2])])
self.NL_4 = nn.ModuleList(
[Non_local(2048, bn_norm, num_splits) for _ in range(non_layers[3])])
self.NL_4_idx = sorted([layers[3] - (i + 1) for i in range(non_layers[3])])
def forward(self, x, feature_withbn=False, training=False, cluster=False):
x = self.base(x)
NL1_counter = 0
if len(self.NL_1_idx) == 0:
self.NL_1_idx = [-1]
for i in range(len(self.layer1)):
x = self.layer1[i](x)
if i == self.NL_1_idx[NL1_counter]:
_, C, H, W = x.shape
x = self.NL_1[NL1_counter](x)
NL1_counter += 1
# Layer 2
NL2_counter = 0
if len(self.NL_2_idx) == 0:
self.NL_2_idx = [-1]
for i in range(len(self.layer2)):
x = self.layer2[i](x)
if i == self.NL_2_idx[NL2_counter]:
_, C, H, W = x.shape
x = self.NL_2[NL2_counter](x)
NL2_counter += 1
# Layer 3
x3=x
NL3_counter = 0
if len(self.NL_3_idx) == 0:
self.NL_3_idx = [-1]
for i in range(len(self.layer3)):
x3 = self.layer3[i](x3)
if i == self.NL_3_idx[NL3_counter]:
_, C, H, W = x3.shape
x3 = self.NL_3[NL3_counter](x3)
NL3_counter += 1
# Layer 4
x4=x3
NL4_counter = 0
if len(self.NL_4_idx) == 0:
self.NL_4_idx = [-1]
for i in range(len(self.layer4)):
x4 = self.layer4[i](x4)
if i == self.NL_4_idx[NL4_counter]:
                _, C, H, W = x4.shape
x4 = self.NL_4[NL4_counter](x4)
NL4_counter += 1
x = self.gap(x4)
x3 = self.gap(x3)
x = x.view(x.size(0), -1)
x3 = x3.view(x3.size(0), -1)
bn_x = self.feat_bn(x)
bn_x3 = self.feat_bn_3(x3)
# if training is False:
# bn_x = F.normalize(bn_x)
# return bn_x
if self.dropout > 0:#FALSE
bn_x = self.drop(bn_x)
prob = []
prob_3=[]
if self.num_classes is not None:
for i,num_cluster in enumerate(self.num_classes):
exec("prob.append(self.classifier{}_{}(bn_x))".format(i,num_cluster))
for i, num_cluster in enumerate(self.num_classes):
exec("prob_3.append(self.classifier3_{}_{}(bn_x3))".format(i, num_cluster))
else:
return x, bn_x
if feature_withbn:#False
return bn_x, prob
mb_x = self.mbn(self.memorybank_fc(bn_x))
# ml_x = self.classifier_ml(bn_x)
# prob = [F.linear(F.normalize(bn_x), F.normalize(self.weight))]
# prob_3 = [F.linear(F.normalize(bn_x3), F.normalize(self.weight3))]
if training is False:
bn_x = F.normalize(bn_x)
return bn_x
return x, prob, mb_x, None, prob_3, x3
def reset_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
resnet = ResNet.__factory[self.depth](pretrained=self.pretrained)
self.base[0].load_state_dict(resnet.conv1.state_dict())
self.base[1].load_state_dict(resnet.bn1.state_dict())
        # layer1-4 are separate attributes here (not inside self.base), so load
        # them directly; the maxpool in self.base[2] has no parameters to load.
        self.layer1.load_state_dict(resnet.layer1.state_dict())
        self.layer2.load_state_dict(resnet.layer2.state_dict())
        self.layer3.load_state_dict(resnet.layer3.state_dict())
        self.layer4.load_state_dict(resnet.layer4.state_dict())
def resnet50_multi(mb_h,sour_class,**kwargs):
return ResNet(50, mb_h=mb_h, sour_class=sour_class,**kwargs)
def resnet50_multi_sbs(mb_h,sour_class,**kwargs):
return ResNet(50, mb_h=mb_h,sour_class=sour_class,NL=True, **kwargs)
| 9,954 | 36.566038 | 129 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/models/memory_bank.py | import torch
from torch import nn
from torch.nn import functional as F
import math
from numpy.testing import assert_almost_equal
def normalize(x, axis=-1):
"""Normalizing to unit length along the specified dimension.
Args:
x: pytorch Variable
Returns:
x: pytorch Variable, same shape as input
"""
x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
return x
def sigmoid(tensor, temp=1.0):
exponent = -tensor / temp
exponent = torch.clamp(exponent, min=-50, max=50)
y = 1.0 / (1.0 + torch.exp(exponent))
return y
def logsumexp(value, weight = 1, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
# TODO: torch.max(value, dim=None) threw an error at time of writing
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(weight * torch.exp(value0),
dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(weight * torch.exp(value - m))
return m + torch.log(sum_exp)
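# Illustrative check only (assumed, not called anywhere in the repo): with the
# default weight=1 the helper above matches torch.logsumexp; a non-trivial
# `weight` simply re-weights each exponentiated term inside the sum, which is
# how the circle loss below injects per-pair uncertainty.
def _logsumexp_demo():
    v = torch.randn(4, 8)
    assert torch.allclose(logsumexp(v, dim=1), torch.logsumexp(v, dim=1), atol=1e-6)
    w = torch.rand(4, 8)  # e.g. per-pair certainty weights in [0, 1]
    return logsumexp(v, weight=w, dim=1)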
class onlinememory(nn.Module):
"""Fixed-size queue with momentum encoder"""
def __init__(self, inputSize, sour_numclass, K, index2label, choice_c=1, T=0.07, use_softmax=False,
cluster_num=0):
super(onlinememory, self).__init__()
self.inputSize = inputSize
self.sour_numclass = sour_numclass
self.queueSize = K
self.T = T
self.index = 0
self.use_softmax = use_softmax
self.register_buffer('params', torch.tensor([-1]))
stdv = 1. / math.sqrt(inputSize / 3)
self.register_buffer('memory', torch.rand(self.queueSize, inputSize).mul_(2 * stdv).add_(-stdv))
self.register_buffer('index_memory', torch.ones(self.queueSize, dtype=torch.long).fill_(-1))
self.register_buffer('uncer', torch.ones(self.queueSize, dtype=torch.float).fill_(1))
print('Using queue shape: ({},{})'.format(self.queueSize, inputSize))
# self.register_buffer('sour_memory', torch.rand(self.sour_numclass, inputSize).mul_(2 * stdv).add_(-stdv))
# self.register_buffer('sour_index_memory', torch.ones(self.sour_numclass, dtype=torch.long).fill_(-1))
# print('Using queue shape: ({},{})'.format(self.sour_numclass, inputSize))
self.choice_c = choice_c
self.index_pl = -1 # 3-cluster_num if cluster_num<=3 else 0
self.index2label = index2label
self.m = 0.25
self.gamma = 128
self.momentum = 0.2
################
# his loss
num_steps = 151
self.step = 2 / (num_steps - 1)
self.eps = 1 / num_steps
self.t = torch.arange(-1, 1 + self.step, self.step).view(-1, 1).cuda()
self.tsize = self.t.size()[0]
###############
# smooth ap loss
self.anneal = 0.01
self.num_id = 16
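        # (Editorial note, inferred from forward() below): rows [0, sour_numclass)
        # of `memory` behave as per-class source centroids updated with momentum,
        # while rows [sour_numclass, queueSize) form a circular FIFO queue of
        # target-instance features; `index_memory` keeps the dataset index
        # (+ sour_numclass) of each queued feature and `uncer` its uncertainty.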
def memo_circle_loss(self, index, q1, uncer):
batchSize = q1.shape[0]
# import ipdb;ipdb.set_trace()
pseudo_label = torch.tensor([self.index2label[self.choice_c][i.item()] for i in index],
dtype=torch.long).cuda()
pseudo_label = pseudo_label.unsqueeze(1).expand(batchSize, self.queueSize)
memory_label_list=[]
for i in self.index_memory:
try:
if i.item() == -1:
memory_label_list.append(-1)
else:
memory_label_list.append(self.index2label[self.choice_c][i.item()])
except:
print("error index{}".format(i.item()))
memory_label = torch.tensor(memory_label_list,dtype=torch.long).cuda()
# memory_label = torch.tensor(
# [self.index2label[self.choice_c][i.item()] if i.item() != -1 else -1 for i in self.index_memory],
# dtype=torch.long).cuda()
memory_label = memory_label.unsqueeze(0).expand(batchSize, self.queueSize)
is_pos = pseudo_label.eq(memory_label).float()
is_neg = pseudo_label.ne(memory_label).float()
queue = self.memory.clone()
l_logist = torch.matmul(queue.detach(), (q1).transpose(1, 0))
l_logist = l_logist.transpose(0, 1).contiguous()
sim_mat = l_logist
s_p = sim_mat * is_pos
s_n = sim_mat * is_neg
if uncer is not None:
exp_variance = (uncer.unsqueeze(1).expand(batchSize, self.queueSize) +self.uncer.clone().unsqueeze(0).expand(batchSize, self.queueSize)) / 2.0
else:
exp_variance=1
alpha_p = F.relu(-s_p.detach() + 1 + self.m)
alpha_n = F.relu(s_n.detach() + self.m)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - self.gamma * alpha_p * (s_p - delta_p)
logit_n = self.gamma * alpha_n * (s_n - delta_n)
# ,weight=exp_variance
loss = (F.softplus(logsumexp(logit_p - 99999.0 * is_neg, weight=exp_variance, dim=1) +
logsumexp(logit_n - 99999.0 * is_pos, weight=exp_variance, dim=1))).mean()/ 18.0
return loss
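    # (Editorial note on memo_circle_loss above, assumed reading): it is a
    # Circle-loss style objective computed against the whole memory, roughly
    #   L = softplus( logsumexp_p[-gamma*alpha_p*(s_p-(1-m))]
    #               + logsumexp_n[ gamma*alpha_n*(s_n-m)] ) / 18,
    # where the +/-99999 terms mask out the opposite pair set and the optional
    # `uncer` weights down-weight pairs with uncertain pseudo labels.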
def memo_center_circle_loss(self, index, q1):
batchSize = q1.shape[0]
pseudo_label = torch.tensor([self.index2label[self.choice_c][i.item()] for i in index],
dtype=torch.long).cuda()
pseudo_label = pseudo_label.unsqueeze(1).expand(batchSize, self.sour_numclass)
# pseudo_label = index.expand(batchSize, self.sour_numclass)
memory_label = torch.tensor(
[self.index2label[self.choice_c][i] for i in range(self.sour_numclass)],
dtype=torch.long).cuda()
memory_label = memory_label.unsqueeze(0).expand(batchSize, self.sour_numclass)
is_pos = pseudo_label.eq(memory_label).float()
is_neg = pseudo_label.ne(memory_label).float()
queue = self.memory[:self.sour_numclass, :].clone()
l_logist = torch.matmul(queue.detach(), (q1).transpose(1, 0))
l_logist = l_logist.transpose(0, 1).contiguous()
sim_mat = l_logist
s_p = sim_mat * is_pos
s_n = sim_mat * is_neg
alpha_p = F.relu(-s_p.detach() + 1 + self.m)
alpha_n = F.relu(s_n.detach() + self.m)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - self.gamma * alpha_p * (s_p - delta_p)
logit_n = self.gamma * alpha_n * (s_n - delta_n)
loss = F.softplus(logsumexp(logit_p - 99999.0 * is_neg, dim=1) +
logsumexp(logit_n - 99999.0 * is_pos, dim=1)).mean() / 18.0
return loss
def forward(self, q1, q2, index, tar_tri, tar_tri_ema, sour_labels, uncer=None, epoch=0):
batchSize = q1.shape[0]
# tar_tri = normalize(tar_tri, axis=-1)
q1 = normalize(q1, axis=-1)
q2 = normalize(q2, axis=-1)
loss_q1 = self.memo_circle_loss(index + self.sour_numclass, q1, uncer)
loss_q2 = self.memo_center_circle_loss(sour_labels, q2)
with torch.no_grad():
q1 = q1.detach()
out_ids = torch.arange(batchSize).cuda()
out_ids += self.index
out_ids = torch.fmod(out_ids, self.queueSize - self.sour_numclass)
out_ids = (out_ids + self.sour_numclass).long()
self.memory.index_copy_(0, out_ids, q1)
self.index_memory.index_copy_(0, out_ids, index + self.sour_numclass)
if uncer is not None:
self.uncer.index_copy_(0, out_ids, uncer)
self.index = (self.index + batchSize) % (self.queueSize - self.sour_numclass)
for x, y in zip(q2, sour_labels):
self.memory[y] = self.momentum * self.memory[y] + (1. - self.momentum) * x
self.memory[y] /= self.memory[y].norm()
return loss_q1, loss_q2, None, None
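# Hedged construction sketch (sizes and the label mapping are illustrative, not
# taken from the training script): the memory holds `sour_numclass` source
# centroids followed by a FIFO queue of target features.
#
#   memory = onlinememory(inputSize=2048, sour_numclass=751,
#                         K=751 + 16384, index2label=[index2label_dict],
#                         choice_c=0).cuda()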
| 7,998 | 38.019512 | 154 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/models/resnet.py | from __future__ import absolute_import
from torch import nn
from torch.nn import functional as F
from torch.nn import init
import torchvision
import torch
from ..layers import (
IBN,
Non_local,
get_norm,
)
from .gem_pooling import GeneralizedMeanPoolingP
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnet50_sbs']
class ResNet(nn.Module):
__factory = {
18: torchvision.models.resnet18,
34: torchvision.models.resnet34,
50: torchvision.models.resnet50,
101: torchvision.models.resnet101,
152: torchvision.models.resnet152,
}
def __init__(self, depth, mb_h=2048, with_nl=False,pretrained=True, cut_at_pooling=False,
num_features=0, norm=False, dropout=0, num_classes=None, sour_class=751):
super(ResNet, self).__init__()
self.pretrained = pretrained
self.depth = depth
self.cut_at_pooling = cut_at_pooling
# Construct base (pretrained) resnet
if depth not in ResNet.__factory:
raise KeyError("Unsupported depth:", depth)
resnet = ResNet.__factory[depth](pretrained=pretrained)
resnet.layer4[0].conv2.stride = (1,1)
resnet.layer4[0].downsample[0].stride = (1,1)
self.base = nn.Sequential(
resnet.conv1, resnet.bn1, resnet.maxpool) # no relu
self.layer1=resnet.layer1
self.layer2=resnet.layer2
self.layer3=resnet.layer3
self.layer4=resnet.layer4
layers= {34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], }[depth]
non_layers = {34: [3, 4, 6, 3], 50: [0, 2, 3, 0], 101: [0, 2, 9, 0]}[depth]
num_splits=1
if with_nl:
self._build_nonlocal(layers, non_layers, 'BN', num_splits)
else:
self.NL_1_idx = self.NL_2_idx = self.NL_3_idx = self.NL_4_idx = []
# print("w/o GeneralizedMeanPoolingP")
# self.gap = nn.AdaptiveAvgPool2d(1)
print("GeneralizedMeanPoolingP")
self.gap = GeneralizedMeanPoolingP(3)
self.memorybank_fc = nn.Linear(2048, mb_h)
self.mbn=nn.BatchNorm1d(mb_h)
init.kaiming_normal_(self.memorybank_fc.weight, mode='fan_out')
init.constant_(self.memorybank_fc.bias, 0)
# self.memorybank_fc = nn.Sequential(
# nn.Linear(2048, 512, bias=True),
# nn.BatchNorm1d(512),
# nn.LeakyReLU(),
# nn.Linear(512, 128, bias=False),
# nn.BatchNorm1d(128)
# )
if not self.cut_at_pooling:
self.num_features = num_features
self.norm = norm
self.dropout = dropout
self.has_embedding = num_features > 0
self.num_classes = num_classes
out_planes = resnet.fc.in_features
# Append new layers
if self.has_embedding:
self.feat = nn.Linear(out_planes, self.num_features)
self.feat_bn = nn.BatchNorm1d(self.num_features)
init.kaiming_normal_(self.feat.weight, mode='fan_out')
init.constant_(self.feat.bias, 0)
else:
# Change the num_features to CNN output channels
self.num_features = out_planes
self.feat_bn = nn.BatchNorm1d(self.num_features)
self.feat_bn.bias.requires_grad_(False)
if self.dropout > 0:
self.drop = nn.Dropout(self.dropout)
if self.num_classes is not None:
for i,num_cluster in enumerate(self.num_classes):
exec("self.classifier{}_{} = nn.Linear(self.num_features, {}, bias=False)".format(i,num_cluster,num_cluster))
exec("init.normal_(self.classifier{}_{}.weight, std=0.001)".format(i,num_cluster))
if not pretrained:
self.reset_params()
def _build_nonlocal(self, layers, non_layers, bn_norm, num_splits):
self.NL_1 = nn.ModuleList(
[Non_local(256, bn_norm, num_splits) for _ in range(non_layers[0])])
self.NL_1_idx = sorted([layers[0] - (i + 1) for i in range(non_layers[0])])
self.NL_2 = nn.ModuleList(
[Non_local(512, bn_norm, num_splits) for _ in range(non_layers[1])])
self.NL_2_idx = sorted([layers[1] - (i + 1) for i in range(non_layers[1])])
self.NL_3 = nn.ModuleList(
[Non_local(1024, bn_norm, num_splits) for _ in range(non_layers[2])])
self.NL_3_idx = sorted([layers[2] - (i + 1) for i in range(non_layers[2])])
self.NL_4 = nn.ModuleList(
[Non_local(2048, bn_norm, num_splits) for _ in range(non_layers[3])])
self.NL_4_idx = sorted([layers[3] - (i + 1) for i in range(non_layers[3])])
def forward(self, x, feature_withbn=False, training=False, cluster=False):
x = self.base(x)
NL1_counter = 0
if len(self.NL_1_idx) == 0:
self.NL_1_idx = [-1]
for i in range(len(self.layer1)):
x = self.layer1[i](x)
if i == self.NL_1_idx[NL1_counter]:
_, C, H, W = x.shape
x = self.NL_1[NL1_counter](x)
NL1_counter += 1
# Layer 2
NL2_counter = 0
if len(self.NL_2_idx) == 0:
self.NL_2_idx = [-1]
for i in range(len(self.layer2)):
x = self.layer2[i](x)
if i == self.NL_2_idx[NL2_counter]:
_, C, H, W = x.shape
x = self.NL_2[NL2_counter](x)
NL2_counter += 1
# Layer 3
NL3_counter = 0
if len(self.NL_3_idx) == 0:
self.NL_3_idx = [-1]
for i in range(len(self.layer3)):
x = self.layer3[i](x)
if i == self.NL_3_idx[NL3_counter]:
_, C, H, W = x.shape
x = self.NL_3[NL3_counter](x)
NL3_counter += 1
# Layer 4
NL4_counter = 0
if len(self.NL_4_idx) == 0:
self.NL_4_idx = [-1]
for i in range(len(self.layer4)):
x = self.layer4[i](x)
if i == self.NL_4_idx[NL4_counter]:
_, C, H, W = x.shape
x = self.NL_4[NL4_counter](x)
NL4_counter += 1
x = self.gap(x)
x = x.view(x.size(0), -1)
        if self.cut_at_pooling:  # False in the default configuration
            return x
if self.has_embedding:
bn_x = self.feat_bn(self.feat(x))#FALSE
else:
bn_x = self.feat_bn(x)#1
if training is False:
bn_x = F.normalize(bn_x)
return bn_x
if self.norm:#FALSE
bn_x = F.normalize(bn_x)
elif self.has_embedding:#FALSE
bn_x = F.relu(bn_x)
if self.dropout > 0:#FALSE
bn_x = self.drop(bn_x)
prob = []
if self.num_classes is not None:
for i,num_cluster in enumerate(self.num_classes):
exec("prob.append(self.classifier{}_{}(bn_x))".format(i,num_cluster))
else:
return x, bn_x
if feature_withbn:#False
return bn_x, prob
mb_x = self.mbn(self.memorybank_fc(bn_x))
return x, prob, mb_x, None
def reset_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
resnet = ResNet.__factory[self.depth](pretrained=self.pretrained)
self.base[0].load_state_dict(resnet.conv1.state_dict())
self.base[1].load_state_dict(resnet.bn1.state_dict())
        # layer1-4 are separate attributes here (not inside self.base), so load
        # them directly; the maxpool in self.base[2] has no parameters to load.
        self.layer1.load_state_dict(resnet.layer1.state_dict())
        self.layer2.load_state_dict(resnet.layer2.state_dict())
        self.layer3.load_state_dict(resnet.layer3.state_dict())
        self.layer4.load_state_dict(resnet.layer4.state_dict())
def resnet18(**kwargs):
return ResNet(18, **kwargs)
def resnet34(**kwargs):
return ResNet(34, **kwargs)
def resnet50(mb_h,sour_class,**kwargs):
return ResNet(50, mb_h=mb_h, sour_class=sour_class, **kwargs)
def resnet50_sbs(mb_h,sour_class,**kwargs):
return ResNet(50, mb_h=mb_h, sour_class=sour_class,with_nl=True, **kwargs)
def resnet101(**kwargs):
return ResNet(101, **kwargs)
def resnet152(**kwargs):
return ResNet(152, **kwargs)
| 8,933 | 34.879518 | 129 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/models/gem_pooling.py | # encoding: utf-8
"""
@author: l1aoxingyu
@contact: [email protected]
"""
import torch
import torch.nn.functional as F
from torch import nn
class GeneralizedMeanPooling(nn.Module):
r"""Applies a 2D power-average adaptive pooling over an input signal composed of several input planes.
The function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)`
- At p = infinity, one gets Max Pooling
- At p = 1, one gets Average Pooling
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H
H and W can be either a ``int``, or ``None`` which means the size will
be the same as that of the input.
"""
def __init__(self, norm, output_size=1, eps=1e-6):
super(GeneralizedMeanPooling, self).__init__()
assert norm > 0
self.p = float(norm)
self.output_size = output_size
self.eps = eps
def forward(self, x):
x = x.clamp(min=self.eps).pow(self.p)
return torch.nn.functional.adaptive_avg_pool2d(x, self.output_size).pow(1. / self.p)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ str(self.p) + ', ' \
+ 'output_size=' + str(self.output_size) + ')'
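# Illustrative sketch (not used by the backbones directly): GeM pooling
# interpolates between average pooling (p -> 1) and max pooling (p -> inf).
def _gem_demo():
    feat = torch.rand(2, 2048, 16, 8)        # (batch, channels, H, W)
    gem = GeneralizedMeanPooling(norm=3)     # fixed p = 3
    pooled = gem(feat)                       # -> (2, 2048, 1, 1)
    return pooled.view(feat.size(0), -1)     # flatten as the ResNet wrappers do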
class GeneralizedMeanPoolingP(GeneralizedMeanPooling):
""" Same, but norm is trainable
"""
def __init__(self, norm=3, output_size=1, eps=1e-6):
super(GeneralizedMeanPoolingP, self).__init__(norm, output_size, eps)
self.p = nn.Parameter(torch.ones(1) * norm) | 1,764 | 35.020408 | 106 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/models/dsbn.py | import torch
import torch.nn as nn
# Domain-specific BatchNorm
class DSBN2d(nn.Module):
def __init__(self, planes):
super(DSBN2d, self).__init__()
self.num_features = planes
self.BN_S = nn.BatchNorm2d(planes)
self.BN_T = nn.BatchNorm2d(planes)
def forward(self, x):
if (not self.training):
return self.BN_T(x)
bs = x.size(0)
assert (bs%2==0)
split = torch.split(x, int(bs/2), 0)
out1 = self.BN_S(split[0].contiguous())
out2 = self.BN_T(split[1].contiguous())
out = torch.cat((out1, out2), 0)
return out
class DSBN1d(nn.Module):
def __init__(self, planes):
super(DSBN1d, self).__init__()
self.num_features = planes
self.BN_S = nn.BatchNorm1d(planes)
self.BN_T = nn.BatchNorm1d(planes)
def forward(self, x):
if (not self.training):
return self.BN_T(x)
bs = x.size(0)
assert (bs%2==0)
split = torch.split(x, int(bs/2), 0)
out1 = self.BN_S(split[0].contiguous())
out2 = self.BN_T(split[1].contiguous())
out = torch.cat((out1, out2), 0)
return out
def convert_dsbn(model):
for _, (child_name, child) in enumerate(model.named_children()):
# if 'NL_' in child_name:continue
if isinstance(child, nn.BatchNorm2d):
m = DSBN2d(child.num_features)
m.BN_S.load_state_dict(child.state_dict())
m.BN_T.load_state_dict(child.state_dict())
setattr(model, child_name, m)
elif isinstance(child, nn.BatchNorm1d):
m = DSBN1d(child.num_features)
m.BN_S.load_state_dict(child.state_dict())
m.BN_T.load_state_dict(child.state_dict())
setattr(model, child_name, m)
else:
convert_dsbn(child)
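# Minimal usage sketch (assumed workflow, not part of this file's API surface):
# switch a backbone to domain-specific BN for joint source/target training and
# collapse it back to plain BN (target statistics) for deployment.
def _dsbn_demo(model):
    convert_dsbn(model)                 # every BatchNorm{1,2}d -> DSBN{1,2}d
    # ...train with batches stacked as [source_half, target_half] along dim 0...
    model = model.cpu()                 # convert_bn() asserts the model is on CPU
    convert_bn(model, use_target=True)  # keep only the target-domain branch
    return model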
def convert_bn(model, use_target=True):
for _, (child_name, child) in enumerate(model.named_children()):
assert(not next(model.parameters()).is_cuda)
if isinstance(child, DSBN2d):
m = nn.BatchNorm2d(child.num_features)
if use_target:
m.load_state_dict(child.BN_T.state_dict())
else:
m.load_state_dict(child.BN_S.state_dict())
setattr(model, child_name, m)
elif isinstance(child, DSBN1d):
m = nn.BatchNorm1d(child.num_features)
if use_target:
m.load_state_dict(child.BN_T.state_dict())
else:
m.load_state_dict(child.BN_S.state_dict())
setattr(model, child_name, m)
else:
convert_bn(child, use_target=use_target)
| 2,669 | 32.797468 | 68 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/models/__init__.py | from __future__ import absolute_import
from .resnet import *
# from .resnet_sbs import resnet50_sbs
from .resnet_multi import resnet50_multi,resnet50_multi_sbs
__factory = {
'resnet18': resnet18,
'resnet34': resnet34,
'resnet50': resnet50,
'resnet101': resnet101,
'resnet152': resnet152,
'resnet50_sbs': resnet50_sbs,
'resnet50_multi': resnet50_multi,
'resnet50_multi_sbs': resnet50_multi_sbs
}
def names():
return sorted(__factory.keys())
def create(name, mb_h=2048, sour_class=751, *args, **kwargs):
"""
Create a model instance.
Parameters
----------
    name : str
        Model name. Can be one of 'resnet18', 'resnet34', 'resnet50',
        'resnet101', 'resnet152', 'resnet50_sbs', 'resnet50_multi',
        and 'resnet50_multi_sbs'.
pretrained : bool, optional
Only applied for 'resnet*' models. If True, will use ImageNet pretrained
model. Default: True
cut_at_pooling : bool, optional
If True, will cut the model before the last global pooling layer and
ignore the remaining kwargs. Default: False
num_features : int, optional
If positive, will append a Linear layer after the global pooling layer,
with this number of output units, followed by a BatchNorm layer.
        Otherwise these layers will not be appended. Default: 0
norm : bool, optional
If True, will normalize the feature to be unit L2-norm for each sample.
Otherwise will append a ReLU layer after the above Linear layer if
num_features > 0. Default: False
dropout : float, optional
If positive, will append a Dropout layer with this dropout rate.
Default: 0
    num_classes : list of int, optional
        If given, appends one Linear classifier head per entry, each with the
        corresponding number of output units. Default: None
"""
if name not in __factory:
raise KeyError("Unknown model:", name)
return __factory[name](mb_h=mb_h,sour_class=sour_class,*args, **kwargs)
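# Hedged usage sketch (every argument value below is illustrative): build the
# multi-head backbone used for clustering-based adaptation. `num_classes` is a
# list because the networks create one classifier head per clustering setting.
#
#   model = create('resnet50_multi', mb_h=2048, sour_class=751,
#                  num_features=0, dropout=0, num_classes=[500])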
| 2,014 | 34.982143 | 80 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/datasets/dukemtmc.py | from __future__ import print_function, absolute_import
import os.path as osp
import glob
import re
import urllib
import zipfile
from ..utils.data import BaseImageDataset
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json
class DukeMTMC(BaseImageDataset):
"""
DukeMTMC-reID
Reference:
1. Ristani et al. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. ECCVW 2016.
2. Zheng et al. Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in vitro. ICCV 2017.
URL: https://github.com/layumi/DukeMTMC-reID_evaluation
Dataset statistics:
# identities: 1404 (train + query)
    # images: 16522 (train) + 2228 (query) + 17661 (gallery)
# cameras: 8
"""
dataset_dir = '.'
def __init__(self, root, ncl=1, verbose=True, **kwargs):
super(DukeMTMC, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-reID.zip'
self.train_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_train')
self.query_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/query')
self.gallery_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_test')
self.camstyle_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_train_camstyle')
self._download_data()
self._check_before_run()
self.ncl=ncl
self.num_cam= 8
# camstytrain = self._process_dir(self.camstylegallery_dir, relabel=True)
train = self._process_dir(self.train_dir, relabel=True)
query = self._process_dir(self.query_dir, relabel=False)
gallery = self._process_dir(self.gallery_dir, relabel=False)
if verbose:
print("=> DukeMTMC-reID loaded")
self.print_dataset_statistics(train, query, gallery)
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
def _download_data(self):
if osp.exists(self.dataset_dir):
print("This dataset has been downloaded.")
return
print("Creating directory {}".format(self.dataset_dir))
mkdir_if_missing(self.dataset_dir)
fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))
print("Downloading DukeMTMC-reID dataset")
urllib.request.urlretrieve(self.dataset_url, fpath)
print("Extracting files")
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(self.dataset_dir)
zip_ref.close()
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.dataset_dir):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if not osp.exists(self.train_dir):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if not osp.exists(self.query_dir):
raise RuntimeError("'{}' is not available".format(self.query_dir))
if not osp.exists(self.gallery_dir):
raise RuntimeError("'{}' is not available".format(self.gallery_dir))
def _process_dir(self, dir_path, relabel=False):
img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
pid_container = set()
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
pid, camid = map(int, pattern.search(img_path).groups())
assert 1 <= camid <= 8
camid -= 1 # index starts from 0
if relabel: pid = pid2label[pid]
pids=()
for _ in range(self.ncl):
pids = (pid,)+pids
item = (img_path,) + pids + (camid,)
dataset.append(item)
return dataset
| 4,391 | 38.927273 | 121 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/datasets/msmt17.py | from __future__ import print_function, absolute_import
import os.path as osp
import tarfile
import glob
import re
import urllib
import zipfile
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json
style='MSMT17_V1'
def _pluck_msmt(list_file, subdir, ncl, pattern=re.compile(r'([-\d]+)_([-\d]+)_([-\d]+)')):
with open(list_file, 'r') as f:
lines = f.readlines()
ret = []
pids_ = []
for line in lines:
line = line.strip()
fname = line.split(' ')[0]
pid, _, cam = map(int, pattern.search(osp.basename(fname)).groups())
if pid not in pids_:
pids_.append(pid)
img_path=osp.join(subdir,fname)
pids = ()
for _ in range(ncl):
pids = (pid,) + pids
item = (img_path,) + pids + (cam,)
ret.append(item)
# ret.append((osp.join(subdir,fname), pid, cam))
return ret, pids_
class Dataset_MSMT(object):
def __init__(self, root, ncl):
self.root = root
self.train, self.val, self.trainval = [], [], []
self.query, self.gallery = [], []
self.num_train_ids, self.num_val_ids, self.num_trainval_ids = 0, 0, 0
self.ncl=ncl
@property
def images_dir(self):
return osp.join(self.root, style)
def load(self, verbose=True):
exdir = osp.join(self.root, style)
nametrain= 'train'#'mask_train_v2'
nametest = 'test'#'mask_test_v2'
self.train, train_pids = _pluck_msmt(osp.join(exdir, 'list_train.txt'), nametrain, self.ncl)
self.val, val_pids = _pluck_msmt(osp.join(exdir, 'list_val.txt'), nametrain, self.ncl)
self.train = self.train + self.val
self.query, query_pids = _pluck_msmt(osp.join(exdir, 'list_query.txt'), nametest, self.ncl)
self.gallery, gallery_pids = _pluck_msmt(osp.join(exdir, 'list_gallery.txt'), nametest, self.ncl)
self.num_train_pids = len(list(set(train_pids).union(set(val_pids))))
if verbose:
            print(self.__class__.__name__, "(MSMT17_V1) dataset loaded")
print(" ---------------------------")
print(" subset | # ids | # images")
print(" ---------------------------")
print(" train | {:5d} | {:8d}"
.format(self.num_train_pids, len(self.train)))
print(" query | {:5d} | {:8d}"
.format(len(query_pids), len(self.query)))
print(" gallery | {:5d} | {:8d}"
.format(len(gallery_pids), len(self.gallery)))
print(" ---------------------------")
class MSMT17(Dataset_MSMT):
def __init__(self, root, ncl=1, split_id=0, download=True):
super(MSMT17, self).__init__(root,ncl)
if download:
self.download()
self.load()
def download(self):
import re
import hashlib
import shutil
from glob import glob
from zipfile import ZipFile
raw_dir = osp.join(self.root)
mkdir_if_missing(raw_dir)
# Download the raw zip file
fpath = osp.join(raw_dir, style)
if osp.isdir(fpath):
print("Using downloaded file: " + fpath)
else:
raise RuntimeError("Please download the dataset manually to {}".format(fpath))
| 3,315 | 32.494949 | 105 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/datasets/personx.py | from __future__ import print_function, absolute_import
import os.path as osp
import glob
import re
import urllib
import zipfile
from ..utils.data import BaseImageDataset
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json
class personX(BaseImageDataset):
dataset_dir = '.'
def __init__(self, root, ncl=1, verbose=True, **kwargs):
super(personX, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.train_dir = osp.join(self.dataset_dir, 'challenge_datasets/personX/resultA/')
print("using the spgan data resultA")
self.query_dir = osp.join(self.dataset_dir, 'challenge_datasets/target_validation/image_query/')
self.gallery_dir = osp.join(self.dataset_dir, 'challenge_datasets/target_validation/image_gallery/')
self._check_before_run()
self.ncl=ncl
self.num_cam= 6
# camstytrain = self._process_dir(self.camstylegallery_dir, relabel=True)
train = self._process_dir(self.train_dir, relabel=True)
query = self._process_dir(self.query_dir, relabel=False)
gallery = self._process_dir(self.gallery_dir, relabel=False)
if verbose:
print("=> personx loaded")
self.print_dataset_statistics(train, query, gallery)
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.dataset_dir):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if not osp.exists(self.train_dir):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if not osp.exists(self.query_dir):
raise RuntimeError("'{}' is not available".format(self.query_dir))
if not osp.exists(self.gallery_dir):
raise RuntimeError("'{}' is not available".format(self.gallery_dir))
def _process_dir(self, dir_path, relabel=False):
img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
pid_container = set()
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
pid, camid = map(int, pattern.search(img_path).groups())
assert 1 <= camid <= 8
camid -= 1 # index starts from 0
if relabel: pid = pid2label[pid]
pids=()
for _ in range(self.ncl):
pids=(pid,)+pids
item=(img_path,) + pids + (camid,)
dataset.append(item)
return dataset
| 3,185 | 37.853659 | 115 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/datasets/personxval.py | from __future__ import print_function, absolute_import
import os.path as osp
import glob
import re
import urllib
import zipfile
from ..utils.data import BaseImageDataset
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json
class personXval(BaseImageDataset):
dataset_dir = '.'
def __init__(self, root, ncl=1, verbose=True, **kwargs):
super(personXval, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.train_dir = osp.join(self.dataset_dir, 'challenge_datasets/target_training/image_train/')
self.query_dir = osp.join(self.dataset_dir, 'challenge_datasets/target_validation/image_query/')
self.gallery_dir = osp.join(self.dataset_dir, 'challenge_datasets/target_validation/image_gallery/')
self._check_before_run()
self.ncl=ncl
self.num_cam= 6
# camstytrain = self._process_dir(self.camstylegallery_dir, relabel=True)
self.name2camera_path=osp.join(self.dataset_dir, 'challenge_datasets/target_training/label_target_training.txt')
with open(self.name2camera_path,'r') as f:
self.name2camera=f.readlines()
train = self._process_dir_train(self.train_dir,self.name2camera)
query = self._process_dir(self.query_dir, relabel=False)
gallery = self._process_dir(self.gallery_dir, relabel=False)
if verbose:
print("=> personXval loaded")
self.print_dataset_statistics(train, query, gallery)
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.dataset_dir):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if not osp.exists(self.train_dir):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if not osp.exists(self.query_dir):
raise RuntimeError("'{}' is not available".format(self.query_dir))
if not osp.exists(self.gallery_dir):
raise RuntimeError("'{}' is not available".format(self.gallery_dir))
def _process_dir_train(self, dir_path, name2cam):
#img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
# pattern = re.compile(r'([-\d]+)_c(\d)')
dataset = []
for item_pandc in name2cam:
img_path,camid=item_pandc.strip('\n').split(' ')
img_path=osp.join(dir_path,img_path)
            pid = 0  # unlabeled target images share a dummy pid (ids are not parsed from the file name here)
camid = int(camid)
pids = ()
for _ in range(self.ncl):
pids = (pid,) + pids
item = (img_path,) + pids + (camid,)
dataset.append(item)
return dataset
def _process_dir(self, dir_path, relabel=False):
img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
pid_container = set()
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
pid, camid = map(int, pattern.search(img_path).groups())
assert 1 <= camid <= 8
camid -= 1 # index starts from 0
if relabel: pid = pid2label[pid]
pids=()
for _ in range(self.ncl):
pids=(pid,)+pids
item=(img_path,) + pids + (camid,)
dataset.append(item)
return dataset
| 4,015 | 38.372549 | 120 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/datasets/market1501.py | from __future__ import print_function, absolute_import
import os.path as osp
import glob
import re
import urllib
import zipfile
from ..utils.data import BaseImageDataset
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json
class Market1501(BaseImageDataset):
"""
Market1501
Reference:
Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015.
URL: http://www.liangzheng.org/Project/project_reid.html
Dataset statistics:
# identities: 1501 (+1 for background)
# images: 12936 (train) + 3368 (query) + 15913 (gallery)
"""
dataset_dir = 'Market-1501-v15.09.15'
def __init__(self, root, ncl=1, verbose=True, **kwargs):
super(Market1501, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
self.query_dir = osp.join(self.dataset_dir, 'query')
self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')
self.ncl = ncl
self._check_before_run()
train = self._process_dir(self.train_dir, relabel=True)
query = self._process_dir(self.query_dir, relabel=False)
gallery = self._process_dir(self.gallery_dir, relabel=False)
if verbose:
print("=> Market1501 loaded")
self.print_dataset_statistics(train, query, gallery)
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.dataset_dir):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if not osp.exists(self.train_dir):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if not osp.exists(self.query_dir):
raise RuntimeError("'{}' is not available".format(self.query_dir))
if not osp.exists(self.gallery_dir):
raise RuntimeError("'{}' is not available".format(self.gallery_dir))
def _process_dir(self, dir_path, relabel=False):
img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
pid_container = set()
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if pid == -1: continue # junk images are just ignored
pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
pid, camid = map(int, pattern.search(img_path).groups())
if pid == -1: continue # junk images are just ignored
assert 0 <= pid <= 1501 # pid == 0 means background
assert 1 <= camid <= 6
camid -= 1 # index starts from 0
if relabel: pid = pid2label[pid]
pids=()
for _ in range(self.ncl):
pids=(pid,)+pids
item=(img_path,) + pids + (camid,)
dataset.append(item)
# if relabel: pid = pid2label[pid]
# dataset.append((img_path, pid, camid))
return dataset
| 3,578 | 37.902174 | 115 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/datasets/__init__.py | from __future__ import absolute_import
import warnings
from .dukemtmc import DukeMTMC
from .market1501 import Market1501
from .msmt17 import MSMT17
from .personx import personX
from .personxval import personXval
__factory = {
'market1501': Market1501,
'dukemtmc': DukeMTMC,
'msmt17': MSMT17,
'personx': personX,
'personxval': personXval
}
def names():
return sorted(__factory.keys())
def create(name, root, l=1, *args, **kwargs):
"""
Create a dataset instance.
Parameters
----------
    name : str
        The dataset name. Can be one of 'market1501', 'dukemtmc', 'msmt17',
        'personx', and 'personxval'.
root : str
The path to the dataset directory.
    l : int, optional
        Number of (pseudo-)label slots stored per image, i.e. each train item
        is a tuple (img_path, pid_1, ..., pid_l, camid). Default: 1
download : bool, optional
If True, will download the dataset. Default: False
"""
if name not in __factory:
raise KeyError("Unknown dataset:", name)
return __factory[name](root, ncl=l, *args, **kwargs)
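# Illustrative call (the data root is a placeholder): with l=1 every train item
# is a tuple (img_path, pid, camid); larger l repeats the pid slot so that
# multiple pseudo-label sets can be attached later.
#
#   dataset = create('dukemtmc', '/path/to/data', l=1)
#   train, query, gallery = dataset.train, dataset.query, dataset.gallery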
def get_dataset(name, root, *args, **kwargs):
warnings.warn("get_dataset is deprecated. Use create instead.")
return create(name, root, *args, **kwargs)
| 1,372 | 26.46 | 79 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/batch_norm.py | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
import logging
import torch
import torch.nn.functional as F
from torch import nn
__all__ = [
"BatchNorm",
"IBN",
"GhostBatchNorm",
"FrozenBatchNorm",
"SyncBatchNorm",
"get_norm",
]
class BatchNorm(nn.BatchNorm2d):
def __init__(self, num_features, eps=1e-05, momentum=0.1, weight_freeze=False, bias_freeze=False, weight_init=1.0,
bias_init=0.0):
super().__init__(num_features, eps=eps, momentum=momentum)
if weight_init is not None: self.weight.data.fill_(weight_init)
if bias_init is not None: self.bias.data.fill_(bias_init)
self.weight.requires_grad_(not weight_freeze)
self.bias.requires_grad_(not bias_freeze)
class SyncBatchNorm(nn.SyncBatchNorm):
def __init__(self, num_features, eps=1e-05, momentum=0.1, weight_freeze=False, bias_freeze=False, weight_init=1.0,
bias_init=0.0):
super().__init__(num_features, eps=eps, momentum=momentum)
if weight_init is not None: self.weight.data.fill_(weight_init)
if bias_init is not None: self.bias.data.fill_(bias_init)
self.weight.requires_grad_(not weight_freeze)
self.bias.requires_grad_(not bias_freeze)
class IBN(nn.Module):
def __init__(self, planes, bn_norm, num_splits):
super(IBN, self).__init__()
half1 = int(planes / 2)
self.half = half1
half2 = planes - half1
self.IN = nn.InstanceNorm2d(half1, affine=True)
self.BN = get_norm(bn_norm, half2, num_splits)
def forward(self, x):
split = torch.split(x, self.half, 1)
out1 = self.IN(split[0].contiguous())
out2 = self.BN(split[1].contiguous())
out = torch.cat((out1, out2), 1)
return out
class GhostBatchNorm(BatchNorm):
def __init__(self, num_features, num_splits=1, **kwargs):
super().__init__(num_features, **kwargs)
self.num_splits = num_splits
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, input):
N, C, H, W = input.shape
if self.training or not self.track_running_stats:
self.running_mean = self.running_mean.repeat(self.num_splits)
self.running_var = self.running_var.repeat(self.num_splits)
outputs = F.batch_norm(
input.view(-1, C * self.num_splits, H, W), self.running_mean, self.running_var,
self.weight.repeat(self.num_splits), self.bias.repeat(self.num_splits),
True, self.momentum, self.eps).view(N, C, H, W)
self.running_mean = torch.mean(self.running_mean.view(self.num_splits, self.num_features), dim=0)
self.running_var = torch.mean(self.running_var.view(self.num_splits, self.num_features), dim=0)
return outputs
else:
return F.batch_norm(
input, self.running_mean, self.running_var,
self.weight, self.bias, False, self.momentum, self.eps)
class FrozenBatchNorm(BatchNorm):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
It contains non-trainable buffers called
"weight" and "bias", "running_mean", "running_var",
initialized to perform identity transformation.
The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
which are computed from the original four parameters of BN.
The affine transform `x * weight + bias` will perform the equivalent
computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
When loading a backbone model from Caffe2, "running_mean" and "running_var"
will be left unchanged as identity transformation.
Other pre-trained backbone models may contain all 4 parameters.
The forward is implemented by `F.batch_norm(..., training=False)`.
"""
_version = 3
def __init__(self, num_features, eps=1e-5):
super().__init__(num_features, weight_freeze=True, bias_freeze=True)
self.num_features = num_features
self.eps = eps
def forward(self, x):
if x.requires_grad:
# When gradients are needed, F.batch_norm will use extra memory
# because its backward op computes gradients for weight/bias as well.
scale = self.weight * (self.running_var + self.eps).rsqrt()
bias = self.bias - self.running_mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return x * scale + bias
else:
# When gradients are not needed, F.batch_norm is a single fused op
# and provide more optimization opportunities.
return F.batch_norm(
x,
self.running_mean,
self.running_var,
self.weight,
self.bias,
training=False,
eps=self.eps,
)
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# No running_mean/var in early versions
# This will silent the warnings
if prefix + "running_mean" not in state_dict:
state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
if prefix + "running_var" not in state_dict:
state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
if version is not None and version < 3:
logger = logging.getLogger(__name__)
logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip(".")))
# In version < 3, running_var are used without +eps.
state_dict[prefix + "running_var"] -= self.eps
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def __repr__(self):
return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)
@classmethod
def convert_frozen_batchnorm(cls, module):
"""
Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
Args:
module (torch.nn.Module):
Returns:
If module is BatchNorm/SyncBatchNorm, returns a new module.
Otherwise, in-place convert module and return it.
Similar to convert_sync_batchnorm in
https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
"""
bn_module = nn.modules.batchnorm
bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
res = module
if isinstance(module, bn_module):
res = cls(module.num_features)
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for name, child in module.named_children():
new_child = cls.convert_frozen_batchnorm(child)
if new_child is not child:
res.add_module(name, new_child)
return res
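def _demo_frozen_batchnorm():
    # Illustrative sketch, not part of the original file: checks that
    # convert_frozen_batchnorm preserves the output of an eval-mode BatchNorm2d,
    # i.e. the frozen affine `x * scale + bias` matches F.batch_norm(training=False).
    # The toy Sequential below is a hypothetical stand-in module.
    toy = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8))
    with torch.no_grad():
        toy.train()
        toy(torch.randn(4, 3, 16, 16))  # move the running statistics away from the identity
        toy.eval()
        x = torch.randn(2, 3, 16, 16)
        ref = toy(x)
        frozen = FrozenBatchNorm.convert_frozen_batchnorm(toy)
        out = frozen(x)
    print((ref - out).abs().max().item())  # expected to be ~0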
def get_norm(norm, out_channels, num_splits=1, **kwargs):
"""
Args:
norm (str or callable):
Returns:
nn.Module or None: the normalization layer
"""
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {
"BN": BatchNorm(out_channels, **kwargs),
"GhostBN": GhostBatchNorm(out_channels, num_splits, **kwargs),
"FrozenBN": FrozenBatchNorm(out_channels),
"GN": nn.GroupNorm(32, out_channels),
"syncBN": SyncBatchNorm(out_channels, **kwargs),
}[norm]
return norm | 8,165 | 39.029412 | 118 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/non_local.py | # encoding: utf-8
import torch
from torch import nn
from .batch_norm import get_norm
class Non_local(nn.Module):
def __init__(self, in_channels, bn_norm, num_splits, reduc_ratio=2):
super(Non_local, self).__init__()
self.in_channels = in_channels
        self.inter_channels = in_channels // reduc_ratio  # bottleneck width reduced by reduc_ratio
self.g = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.W = nn.Sequential(
nn.Conv2d(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
get_norm(bn_norm, self.in_channels, num_splits),
)
nn.init.constant_(self.W[1].weight, 0.0)
nn.init.constant_(self.W[1].bias, 0.0)
self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
def forward(self, x):
'''
        :param x: (b, c, h, w)
        :return x: (b, c, h, w)
'''
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
f = torch.matmul(theta_x, phi_x)
N = f.size(-1)
f_div_C = f / N
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
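if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: the non-local block is
    # shape-preserving, so it can be inserted between backbone stages as-is.
    # 'BN' with num_splits=1 selects the plain BatchNorm branch of get_norm.
    block = Non_local(in_channels=64, bn_norm='BN', num_splits=1)
    x = torch.randn(2, 64, 16, 8)
    z = block(x)
    print(x.shape, z.shape)  # identical shapes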
| 1,901 | 33.581818 | 94 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/__init__.py | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
from torch import nn
# from .batch_drop import BatchDrop
# from .attention import *
from .batch_norm import *
# from .context_block import ContextBlock
from .non_local import Non_local
# from .se_layer import SELayer
# from .frn import FRN, TLU
# from .activation import *
# from .gem_pool import GeneralizedMeanPoolingP, AdaptiveAvgMaxPool2d
# from .arcface import Arcface
# from .circle import Circle
# from .splat import SplAtConv2d
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
| 622 | 23.92 | 69 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/sync_bn/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
    Execute a replication callback `__data_parallel_replicate__` on each module created by original replication.
    The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
    Note that, as all modules are isomorphic, we assign each sub-module with a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
class DataParallelWithCallback(DataParallel):
"""
Data Parallel with a replication callback.
    A replication callback `__data_parallel_replicate__` of each module will be invoked after being created by
original `replicate` function.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
# sync_bn.__data_parallel_replicate__ will be invoked.
"""
def replicate(self, module, device_ids):
modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate
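if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: any sub-module defining
    # `__data_parallel_replicate__` receives a context shared across its copies plus
    # its copy id, which is how SynchronizedBatchNorm wires up master/slave pipes.
    import torch.nn as nn
    class _Recorder(nn.Module):
        def __data_parallel_replicate__(self, ctx, copy_id):
            ctx.seen = getattr(ctx, 'seen', 0) + 1  # ctx is shared by all copies of this module
            self.copy_id = copy_id
        def forward(self, x):
            return x
    replicas = [_Recorder(), _Recorder()]  # stand-ins for the per-device copies
    execute_replication_callbacks(replicas)
    print([m.copy_id for m in replicas])  # -> [0, 1]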
| 3,226 | 32.968421 | 115 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/sync_bn/unittest.py | # -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import torch
class TorchTestCase(unittest.TestCase):
def assertTensorClose(self, x, y):
adiff = float((x - y).abs().max())
if (y == 0).all():
rdiff = 'NaN'
else:
rdiff = float((adiff / y).abs().max())
message = (
'Tensor close check failed\n'
'adiff={}\n'
'rdiff={}\n'
).format(adiff, rdiff)
self.assertTrue(torch.allclose(x, y), message)
| 746 | 23.9 | 59 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/sync_bn/batchnorm.py | # -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import contextlib
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
try:
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
except ImportError:
ReduceAddCoalesced = Broadcast = None
try:
from jactorch.parallel.comm import SyncMaster
from jactorch.parallel.data_parallel import JacDataParallel as DataParallelWithCallback
except ImportError:
from .comm import SyncMaster
from .replicate import DataParallelWithCallback
__all__ = [
'SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d',
'patch_sync_batchnorm', 'convert_model'
]
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dimensions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
class _SynchronizedBatchNorm(_BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, weight_freeze=False, bias_freeze=False, affine=True):
assert ReduceAddCoalesced is not None, 'Can not use Synchronized Batch Normalization without CUDA support.'
super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
self.weight.requires_grad_(not weight_freeze)
self.bias.requires_grad_(not bias_freeze)
self._sync_master = SyncMaster(self._data_parallel_master)
self._is_parallel = False
self._parallel_id = None
self._slave_pipe = None
def forward(self, input):
# If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
if not (self._is_parallel and self.training):
return F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training, self.momentum, self.eps)
# Resize the input to (B, C, -1).
input_shape = input.size()
input = input.view(input.size(0), self.num_features, -1)
# Compute the sum and square-sum.
sum_size = input.size(0) * input.size(2)
input_sum = _sum_ft(input)
input_ssum = _sum_ft(input ** 2)
# Reduce-and-broadcast the statistics.
if self._parallel_id == 0:
mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
else:
mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
# Compute the output.
if self.affine:
# MJY:: Fuse the multiplication for speed.
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
else:
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
# Reshape it.
return output.view(input_shape)
def __data_parallel_replicate__(self, ctx, copy_id):
self._is_parallel = True
self._parallel_id = copy_id
# parallel_id == 0 means master device.
if self._parallel_id == 0:
ctx.sync_master = self._sync_master
else:
self._slave_pipe = ctx.sync_master.register_slave(copy_id)
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
# Always using same "device order" makes the ReduceAdd operation faster.
# Thanks to:: Tete Xiao (http://tetexiao.com/)
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
to_reduce = [i[1][:2] for i in intermediates]
to_reduce = [j for i in to_reduce for j in i] # flatten
target_gpus = [i[1].sum.get_device() for i in intermediates]
sum_size = sum([i[1].sum_size for i in intermediates])
sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
outputs = []
for i, rec in enumerate(intermediates):
outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))
return outputs
def _compute_mean_std(self, sum_, ssum, size):
"""Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device."""
assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum_ / size
sumvar = ssum - sum_ * mean
unbias_var = sumvar / (size - 1)
bias_var = sumvar / size
if hasattr(torch, 'no_grad'):
with torch.no_grad():
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
else:
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
return mean, bias_var.clamp(self.eps) ** -0.5
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
mini-batch.
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm1d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device using
    only the statistics on that device, which accelerates the computation and
    is easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
Note that, for one-GPU or CPU-only case, this module behaves exactly same
as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
Args:
num_features: num_features from an expected input of size
`batch_size x num_features [x width]`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape::
- Input: :math:`(N, C)` or :math:`(N, C, L)`
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
of 3d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm2d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device using
    only the statistics on that device, which accelerates the computation and
    is easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
Note that, for one-GPU or CPU-only case, this module behaves exactly same
as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape::
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
of 4d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm3d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device using
    only the statistics on that device, which accelerates the computation and
    is easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
Note that, for one-GPU or CPU-only case, this module behaves exactly same
as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
or Spatio-temporal BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x depth x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape::
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
@contextlib.contextmanager
def patch_sync_batchnorm():
import torch.nn as nn
backup = nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d
nn.BatchNorm1d = SynchronizedBatchNorm1d
nn.BatchNorm2d = SynchronizedBatchNorm2d
nn.BatchNorm3d = SynchronizedBatchNorm3d
yield
nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d = backup
def convert_model(module):
"""Traverse the input module and its child recursively
and replace all instance of torch.nn.modules.batchnorm.BatchNorm*N*d
to SynchronizedBatchNorm*N*d
Args:
module: the input module needs to be convert to SyncBN model
Examples:
>>> import torch.nn as nn
>>> import torchvision
>>> # m is a standard pytorch model
>>> m = torchvision.models.resnet18(True)
>>> m = nn.DataParallel(m)
>>> # after convert, m is using SyncBN
>>> m = convert_model(m)
"""
if isinstance(module, torch.nn.DataParallel):
mod = module.module
mod = convert_model(mod)
mod = DataParallelWithCallback(mod)
return mod
mod = module
for pth_module, sync_module in zip([torch.nn.modules.batchnorm.BatchNorm1d,
torch.nn.modules.batchnorm.BatchNorm2d,
torch.nn.modules.batchnorm.BatchNorm3d],
[SynchronizedBatchNorm1d,
SynchronizedBatchNorm2d,
SynchronizedBatchNorm3d]):
if isinstance(module, pth_module):
mod = sync_module(module.num_features, module.eps, module.momentum, module.affine)
mod.running_mean = module.running_mean
mod.running_var = module.running_var
if module.affine:
mod.weight.data = module.weight.data.clone().detach()
mod.bias.data = module.bias.data.clone().detach()
for name, child in module.named_children():
mod.add_module(name, convert_model(child))
return mod
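if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: outside of DataParallel the
    # synchronized layers fall back to F.batch_norm, so on a single device they
    # should match torch.nn.BatchNorm2d exactly.
    torch.manual_seed(0)
    x = torch.randn(4, 3, 8, 8)
    ref = torch.nn.BatchNorm2d(3)
    sync = SynchronizedBatchNorm2d(3)
    sync.load_state_dict(ref.state_dict(), strict=False)
    print((ref(x) - sync(x)).abs().max().item())  # expected to be ~0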
| 15,978 | 39.35101 | 116 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/sync_bn/batchnorm_reimpl.py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : batchnorm_reimpl.py
# Author : acgtyrant
# Date : 11/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
__all__ = ['BatchNorm2dReimpl']
class BatchNorm2dReimpl(nn.Module):
"""
A re-implementation of batch normalization, used for testing the numerical
stability.
Author: acgtyrant
See also:
https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1):
super().__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = nn.Parameter(torch.empty(num_features))
self.bias = nn.Parameter(torch.empty(num_features))
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
def reset_running_stats(self):
self.running_mean.zero_()
self.running_var.fill_(1)
def reset_parameters(self):
self.reset_running_stats()
init.uniform_(self.weight)
init.zeros_(self.bias)
def forward(self, input_):
batchsize, channels, height, width = input_.size()
numel = batchsize * height * width
input_ = input_.permute(1, 0, 2, 3).contiguous().view(channels, numel)
sum_ = input_.sum(1)
sum_of_square = input_.pow(2).sum(1)
mean = sum_ / numel
sumvar = sum_of_square - sum_ * mean
self.running_mean = (
(1 - self.momentum) * self.running_mean
+ self.momentum * mean.detach()
)
unbias_var = sumvar / (numel - 1)
self.running_var = (
(1 - self.momentum) * self.running_var
+ self.momentum * unbias_var.detach()
)
bias_var = sumvar / numel
inv_std = 1 / (bias_var + self.eps).pow(0.5)
output = (
(input_ - mean.unsqueeze(1)) * inv_std.unsqueeze(1) *
self.weight.unsqueeze(1) + self.bias.unsqueeze(1))
return output.view(channels, batchsize, height, width).permute(1, 0, 2, 3).contiguous()
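if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: compare the
    # reimplementation against torch.nn.BatchNorm2d in training mode.
    torch.manual_seed(0)
    x = torch.randn(8, 4, 5, 5)
    ref = nn.BatchNorm2d(4)
    reimpl = BatchNorm2dReimpl(4)
    reimpl.weight.data.copy_(ref.weight.data)  # start from the same affine parameters
    reimpl.bias.data.copy_(ref.bias.data)
    print((ref(x) - reimpl(x)).abs().max().item())  # expected to be ~1e-6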
| 2,385 | 30.813333 | 95 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/sync_bn/comm.py | # -*- coding: utf-8 -*-
# File : comm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import queue
import collections
import threading
__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
class FutureResult(object):
"""A thread-safe future implementation. Used only as one-to-one pipe."""
def __init__(self):
self._result = None
self._lock = threading.Lock()
self._cond = threading.Condition(self._lock)
def put(self, result):
with self._lock:
            assert self._result is None, 'Previous result hasn\'t been fetched.'
self._result = result
self._cond.notify()
def get(self):
with self._lock:
if self._result is None:
self._cond.wait()
res = self._result
self._result = None
return res
_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
class SlavePipe(_SlavePipeBase):
"""Pipe for master-slave communication."""
def run_slave(self, msg):
self.queue.put((self.identifier, msg))
ret = self.result.get()
self.queue.put(True)
return ret
class SyncMaster(object):
"""An abstract `SyncMaster` object.
    - During the replication, as the data parallel will trigger a callback on each module, all slave devices should
    call `register(id)` and obtain a `SlavePipe` to communicate with the master.
    - During the forward pass, the master device invokes `run_master`; all messages from slave devices will be collected
    and passed to a registered callback.
    - After receiving the messages, the master device should gather the information and determine the message to be passed
    back to each slave device.
"""
def __init__(self, master_callback):
"""
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
"""
self._master_callback = master_callback
self._queue = queue.Queue()
self._registry = collections.OrderedDict()
self._activated = False
def __getstate__(self):
return {'master_callback': self._master_callback}
def __setstate__(self, state):
self.__init__(state['master_callback'])
def register_slave(self, identifier):
"""
        Register a slave device.
Args:
identifier: an identifier, usually is the device id.
Returns: a `SlavePipe` object which can be used to communicate with the master device.
"""
if self._activated:
assert self._queue.empty(), 'Queue is not clean before next initialization.'
self._activated = False
self._registry.clear()
future = FutureResult()
self._registry[identifier] = _MasterRegistry(future)
return SlavePipe(identifier, self._queue, future)
def run_master(self, master_msg):
"""
Main entry for the master device in each forward pass.
        The messages are first collected from each device (including the master device), and then
        a callback will be invoked to compute the message to be sent back to each device
(including the master device).
Args:
master_msg: the message that the master want to send to itself. This will be placed as the first
message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
Returns: the message to be sent back to the master device.
"""
self._activated = True
intermediates = [(0, master_msg)]
for i in range(self.nr_slaves):
intermediates.append(self._queue.get())
results = self._master_callback(intermediates)
        assert results[0][0] == 0, 'The first result should belong to the master.'
for i, res in results:
if i == 0:
continue
self._registry[i].result.put(res)
for i in range(self.nr_slaves):
assert self._queue.get() is True
return results[0][1]
@property
def nr_slaves(self):
return len(self._registry)
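if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: one master and one slave
    # thread exchange a message through SyncMaster, mirroring how the synchronized
    # BatchNorm layers reduce per-device statistics.
    def _sum_callback(intermediates):
        # intermediates is a list of (identifier, message); reply with the total
        total = sum(msg for _, msg in intermediates)
        return [(identifier, total) for identifier, _ in intermediates]
    master = SyncMaster(_sum_callback)
    pipe = master.register_slave(1)
    results = {}
    def _slave():
        results['slave'] = pipe.run_slave(2)
    worker = threading.Thread(target=_slave)
    worker.start()
    results['master'] = master.run_master(1)
    worker.join()
    print(results)  # both sides receive 1 + 2 = 3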
| 4,449 | 31.246377 | 117 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/sync_bn/__init__.py | # -*- coding: utf-8 -*-
# File : __init__.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
from .batchnorm import patch_sync_batchnorm, convert_model
from .replicate import DataParallelWithCallback, patch_replication_callback
| 507 | 35.285714 | 96 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/feature_extraction/cnn.py | from __future__ import absolute_import
from collections import OrderedDict
from ..utils import to_torch
def extract_cnn_feature(model, inputs, modules=None):
model.eval()
# with torch.no_grad():
inputs = to_torch(inputs).cuda()
if modules is None:
outputs = model(inputs)
outputs = outputs.data.cpu()
return outputs
# Register forward hook for each module
outputs = OrderedDict()
handles = []
for m in modules:
outputs[id(m)] = None
def func(m, i, o): outputs[id(m)] = o.data.cpu()
handles.append(m.register_forward_hook(func))
model(inputs)
for h in handles:
h.remove()
return list(outputs.values())
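if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: torchvision's resnet18 is
    # only a stand-in backbone, and a CUDA device is assumed because this helper
    # moves the inputs to the GPU.
    import torch
    from torchvision.models import resnet18
    model = resnet18().cuda()
    images = torch.randn(2, 3, 256, 128)
    pooled = extract_cnn_feature(model, images)  # final output, returned on CPU
    mid, = extract_cnn_feature(model, images, modules=[model.layer4])  # hooked intermediate activation
    print(pooled.shape, mid.shape)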
| 705 | 25.148148 | 56 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/feature_extraction/database.py | from __future__ import absolute_import
import h5py
import numpy as np
from torch.utils.data import Dataset
class FeatureDatabase(Dataset):
def __init__(self, *args, **kwargs):
super(FeatureDatabase, self).__init__()
self.fid = h5py.File(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __getitem__(self, keys):
if isinstance(keys, (tuple, list)):
return [self._get_single_item(k) for k in keys]
return self._get_single_item(keys)
def _get_single_item(self, key):
return np.asarray(self.fid[key])
def __setitem__(self, key, value):
if key in self.fid:
if self.fid[key].shape == value.shape and \
self.fid[key].dtype == value.dtype:
self.fid[key][...] = value
else:
del self.fid[key]
self.fid.create_dataset(key, data=value)
else:
self.fid.create_dataset(key, data=value)
def __delitem__(self, key):
del self.fid[key]
def __len__(self):
return len(self.fid)
def __iter__(self):
return iter(self.fid)
def flush(self):
self.fid.flush()
def close(self):
self.fid.close()
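if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: store one feature vector per
    # image name and read it back; the /tmp path and the key are hypothetical.
    feats = np.random.rand(2048).astype(np.float32)
    with FeatureDatabase('/tmp/features.h5', 'w') as db:
        db['0001_c1s1_001051_00.jpg'] = feats
        print(len(db), db['0001_c1s1_001051_00.jpg'].shape)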
| 1,311 | 24.230769 | 59 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/feature_extraction/__init__.py | from __future__ import absolute_import
from .cnn import extract_cnn_feature
from .database import FeatureDatabase
__all__ = [
'extract_cnn_feature',
'FeatureDatabase',
]
| 180 | 17.1 | 38 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/loss/invariance.py | import torch
import torch.nn.functional as F
from torch import nn, autograd
from torch.autograd import Variable, Function
import numpy as np
import math
import warnings
warnings.filterwarnings("ignore")
class ExemplarMemory(Function):
def __init__(self, em, alpha=0.01):
super(ExemplarMemory, self).__init__()
self.em = em
self.alpha = alpha
def forward(self, inputs, targets):
self.save_for_backward(inputs, targets)
outputs = inputs.mm(self.em.t())
return outputs
def backward(self, grad_outputs):
inputs, targets = self.saved_tensors
grad_inputs = None
if self.needs_input_grad[0]:
grad_inputs = grad_outputs.mm(self.em)
for x, y in zip(inputs, targets):
self.em[y] = self.alpha * self.em[y] + (1. - self.alpha) * x
self.em[y] /= self.em[y].norm()
return grad_inputs, None
# Invariance learning loss
class InvNet(nn.Module):
def __init__(self, num_features, num_classes, beta=0.05, knn=6, alpha=0.01):
super(InvNet, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.num_classes = num_classes
self.alpha = alpha # Memory update rate
        self.beta = beta  # Temperature factor
self.knn = knn # Knn for neighborhood invariance
# Exemplar memory
self.em = nn.Parameter(torch.zeros(num_classes, num_features))
def forward(self, inputs, targets, epoch=None):
alpha = self.alpha * epoch
inputs = ExemplarMemory(self.em, alpha=alpha)(inputs, targets)
inputs /= self.beta
if self.knn > 0:# and epoch > 4:
# With neighborhood invariance
loss = self.smooth_loss(inputs, targets)
else:
# Without neighborhood invariance
loss = F.cross_entropy(inputs, targets)
return loss
def smooth_loss(self, inputs, targets):
targets = self.smooth_hot(inputs.detach().clone(), targets.detach().clone(), self.knn)
outputs = F.log_softmax(inputs, dim=1)
loss = - (targets * outputs)
loss = loss.sum(dim=1)
loss = loss.mean(dim=0)
return loss
def smooth_hot(self, inputs, targets, k=6):
# Sort
_, index_sorted = torch.sort(inputs, dim=1, descending=True)
ones_mat = torch.ones(targets.size(0), k).to(self.device)
targets = torch.unsqueeze(targets, 1)
targets_onehot = torch.zeros(inputs.size()).to(self.device)
weights = F.softmax(ones_mat, dim=1)
targets_onehot.scatter_(1, index_sorted[:, 0:k], ones_mat * weights)
targets_onehot.scatter_(1, targets, float(1))
return targets_onehot
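if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: the memory keeps one
    # L2-normalised feature per target-domain image and `targets` are image indices.
    # A CUDA device is assumed (the masks are built on GPU), and ExemplarMemory relies
    # on the legacy autograd Function API of the older PyTorch this repo targets.
    num_images, dim = 100, 256
    invnet = InvNet(dim, num_images, beta=0.05, knn=6, alpha=0.01).cuda()
    feats = F.normalize(torch.randn(16, dim), dim=1).cuda()
    index = torch.randint(0, num_images, (16,)).cuda()
    print(invnet(feats, index, epoch=1).item())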
| 2,793 | 31.870588 | 94 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/loss/triplet.py | from __future__ import absolute_import
import torch
from torch import nn
import torch.nn.functional as F
def euclidean_dist(x, y):
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
dist.addmm_(1, -2, x, y.t())
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
return dist
def cosine_dist(x, y):
bs1, bs2 = x.size(0), y.size(0)
frac_up = torch.matmul(x, y.transpose(0, 1))
frac_down = (torch.sqrt(torch.sum(torch.pow(x, 2), 1))).view(bs1, 1).repeat(1, bs2) * \
(torch.sqrt(torch.sum(torch.pow(y, 2), 1))).view(1, bs2).repeat(bs1, 1)
cosine = frac_up / frac_down
return 1 - cosine
from functools import reduce
def _batch_hard(mat_distance, mat_similarity, indice=False):
# mat_similarity=reduce(lambda x, y: x * y, mat_similaritys)
# mat_similarity=mat_similaritys[0]*mat_similaritys[1]*mat_similaritys[2]*mat_similaritys[3]
sorted_mat_distance, positive_indices = torch.sort(mat_distance + (-9999999.) * (1 - mat_similarity), dim=1,
descending=True)
hard_p = sorted_mat_distance[:, 0]
hard_p_indice = positive_indices[:, 0]
sorted_mat_distance, negative_indices = torch.sort(mat_distance + (9999999.) * (mat_similarity), dim=1,
descending=False)
hard_n = sorted_mat_distance[:, 0]
hard_n_indice = negative_indices[:, 0]
if (indice):
return hard_p, hard_n, hard_p_indice, hard_n_indice
return hard_p, hard_n
class TripletLoss(nn.Module):
'''
Compute Triplet loss augmented with Batch Hard
Details can be seen in 'In defense of the Triplet Loss for Person Re-Identification'
'''
def __init__(self, margin, normalize_feature=False):
super(TripletLoss, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.margin_loss = nn.MarginRankingLoss(margin=margin).cuda()
def forward(self, emb, label):
if self.normalize_feature:
# equal to cosine similarity
emb = F.normalize(emb)
mat_dist = euclidean_dist(emb, emb)
# mat_dist = cosine_dist(emb, emb)
assert mat_dist.size(0) == mat_dist.size(1)
N = mat_dist.size(0)
mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float()
dist_ap, dist_an = _batch_hard(mat_dist, mat_sim)
assert dist_an.size(0) == dist_ap.size(0)
y = torch.ones_like(dist_ap)
loss = self.margin_loss(dist_an, dist_ap, y)
prec = (dist_an.data > dist_ap.data).sum() * 1. / y.size(0)
return loss, prec
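def _demo_triplet_loss():
    # Illustrative sketch, not part of the original file: a PK-sampled batch of
    # 4 identities x 4 instances scored with the batch-hard triplet loss.
    feats = torch.randn(16, 2048)
    labels = torch.arange(4).repeat_interleave(4)  # [0,0,0,0,1,1,1,1,...]
    criterion = TripletLoss(margin=0.3, normalize_feature=True)
    loss, prec = criterion(feats, labels)
    return loss, prec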
def logsumexp(value, weight=1, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
# TODO: torch.max(value, dim=None) threw an error at time of writing
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(weight * torch.exp(value0),
dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(weight * torch.exp(value - m))
return m + torch.log(sum_exp)
class SoftTripletLoss(nn.Module):
def __init__(self, margin=None, normalize_feature=False, uncer_mode=0):
super(SoftTripletLoss, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.uncer_mode = uncer_mode
def forward(self, emb1, emb2, label, uncertainty):
if self.normalize_feature:
# equal to cosine similarity
emb1 = F.normalize(emb1)
emb2 = F.normalize(emb2)
mat_dist = euclidean_dist(emb1, emb1)
assert mat_dist.size(0) == mat_dist.size(1)
N = mat_dist.size(0)
# mat_sims=[]
# for label in labels:
# mat_sims.append(label.expand(N, N).eq(label.expand(N, N).t()).float())
# mat_sim=reduce(lambda x, y: x + y, mat_sims)
mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float()
dist_ap, dist_an, ap_idx, an_idx = _batch_hard(mat_dist, mat_sim, indice=True)
assert dist_an.size(0) == dist_ap.size(0)
triple_dist = torch.stack((dist_ap, dist_an), dim=1)
triple_dist = F.log_softmax(triple_dist, dim=1)
# mat_dist_ref = euclidean_dist(emb2, emb2)
# dist_ap_ref = torch.gather(mat_dist_ref, 1, ap_idx.view(N,1).expand(N,N))[:,0]
# dist_an_ref = torch.gather(mat_dist_ref, 1, an_idx.view(N,1).expand(N,N))[:,0]
# triple_dist_ref = torch.stack((dist_ap_ref, dist_an_ref), dim=1)
# triple_dist_ref = F.softmax(triple_dist_ref, dim=1).detach()
# torch.gather
if self.uncer_mode == 0:
uncer_ap_ref = torch.gather(uncertainty, 0, ap_idx) + uncertainty
uncer_an_ref = torch.gather(uncertainty, 0, an_idx) + uncertainty
        elif self.uncer_mode == 1:
            # torch.max / torch.min give the element-wise max/min of the two
            # uncertainty vectors; the Python builtins cannot compare tensors.
            uncer_ap_ref = torch.max(torch.gather(uncertainty, 0, ap_idx), uncertainty)
            uncer_an_ref = torch.max(torch.gather(uncertainty, 0, an_idx), uncertainty)
        else:
            uncer_ap_ref = torch.min(torch.gather(uncertainty, 0, ap_idx), uncertainty)
            uncer_an_ref = torch.min(torch.gather(uncertainty, 0, an_idx), uncertainty)
uncer = torch.stack((uncer_ap_ref, uncer_an_ref), dim=1).detach() / 2.0
loss = (-uncer * triple_dist).mean(0).sum()#(uncer * triple_dist)[:,0].mean(0).sum()-(uncer * triple_dist)[:,1].mean(0).sum() #- triple_dist[:,1].mean()
return loss
class SoftTripletLoss_vallia(nn.Module):
def __init__(self, margin=None, normalize_feature=False):
super(SoftTripletLoss_vallia, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
def forward(self, emb1, emb2, label):
if self.normalize_feature:
# equal to cosine similarity
emb1 = F.normalize(emb1)
emb2 = F.normalize(emb2)
mat_dist = euclidean_dist(emb1, emb1)
assert mat_dist.size(0) == mat_dist.size(1)
N = mat_dist.size(0)
mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float()
dist_ap, dist_an, ap_idx, an_idx = _batch_hard(mat_dist, mat_sim, indice=True)
assert dist_an.size(0) == dist_ap.size(0)
triple_dist = torch.stack((dist_ap, dist_an), dim=1)
triple_dist = F.log_softmax(triple_dist, dim=1)
if (self.margin is not None):
loss = (- self.margin * triple_dist[:, 0] - (1 - self.margin) * triple_dist[:, 1]).mean()
return loss
mat_dist_ref = euclidean_dist(emb2, emb2)
dist_ap_ref = torch.gather(mat_dist_ref, 1, ap_idx.view(N, 1).expand(N, N))[:, 0]
dist_an_ref = torch.gather(mat_dist_ref, 1, an_idx.view(N, 1).expand(N, N))[:, 0]
triple_dist_ref = torch.stack((dist_ap_ref, dist_an_ref), dim=1)
triple_dist_ref = F.softmax(triple_dist_ref, dim=1).detach()
loss = (- triple_dist_ref * triple_dist)[:, 1].mean(0).sum()
return loss
| 7,326 | 39.038251 | 160 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/loss/crossentropy.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class CrossEntropyLabelSmooth(nn.Module):
def __init__(self, num_classes, epsilon=0.1, reduce=True):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.logsoftmax = nn.LogSoftmax(dim=1).cuda()
self.reduce=reduce
def forward(self, inputs, targets):
"""
Args:
inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
targets: ground truth labels with shape (num_classes)
"""
log_probs = self.logsoftmax(inputs)
targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
if self.reduce:
loss = (- targets * log_probs).mean(0).sum()
else:
loss = (- targets * log_probs)
return loss
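def _demo_label_smoothing():
    # Illustrative sketch, not part of the original file: with K classes and
    # epsilon=0.1 the one-hot target becomes (1 - epsilon) on the true class plus
    # epsilon / K everywhere, and the loss is the cross-entropy against that
    # smoothed distribution.
    logits = torch.randn(8, 751)
    pids = torch.randint(0, 751, (8,))
    criterion = CrossEntropyLabelSmooth(num_classes=751, epsilon=0.1)
    return criterion(logits, pids)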
class SoftEntropy(nn.Module):
def __init__(self):
super(SoftEntropy, self).__init__()
self.logsoftmax = nn.LogSoftmax(dim=1).cuda()
def forward(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
loss = (- F.softmax(targets, dim=1).detach() * log_probs).mean(0).sum()
return loss
| 1,162 | 28.075 | 82 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/loss/multisoftmax.py | import torch
from torch import nn
import torch.nn.functional as F
eps = 1e-7
class NCECriterion(nn.Module):
"""
Eq. (12): L_{memorybank}
"""
def __init__(self, n_data):
super(NCECriterion, self).__init__()
self.n_data = n_data
def forward(self, x):
bsz = x.shape[0]
m = x.size(1) - 1
# noise distribution
Pn = 1 / float(self.n_data)
# loss for positive pair
P_pos = x.select(1, 0)
log_D1 = torch.div(P_pos, P_pos.add(m * Pn + eps)).log_()
# loss for K negative pair
P_neg = x.narrow(1, 1, m)
log_D0 = torch.div(P_neg.clone().fill_(m * Pn), P_neg.add(m * Pn + eps)).log_()
loss = - (log_D1.sum(0) + log_D0.view(-1, 1).sum(0)) / bsz
return loss
class NCESoftmaxLoss(nn.Module):
"""Softmax cross-entropy loss (a.k.a., info-memorybank loss in CPC paper)"""
def __init__(self):
super(NCESoftmaxLoss, self).__init__()
self.criterion = nn.CrossEntropyLoss()
def forward(self, x):
bsz = x.shape[0]
x = x.squeeze()
label = torch.zeros([bsz]).cuda().long()
loss = self.criterion(x, label)
return loss
class MultiSoftmaxLoss(nn.Module):
def __init__(self):
super().__init__()
# self.criterion = nn.KLDivLoss(reduction='batchmean')
self.criterion = nn.CrossEntropyLoss()
# self.criterion = nn.NLLLoss(reduction='mean')
def forward(self, x, is_pos):
bsz = x.shape[0]
# ce_loss = self.criterion(x, torch.zeros([bsz]).cuda().long())
x = x.squeeze()
x = torch.exp(x)
is_pos = torch.cat((torch.ones([bsz, 1], dtype=torch.long).cuda(), is_pos.long()), dim=1)
is_neg = (1 - is_pos).float()
neg_div = (x * is_neg).sum(dim=1, keepdim=True)
x_logit = x / (x + neg_div)
x_logit = -torch.log(x_logit)
x_mask = x_logit * is_pos.float()
num_pos = is_pos.sum(dim=1, keepdim=True).float()
x_mask = x_mask / num_pos
loss = x_mask.sum(dim=1).mean(dim=0)
return loss
# loss = 0
# for i in range(bsz):
# tmp_loss = 0
# pos_inds = torch.where(is_pos[i] == 1)[0].tolist()
# num_pos = len(pos_inds)
# for j in pos_inds:
# tmp_loss -= torch.log(x[i, j] / (neg_div[i][0] + x[i, j]))
# loss += (tmp_loss / num_pos)
# loss = loss / bsz
#
# print(loss)
# print(fast_loss)
# from ipdb import set_trace; set_trace()
# print(ce_loss)
# print(loss)
# def forward(self, x, is_pos):
# is_pos = is_pos.float()
# bsz = x.shape[0]
# x = x.squeeze()
#
# label = torch.zeros([bsz]).cuda().long()
# # loss = self.criterion1(x, ce_label)
#
# # from ipdb import set_trace; set_trace()
# # is_neg = 1 - is_pos[:, 1:]
# x = F.softmax(x, dim=1)
# x = (x * is_pos).sum(dim=1, keepdim=True)
# # neg_logit = (x * is_neg)
# # x = torch.cat((pos_logit, x[:, 1:]), dim=1) # [bsz, 16385]
# # x = torch.log(x)
#
# loss = self.criterion(x.log(), label)
# return loss
# x = F.softmax(x, dim=1)
# label = torch.cat((torch.ones([bsz, 1], dtype=torch.float32).cuda(), is_pos), dim=1) # (bsz, dim)
# label = F.softmax(label, dim=1)
# label = label / label.sum(dim=1, keepdim=True)
# loss = torch.sum(x * torch.log(1e-9 + x / (label + 1e-9)), dim=1).mean(dim=0)
# loss = torch.sum(x * (1e-9 + torch.log(x) - torch.log(label + 1e-9)), dim=1).mean(dim=0)
# from ipdb import set_trace; set_trace()
# loss = self.criterion(x, label)
# return loss
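if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: column 0 of the logits is
    # the query's own memory entry and `is_pos` marks which of the remaining entries
    # share its (pseudo-)label. A CUDA device is assumed, since the forward pass
    # builds the positive mask on GPU.
    logits = torch.randn(4, 1 + 8).cuda()     # 1 self column + 8 memory entries
    is_pos = (torch.rand(4, 8) > 0.7).cuda()  # positives among the 8 memory entries
    criterion = MultiSoftmaxLoss()
    print(criterion(logits, is_pos).item())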
| 3,800 | 29.166667 | 108 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/loss/__init__.py | from __future__ import absolute_import
from .triplet import SoftTripletLoss_vallia, SoftTripletLoss
from .crossentropy import CrossEntropyLabelSmooth, SoftEntropy
from .multisoftmax import MultiSoftmaxLoss
from .invariance import InvNet
__all__ = [
'SoftTripletLoss_vallia',
'CrossEntropyLabelSmooth',
'SoftTripletLoss',
'SoftEntropy',
'MultiSoftmaxLoss',
'InvNet',
]
| 393 | 25.266667 | 62 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/metric_learning/kissme.py | from __future__ import absolute_import
import numpy as np
from metric_learn.base_metric import BaseMetricLearner
def validate_cov_matrix(M):
M = (M + M.T) * 0.5
k = 0
I = np.eye(M.shape[0])
while True:
try:
_ = np.linalg.cholesky(M)
break
except np.linalg.LinAlgError:
# Find the nearest positive definite matrix for M. Modified from
# http://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
# Might take several minutes
k += 1
w, v = np.linalg.eig(M)
            min_eig = w.min()  # w holds the eigenvalues; v the eigenvectors
M += (-min_eig * k * k + np.spacing(min_eig)) * I
return M
class KISSME(BaseMetricLearner):
def __init__(self):
self.M_ = None
def metric(self):
return self.M_
def fit(self, X, y=None):
n = X.shape[0]
if y is None:
y = np.arange(n)
X1, X2 = np.meshgrid(np.arange(n), np.arange(n))
X1, X2 = X1[X1 < X2], X2[X1 < X2]
matches = (y[X1] == y[X2])
num_matches = matches.sum()
num_non_matches = len(matches) - num_matches
idxa = X1[matches]
idxb = X2[matches]
S = X[idxa] - X[idxb]
C1 = S.transpose().dot(S) / num_matches
p = np.random.choice(num_non_matches, num_matches, replace=False)
idxa = X1[~matches]
idxb = X2[~matches]
idxa = idxa[p]
idxb = idxb[p]
S = X[idxa] - X[idxb]
C0 = S.transpose().dot(S) / num_matches
self.M_ = np.linalg.inv(C1) - np.linalg.inv(C0)
self.M_ = validate_cov_matrix(self.M_)
self.X_ = X
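if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: learn the KISS metric
    # M = inv(C1) - inv(C0) from synthetic features of 20 identities and score one
    # pair of samples with it (smaller values mean more similar).
    rng = np.random.RandomState(0)
    X = rng.rand(200, 32)
    y = np.repeat(np.arange(20), 10)
    kissme = KISSME()
    kissme.fit(X, y)
    diff = X[0] - X[1]
    print(diff.dot(kissme.metric()).dot(diff))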
| 1,654 | 28.553571 | 82 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/metric_learning/euclidean.py | from __future__ import absolute_import
import numpy as np
from metric_learn.base_metric import BaseMetricLearner
class Euclidean(BaseMetricLearner):
def __init__(self):
self.M_ = None
def metric(self):
return self.M_
def fit(self, X):
self.M_ = np.eye(X.shape[1])
self.X_ = X
def transform(self, X=None):
if X is None:
return self.X_
return X
| 425 | 18.363636 | 54 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/metric_learning/__init__.py | from __future__ import absolute_import
from metric_learn import (ITML_Supervised, LMNN, LSML_Supervised,
SDML_Supervised, NCA, LFDA, RCA_Supervised)
from .euclidean import Euclidean
from .kissme import KISSME
__factory = {
'euclidean': Euclidean,
'kissme': KISSME,
'itml': ITML_Supervised,
'lmnn': LMNN,
'lsml': LSML_Supervised,
'sdml': SDML_Supervised,
'nca': NCA,
'lfda': LFDA,
'rca': RCA_Supervised,
}
def get_metric(algorithm, *args, **kwargs):
if algorithm not in __factory:
raise KeyError("Unknown metric:", algorithm)
return __factory[algorithm](*args, **kwargs)
| 653 | 24.153846 | 69 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/metric_learning/distance.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
from torch.nn import functional as F
def compute_distance_matrix(input1, input2, metric='euclidean'):
"""A wrapper function for computing distance matrix.
Args:
input1 (torch.Tensor): 2-D feature matrix.
input2 (torch.Tensor): 2-D feature matrix.
metric (str, optional): "euclidean" or "cosine".
Default is "euclidean".
Returns:
torch.Tensor: distance matrix.
Examples::
>>> from torchreid import metrics
>>> input1 = torch.rand(10, 2048)
>>> input2 = torch.rand(100, 2048)
>>> distmat = metrics.compute_distance_matrix(input1, input2)
>>> distmat.size() # (10, 100)
"""
# check input
assert isinstance(input1, torch.Tensor)
assert isinstance(input2, torch.Tensor)
assert input1.dim() == 2, 'Expected 2-D tensor, but got {}-D'.format(input1.dim())
assert input2.dim() == 2, 'Expected 2-D tensor, but got {}-D'.format(input2.dim())
assert input1.size(1) == input2.size(1)
if metric == 'euclidean':
distmat = euclidean_squared_distance(input1, input2)
elif metric == 'cosine':
distmat = cosine_distance(input1, input2)
else:
raise ValueError(
'Unknown distance metric: {}. '
'Please choose either "euclidean" or "cosine"'.format(metric)
)
return distmat
def euclidean_squared_distance(input1, input2):
"""Computes euclidean squared distance.
Args:
input1 (torch.Tensor): 2-D feature matrix.
input2 (torch.Tensor): 2-D feature matrix.
Returns:
torch.Tensor: distance matrix.
"""
m, n = input1.size(0), input2.size(0)
distmat = torch.pow(input1, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(input2, 2).sum(dim=1, keepdim=True).expand(n, m).t()
distmat.addmm_(1, -2, input1, input2.t())
return distmat
def cosine_distance(input1, input2):
"""Computes cosine distance.
Args:
input1 (torch.Tensor): 2-D feature matrix.
input2 (torch.Tensor): 2-D feature matrix.
Returns:
torch.Tensor: distance matrix.
"""
input1_normed = F.normalize(input1, p=2, dim=1)
input2_normed = F.normalize(input2, p=2, dim=1)
distmat = 1 - torch.mm(input1_normed, input2_normed.t())
return distmat
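if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: the expansion
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b should agree with torch.cdist.
    a, b = torch.rand(10, 128), torch.rand(7, 128)
    gap = (euclidean_squared_distance(a, b) - torch.cdist(a, b) ** 2).abs().max()
    print(gap.item())  # expected to be ~1e-6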
| 2,454 | 31.733333 | 86 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/lr_scheduler.py | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
from bisect import bisect_right
import torch
from torch.optim.lr_scheduler import *
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
warmup_factor=1.0 / 3,
warmup_iters=500,
warmup_method="linear",
last_epoch=-1,
):
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of" " increasing integers. Got {}",
milestones,
)
if warmup_method not in ("constant", "linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted"
"got {}".format(warmup_method)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = float(self.last_epoch) / float(self.warmup_iters)
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr
* warmup_factor
* self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
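if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: print the learning rate
    # produced by a 10-epoch linear warmup with decay steps at epochs 40 and 70.
    net = torch.nn.Linear(10, 10)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
    scheduler = WarmupMultiStepLR(optimizer, milestones=[40, 70], gamma=0.1,
                                  warmup_factor=0.01, warmup_iters=10)
    for epoch in range(80):
        if epoch in (0, 5, 10, 40, 70):
            print(epoch, optimizer.param_groups[0]['lr'])
        optimizer.step()
        scheduler.step()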
| 1,807 | 30.172414 | 80 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/loss_and_miner_utils.py | import torch
import numpy as np
import math
from . import common_functions as c_f
def logsumexp(x, keep_mask=None, add_one=True, dim=1):
max_vals, _ = torch.max(x, dim=dim, keepdim=True)
inside_exp = x - max_vals
exp = torch.exp(inside_exp)
if keep_mask is not None:
exp = exp*keep_mask
inside_log = torch.sum(exp, dim=dim, keepdim=True)
if add_one:
inside_log = inside_log + torch.exp(-max_vals)
else:
# add one only if necessary
inside_log[inside_log==0] = torch.exp(-max_vals[inside_log==0])
return torch.log(inside_log) + max_vals
def sim_mat(x, y=None):
"""
    returns a matrix where entry (i,j) is the dot product of x[i] and y[j]
"""
if y is None:
y = x
return torch.matmul(x, y.t())
# https://discuss.pytorch.org/t/efficient-distance-matrix-computation/9065/7
def dist_mat(x, y=None, eps=1e-16, squared=False):
"""
Input: x is a Nxd matrix
           y is an optional Mxd matrix
    Output: dist is a NxM matrix where dist[i,j]
            is the distance between x[i,:] and y[j,:]
            (squared if `squared=True`); if y is not given then 'y=x' is used,
            i.e. dist[i,j] = ||x[i,:]-y[j,:]||
"""
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
# Ensure diagonal is zero if x=y
if y is None:
dist = dist - torch.diag(dist.diag())
dist = torch.clamp(dist, 0.0, np.inf)
if not squared:
mask = (dist == 0).float()
dist = dist + mask * eps
dist = torch.sqrt(dist)
dist = dist * (1.0 - mask)
return dist
def get_pairwise_mat(x, y, use_similarity, squared):
if x is y:
y = None
return sim_mat(x, y=y) if use_similarity else dist_mat(x, y=y, squared=squared)
def get_all_pairs_indices(labels, ref_labels=None):
"""
Given a tensor of labels, this will return 4 tensors.
The first 2 tensors are the indices which form all positive pairs
The second 2 tensors are the indices which form all negative pairs
"""
if ref_labels is None:
ref_labels = labels
labels1 = labels.unsqueeze(1)
labels2 = ref_labels.unsqueeze(0)
matches = (labels1 == labels2).byte()
diffs = matches ^ 1
if ref_labels is labels:
matches -= torch.eye(matches.size(0)).byte().to(labels.device)
a1_idx = matches.nonzero()[:, 0].flatten()
p_idx = matches.nonzero()[:, 1].flatten()
a2_idx = diffs.nonzero()[:, 0].flatten()
n_idx = diffs.nonzero()[:, 1].flatten()
return a1_idx, p_idx, a2_idx, n_idx
def convert_to_pairs(indices_tuple, labels):
"""
This returns anchor-positive and anchor-negative indices,
regardless of what the input indices_tuple is
Args:
indices_tuple: tuple of tensors. Each tensor is 1d and specifies indices
within a batch
labels: a tensor which has the label for each element in a batch
"""
if indices_tuple is None:
return get_all_pairs_indices(labels)
elif len(indices_tuple) == 4:
return indices_tuple
else:
a, p, n = indices_tuple
return a, p, a, n
def convert_to_pos_pairs_with_unique_labels(indices_tuple, labels):
a, p, _, _ = convert_to_pairs(indices_tuple, labels)
_, unique_idx = np.unique(labels[a].cpu().numpy(), return_index=True)
return a[unique_idx], p[unique_idx]
def get_all_triplets_indices(labels, ref_labels=None):
if ref_labels is None:
ref_labels = labels
labels1 = labels.unsqueeze(1)
labels2 = ref_labels.unsqueeze(0)
matches = (labels1 == labels2).byte()
diffs = matches ^ 1
if ref_labels is labels:
matches -= torch.eye(matches.size(0)).byte().to(labels.device)
triplets = matches.unsqueeze(2)*diffs.unsqueeze(1)
a_idx = triplets.nonzero()[:, 0].flatten()
p_idx = triplets.nonzero()[:, 1].flatten()
n_idx = triplets.nonzero()[:, 2].flatten()
return a_idx, p_idx, n_idx
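def _demo_pair_and_triplet_mining():
    # Illustrative sketch, not part of the original file: for labels [0, 0, 1, 1]
    # there are 4 ordered positive pairs, 8 ordered negative pairs and 8 triplets.
    labels = torch.tensor([0, 0, 1, 1])
    a1_idx, p_idx, a2_idx, n_idx = get_all_pairs_indices(labels)
    anc, pos, neg = get_all_triplets_indices(labels)
    return len(a1_idx), len(a2_idx), len(anc)  # -> (4, 8, 8)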
# sample triplets, with a weighted distribution if weights is specified.
def get_random_triplet_indices(labels, ref_labels=None, t_per_anchor=None, weights=None):
a_idx, p_idx, n_idx = [], [], []
labels = labels.cpu().numpy()
ref_labels = labels if ref_labels is None else ref_labels.cpu().numpy()
batch_size = ref_labels.shape[0]
label_count = dict(zip(*np.unique(ref_labels, return_counts=True)))
indices = np.arange(batch_size)
for i, label in enumerate(labels):
curr_label_count = label_count[label]
if ref_labels is labels: curr_label_count -= 1
if curr_label_count == 0:
continue
k = curr_label_count if t_per_anchor is None else t_per_anchor
if weights is not None and not np.any(np.isnan(weights[i])):
n_idx += c_f.NUMPY_RANDOM.choice(batch_size, k, p=weights[i]).tolist()
else:
possible_n_idx = list(np.where(ref_labels != label)[0])
n_idx += c_f.NUMPY_RANDOM.choice(possible_n_idx, k).tolist()
a_idx.extend([i] * k)
curr_p_idx = c_f.safe_random_choice(np.where((ref_labels == label) & (indices != i))[0], k)
p_idx.extend(curr_p_idx.tolist())
return (
torch.LongTensor(a_idx),
torch.LongTensor(p_idx),
torch.LongTensor(n_idx),
)
def repeat_to_match_size(smaller_set, larger_size, smaller_size):
num_repeat = math.ceil(float(larger_size) / float(smaller_size))
return smaller_set.repeat(num_repeat)[:larger_size]
def matched_size_indices(curr_p_idx, curr_n_idx):
num_pos_pairs = len(curr_p_idx)
num_neg_pairs = len(curr_n_idx)
if num_pos_pairs > num_neg_pairs:
n_idx = repeat_to_match_size(curr_n_idx, num_pos_pairs, num_neg_pairs)
p_idx = curr_p_idx
else:
p_idx = repeat_to_match_size(curr_p_idx, num_neg_pairs, num_pos_pairs)
n_idx = curr_n_idx
return p_idx, n_idx
def convert_to_triplets(indices_tuple, labels, t_per_anchor=100):
"""
This returns anchor-positive-negative triplets
regardless of what the input indices_tuple is
"""
if indices_tuple is None:
if t_per_anchor == "all":
return get_all_triplets_indices(labels)
else:
return get_random_triplet_indices(labels, t_per_anchor=t_per_anchor)
elif len(indices_tuple) == 3:
return indices_tuple
else:
a_out, p_out, n_out = [], [], []
a1, p, a2, n = indices_tuple
if len(a1) == 0 or len(a2) == 0:
return [torch.tensor([]).to(labels.device)] * 3
for i in range(len(labels)):
pos_idx = (a1 == i).nonzero().flatten()
neg_idx = (a2 == i).nonzero().flatten()
if len(pos_idx) > 0 and len(neg_idx) > 0:
p_idx = p[pos_idx]
n_idx = n[neg_idx]
p_idx, n_idx = matched_size_indices(p_idx, n_idx)
a_idx = torch.ones_like(c_f.longest_list([p_idx, n_idx])) * i
a_out.append(a_idx)
p_out.append(p_idx)
n_out.append(n_idx)
return [torch.cat(x, dim=0) for x in [a_out, p_out, n_out]]
def convert_to_weights(indices_tuple, labels):
"""
Returns a weight for each batch element, based on
how many times they appear in indices_tuple.
"""
weights = torch.zeros_like(labels).float()
if indices_tuple is None:
return weights + 1
indices, counts = torch.unique(torch.cat(indices_tuple, dim=0), return_counts=True)
counts = (counts.float() / torch.sum(counts)) * len(labels) # multiply by number of labels to scale weights up
weights[indices] = counts
return weights | 7,816 | 34.694064 | 114 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/common_functions.py | import collections
import torch
from torch.autograd import Variable
import numpy as np
import os
import logging
import glob
import scipy.stats
import re
NUMPY_RANDOM = np.random
class Identity(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x
def try_next_on_generator(gen, iterable):
try:
return gen, next(gen)
except StopIteration:
gen = iter(iterable)
return gen, next(gen)
def numpy_to_torch(v):
try:
return torch.from_numpy(v)
except AttributeError:
return v
def to_numpy(v):
if isinstance(v, tuple):
return np.array(v)
try:
return v.cpu().numpy()
except AttributeError:
return v
def wrap_variable(batch_data, device):
return Variable(batch_data).to(device)
def get_hierarchy_label(batch_labels, hierarchy_level):
if hierarchy_level == "all":
return batch_labels
try:
if batch_labels.ndim == 2:
batch_labels = batch_labels[:, hierarchy_level]
return batch_labels
except AttributeError:
return batch_labels
def map_labels(label_map, labels):
labels = to_numpy(labels)
if labels.ndim == 2:
for h in range(labels.shape[1]):
labels[:, h] = label_map(labels[:, h], h)
else:
labels = label_map(labels, 0)
return labels
def process_label(labels, hierarchy_level, label_map):
labels = map_labels(label_map, labels)
labels = get_hierarchy_label(labels, hierarchy_level)
labels = numpy_to_torch(labels)
return labels
def pass_data_to_model(model, data, device, **kwargs):
return model(wrap_variable(data, device), **kwargs)
def set_requires_grad(model, requires_grad):
for param in model.parameters():
param.requires_grad = requires_grad
def safe_random_choice(input_data, size):
"""
Randomly samples without replacement from a sequence. It is "safe" because
if len(input_data) < size, it will randomly sample WITH replacement
Args:
input_data is a sequence, like a torch tensor, numpy array,
python list, tuple etc
size is the number of elements to randomly sample from input_data
Returns:
An array of size "size", randomly sampled from input_data
"""
replace = len(input_data) < size
return NUMPY_RANDOM.choice(input_data, size=size, replace=replace)
def longest_list(list_of_lists):
return max(list_of_lists, key=len)
def slice_by_n(input_array, n):
output = []
for i in range(n):
output.append(input_array[i::n])
return output
def unslice_by_n(input_tensors):
n = len(input_tensors)
rows, cols = input_tensors[0].size()
output = torch.zeros((rows * n, cols)).to(input_tensors[0].device)
for i in range(n):
output[i::n] = input_tensors[i]
return output
def set_layers_to_eval(layer_name):
def set_to_eval(m):
classname = m.__class__.__name__
if classname.find(layer_name) != -1:
m.eval()
return set_to_eval
def get_train_dataloader(dataset, batch_size, sampler, num_workers, collate_fn):
return torch.utils.data.DataLoader(
dataset,
batch_size=int(batch_size),
sampler=sampler,
drop_last=True,
num_workers=num_workers,
collate_fn=collate_fn,
shuffle=sampler is None,
pin_memory=False
)
def get_eval_dataloader(dataset, batch_size, num_workers, collate_fn):
return torch.utils.data.DataLoader(
dataset,
batch_size=int(batch_size),
drop_last=False,
num_workers=num_workers,
collate_fn=collate_fn,
shuffle=False,
pin_memory=False
)
def try_torch_operation(torch_op, input_val):
return torch_op(input_val) if torch.is_tensor(input_val) else input_val
def get_labels_to_indices(labels):
"""
Creates labels_to_indices, which is a dictionary mapping each label
to a numpy array of indices that will be used to index into self.dataset
"""
labels_to_indices = collections.defaultdict(list)
for i, label in enumerate(labels):
labels_to_indices[label].append(i)
for k, v in labels_to_indices.items():
        labels_to_indices[k] = np.array(v, dtype=int)  # np.int was removed in NumPy 1.24
return labels_to_indices
def make_label_to_rank_dict(label_set):
"""
Args:
label_set: type sequence, a set of integer labels
(no duplicates in the sequence)
Returns:
A dictionary mapping each label to its numeric rank in the original set
"""
ranked = scipy.stats.rankdata(label_set) - 1
return {k: v for k, v in zip(label_set, ranked)}
def get_label_map(labels):
# Returns a nested dictionary.
# First level of dictionary represents label hierarchy level.
# Second level is the label map for that hierarchy level
labels = np.array(labels)
if labels.ndim == 2:
label_map = {}
for hierarchy_level in range(labels.shape[1]):
label_map[hierarchy_level] = make_label_to_rank_dict(list(set(labels[:, hierarchy_level])))
return label_map
return {0: make_label_to_rank_dict(list(set(labels)))}
class LabelMapper:
def __init__(self, set_min_label_to_zero=False, dataset_labels=None):
self.set_min_label_to_zero = set_min_label_to_zero
if dataset_labels is not None:
self.label_map = get_label_map(dataset_labels)
def map(self, labels, hierarchy_level):
if not self.set_min_label_to_zero:
return labels
else:
            return np.array([self.label_map[hierarchy_level][x] for x in labels], dtype=int)  # np.int removed in NumPy 1.24
def add_to_recordable_attributes(input_obj, name=None, list_of_names=None):
if not hasattr(input_obj, "record_these"):
input_obj.record_these = []
if name is not None:
if name not in input_obj.record_these:
input_obj.record_these.append(name)
if not hasattr(input_obj, name):
setattr(input_obj, name, 0)
if list_of_names is not None and isinstance(list_of_names, list):
for n in list_of_names:
add_to_recordable_attributes(input_obj, name=n)
def modelpath_creator(folder, basename, identifier, extension=".pth"):
if identifier is None:
return os.path.join(folder, basename + extension)
else:
return os.path.join(folder, "%s_%s%s" % (basename, str(identifier), extension))
def save_model(model, model_name, filepath):
if any(isinstance(model, x) for x in [torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel]):
torch.save(model.module.state_dict(), filepath)
else:
torch.save(model.state_dict(), filepath)
def load_model(model_def, model_filename, device):
try:
model_def.load_state_dict(torch.load(model_filename, map_location=device))
except KeyError:
# original saved file with DataParallel
state_dict = torch.load(model_filename)
# create new OrderedDict that does not contain `module.`
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
# load params
model_def.load_state_dict(new_state_dict)
def operate_on_dict_of_models(input_dict, suffix, folder, operation, logging_string='', log_if_successful=False):
for k, v in input_dict.items():
model_path = modelpath_creator(folder, k, suffix)
try:
operation(k, v, model_path)
if log_if_successful:
logging.info("%s %s" % (logging_string, model_path))
except IOError:
            logging.warning("Could not %s %s" % (logging_string, model_path))
def save_dict_of_models(input_dict, suffix, folder):
def operation(k, v, model_path):
save_model(v, k, model_path)
operate_on_dict_of_models(input_dict, suffix, folder, operation, "SAVE")
def load_dict_of_models(input_dict, suffix, folder, device):
def operation(k, v, model_path):
load_model(v, model_path, device)
operate_on_dict_of_models(input_dict, suffix, folder, operation, "LOAD", log_if_successful=True)
def delete_dict_of_models(input_dict, suffix, folder):
def operation(k, v, model_path):
if os.path.exists(model_path): os.remove(model_path)
operate_on_dict_of_models(input_dict, suffix, folder, operation, "DELETE")
def latest_version(folder, string_to_glob):
items = glob.glob(os.path.join(folder, string_to_glob))
if items == []:
return None
items = [x for x in items if not x.endswith("best.pth")]
version = [int(x.split("_")[-1].split(".")[0]) for x in items]
return max(version)
def return_input(x):
return x
def regex_wrapper(x):
if isinstance(x, list):
return [re.compile(z) for z in x]
return re.compile(x)
def angle_to_coord(angle):
x = np.cos(np.radians(angle))
y = np.sin(np.radians(angle))
return x, y | 9,084 | 28.306452 | 113 | py |
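A short usage sketch for two of the helpers in common_functions.py; the import path is assumed from the repository layout:

import numpy as np
from UDAsbs.utils import common_functions as c_f  # assumed import path

labels = [3, 7, 3, 7, 7, 9]
lti = c_f.get_labels_to_indices(labels)           # e.g. lti[7] == array([1, 3, 4])
picked = c_f.safe_random_choice(lti[9], size=4)   # len(lti[9]) < 4, so it samples WITH replacement
print(lti[7], picked)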
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/logging.py | from __future__ import absolute_import
import os
import sys
from .osutils import mkdir_if_missing
class Logger(object):
def __init__(self, fpath=None):
self.console = sys.stdout
self.file = None
if fpath is not None:
mkdir_if_missing(os.path.dirname(fpath))
self.file = open(fpath, 'w')
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
self.file.write(msg)
def flush(self):
self.console.flush()
if self.file is not None:
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
self.console.close()
if self.file is not None:
self.file.close()
| 876 | 20.925 | 52 | py |
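The Logger above is meant to tee stdout to a file, roughly as follows (the log path is illustrative, import path assumed):

import sys
from UDAsbs.utils.logging import Logger  # assumed import path

sys.stdout = Logger('logs/train_log.txt')  # parent directory is created if missing
print('Epoch 0  mAP: 32.1%')               # goes to both the console and logs/train_log.txt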
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/faiss_rerank.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CVPR2017 paper:Zhong Z, Zheng L, Cao D, et al. Re-ranking Person Re-identification with k-reciprocal Encoding[J]. 2017.
url:http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhong_Re-Ranking_Person_Re-Identification_CVPR_2017_paper.pdf
Matlab version: https://github.com/zhunzhong07/person-re-ranking
"""
import os, sys
import time
import numpy as np
from scipy.spatial.distance import cdist
import gc
import faiss
import torch
import torch.nn.functional as F
from .faiss_utils import search_index_pytorch, search_raw_array_pytorch, \
index_init_gpu, index_init_cpu
def k_reciprocal_neigh(initial_rank, i, k1):
forward_k_neigh_index = initial_rank[i,:k1+1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index,:k1+1]
fi = np.where(backward_k_neigh_index==i)[0]
return forward_k_neigh_index[fi]
def compute_jaccard_distance(target_features, k1=20, k2=6, print_flag=True, search_option=0, use_float16=False):
end = time.time()
if print_flag:
print('Computing jaccard distance...')
ngpus = faiss.get_num_gpus()
N = target_features.size(0)
mat_type = np.float16 if use_float16 else np.float32
if (search_option==0):
# GPU + PyTorch CUDA Tensors (1)
res = faiss.StandardGpuResources()
res.setDefaultNullStreamAllDevices()
_, initial_rank = search_raw_array_pytorch(res, target_features, target_features, k1)
initial_rank = initial_rank.cpu().numpy()
elif (search_option==1):
# GPU + PyTorch CUDA Tensors (2)
res = faiss.StandardGpuResources()
index = faiss.GpuIndexFlatL2(res, target_features.size(-1))
index.add(target_features.cpu().numpy())
_, initial_rank = search_index_pytorch(index, target_features, k1)
res.syncDefaultStreamCurrentDevice()
initial_rank = initial_rank.cpu().numpy()
elif (search_option==2):
# GPU
index = index_init_gpu(ngpus, target_features.size(-1))
index.add(target_features.cpu().numpy())
_, initial_rank = index.search(target_features.cpu().numpy(), k1)
else:
# CPU
index = index_init_cpu(target_features.size(-1))
index.add(target_features.cpu().numpy())
_, initial_rank = index.search(target_features.cpu().numpy(), k1)
nn_k1 = []
nn_k1_half = []
for i in range(N):
nn_k1.append(k_reciprocal_neigh(initial_rank, i, k1))
nn_k1_half.append(k_reciprocal_neigh(initial_rank, i, int(np.around(k1/2))))
V = np.zeros((N, N), dtype=mat_type)
for i in range(N):
k_reciprocal_index = nn_k1[i]
k_reciprocal_expansion_index = k_reciprocal_index
for candidate in k_reciprocal_index:
candidate_k_reciprocal_index = nn_k1_half[candidate]
if (len(np.intersect1d(candidate_k_reciprocal_index,k_reciprocal_index)) > 2/3*len(candidate_k_reciprocal_index)):
k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index,candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index) ## element-wise unique
dist = 2-2*torch.mm(target_features[i].unsqueeze(0).contiguous(), target_features[k_reciprocal_expansion_index].t())
if use_float16:
V[i,k_reciprocal_expansion_index] = F.softmax(-dist, dim=1).view(-1).cpu().numpy().astype(mat_type)
else:
V[i,k_reciprocal_expansion_index] = F.softmax(-dist, dim=1).view(-1).cpu().numpy()
del nn_k1, nn_k1_half
if k2 != 1:
V_qe = np.zeros_like(V, dtype=mat_type)
for i in range(N):
V_qe[i,:] = np.mean(V[initial_rank[i,:k2],:], axis=0)
V = V_qe
del V_qe
del initial_rank
invIndex = []
for i in range(N):
invIndex.append(np.where(V[:,i] != 0)[0]) #len(invIndex)=all_num
jaccard_dist = np.zeros((N, N), dtype=mat_type)
for i in range(N):
temp_min = np.zeros((1,N), dtype=mat_type)
# temp_max = np.zeros((1,N), dtype=mat_type)
indNonZero = np.where(V[i,:] != 0)[0]
indImages = []
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0,indImages[j]] = temp_min[0,indImages[j]]+np.minimum(V[i,indNonZero[j]],V[indImages[j],indNonZero[j]])
# temp_max[0,indImages[j]] = temp_max[0,indImages[j]]+np.maximum(V[i,indNonZero[j]],V[indImages[j],indNonZero[j]])
jaccard_dist[i] = 1-temp_min/(2-temp_min)
# jaccard_dist[i] = 1-temp_min/(temp_max+1e-6)
del invIndex, V
pos_bool = (jaccard_dist < 0)
jaccard_dist[pos_bool] = 0.0
if print_flag:
print ("Jaccard distance computing time cost: {}".format(time.time()-end))
return jaccard_dist
| 4,838 | 38.663934 | 126 | py |
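A minimal sketch of calling compute_jaccard_distance on random features. The function calls faiss.get_num_gpus() unconditionally, so a faiss build that provides it (e.g. faiss-gpu) is assumed, even though search_option=3 runs the k-NN search on the CPU:

import torch
import torch.nn.functional as F
from UDAsbs.utils.faiss_rerank import compute_jaccard_distance  # assumed import path

feats = F.normalize(torch.randn(500, 256), dim=1)   # unit-norm features, as the 2 - 2*cos distance expects
dist = compute_jaccard_distance(feats, k1=20, k2=6, search_option=3)
print(dist.shape, dist.dtype)                        # (500, 500) float32 numpy array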
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/faiss_utils.py | import os
import numpy as np
import faiss
import torch
def swig_ptr_from_FloatTensor(x):
assert x.is_contiguous()
assert x.dtype == torch.float32
return faiss.cast_integer_to_float_ptr(
x.storage().data_ptr() + x.storage_offset() * 4)
def swig_ptr_from_LongTensor(x):
assert x.is_contiguous()
assert x.dtype == torch.int64, 'dtype=%s' % x.dtype
return faiss.cast_integer_to_long_ptr(
x.storage().data_ptr() + x.storage_offset() * 8)
def search_index_pytorch(index, x, k, D=None, I=None):
"""call the search function of an index with pytorch tensor I/O (CPU
and GPU supported)"""
assert x.is_contiguous()
n, d = x.size()
assert d == index.d
if D is None:
D = torch.empty((n, k), dtype=torch.float32, device=x.device)
else:
assert D.size() == (n, k)
if I is None:
I = torch.empty((n, k), dtype=torch.int64, device=x.device)
else:
assert I.size() == (n, k)
torch.cuda.synchronize()
xptr = swig_ptr_from_FloatTensor(x)
Iptr = swig_ptr_from_LongTensor(I)
Dptr = swig_ptr_from_FloatTensor(D)
index.search_c(n, xptr,
k, Dptr, Iptr)
torch.cuda.synchronize()
return D, I
def search_raw_array_pytorch(res, xb, xq, k, D=None, I=None,
metric=faiss.METRIC_L2):
assert xb.device == xq.device
nq, d = xq.size()
if xq.is_contiguous():
xq_row_major = True
elif xq.t().is_contiguous():
xq = xq.t() # I initially wrote xq:t(), Lua is still haunting me :-)
xq_row_major = False
else:
raise TypeError('matrix should be row or column-major')
xq_ptr = swig_ptr_from_FloatTensor(xq)
nb, d2 = xb.size()
assert d2 == d
if xb.is_contiguous():
xb_row_major = True
elif xb.t().is_contiguous():
xb = xb.t()
xb_row_major = False
else:
raise TypeError('matrix should be row or column-major')
xb_ptr = swig_ptr_from_FloatTensor(xb)
if D is None:
D = torch.empty(nq, k, device=xb.device, dtype=torch.float32)
else:
assert D.shape == (nq, k)
assert D.device == xb.device
if I is None:
I = torch.empty(nq, k, device=xb.device, dtype=torch.int64)
else:
assert I.shape == (nq, k)
assert I.device == xb.device
D_ptr = swig_ptr_from_FloatTensor(D)
I_ptr = swig_ptr_from_LongTensor(I)
faiss.bruteForceKnn(res, metric,
xb_ptr, xb_row_major, nb,
xq_ptr, xq_row_major, nq,
d, k, D_ptr, I_ptr)
return D, I
def index_init_gpu(ngpus, feat_dim):
flat_config = []
for i in range(ngpus):
cfg = faiss.GpuIndexFlatConfig()
cfg.useFloat16 = False
cfg.device = i
flat_config.append(cfg)
res = [faiss.StandardGpuResources() for i in range(ngpus)]
indexes = [faiss.GpuIndexFlatL2(res[i], feat_dim, flat_config[i]) for i in range(ngpus)]
index = faiss.IndexShards(feat_dim)
for sub_index in indexes:
index.add_shard(sub_index)
index.reset()
return index
def index_init_cpu(feat_dim):
return faiss.IndexFlatL2(feat_dim)
| 3,182 | 28.201835 | 92 | py |
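A small sketch of the CPU index helper at the end of faiss_utils.py; the GPU variants follow the same add/search pattern (import path assumed):

import numpy as np
from UDAsbs.utils.faiss_utils import index_init_cpu  # assumed import path

feats = np.random.rand(1000, 256).astype(np.float32)
index = index_init_cpu(256)         # plain faiss.IndexFlatL2
index.add(feats)
D, I = index.search(feats[:5], 10)  # L2 distances and indices of the 10 nearest neighbours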
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/__init__.py | from __future__ import absolute_import
import torch
def to_numpy(tensor):
if torch.is_tensor(tensor):
return tensor.cpu().numpy()
elif type(tensor).__module__ != 'numpy':
raise ValueError("Cannot convert {} to numpy array"
.format(type(tensor)))
return tensor
def to_torch(ndarray):
if type(ndarray).__module__ == 'numpy':
return torch.from_numpy(ndarray)
elif not torch.is_tensor(ndarray):
raise ValueError("Cannot convert {} to torch tensor"
.format(type(ndarray)))
return ndarray
| 594 | 26.045455 | 60 | py |
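These two converters simply round-trip between numpy and torch (import path assumed):

import numpy as np
from UDAsbs.utils import to_torch, to_numpy  # assumed import path

x = np.ones((2, 3), dtype=np.float32)
t = to_torch(x)            # numpy -> torch.Tensor
y = to_numpy(t)            # torch.Tensor -> numpy array
assert (x == y).all()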
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/rerank.py | #!/usr/bin/env python2/python3
# -*- coding: utf-8 -*-
"""
Source: https://github.com/zhunzhong07/person-re-ranking
Created on Mon Jun 26 14:46:56 2017
@author: luohao
Modified by Yixiao Ge, 2020-3-14.
CVPR2017 paper:Zhong Z, Zheng L, Cao D, et al. Re-ranking Person Re-identification with k-reciprocal Encoding[J]. 2017.
url:http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhong_Re-Ranking_Person_Re-Identification_CVPR_2017_paper.pdf
Matlab version: https://github.com/zhunzhong07/person-re-ranking
API
q_g_dist: query-gallery distance matrix, numpy array, shape [num_query, num_gallery]
q_q_dist: query-query distance matrix, numpy array, shape [num_query, num_query]
g_g_dist: gallery-gallery distance matrix, numpy array, shape [num_gallery, num_gallery]
k1, k2, lambda_value: parameters, the original paper is (k1=20, k2=6, lambda_value=0.3)
Returns:
final_dist: re-ranked distance, numpy array, shape [num_query, num_gallery]
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
__all__ = ['re_ranking']
import numpy as np
import time
import torch
import torch.nn.functional as F
def re_ranking(q_g_dist, q_q_dist, g_g_dist, k1=20, k2=6, lambda_value=0.3):
# The following naming, e.g. gallery_num, is different from outer scope.
# Don't care about it.
original_dist = np.concatenate(
[np.concatenate([q_q_dist, q_g_dist], axis=1),
np.concatenate([q_g_dist.T, g_g_dist], axis=1)],
axis=0)
original_dist = np.power(original_dist, 2).astype(np.float32)
original_dist = np.transpose(1. * original_dist / np.max(original_dist, axis=0))
V = np.zeros_like(original_dist).astype(np.float32)
initial_rank = np.argsort(original_dist).astype(np.int32)
query_num = q_g_dist.shape[0]
gallery_num = q_g_dist.shape[0] + q_g_dist.shape[1]
all_num = gallery_num
for i in range(all_num):
# k-reciprocal neighbors
forward_k_neigh_index = initial_rank[i, :k1 + 1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = np.where(backward_k_neigh_index == i)[0]
k_reciprocal_index = forward_k_neigh_index[fi]
k_reciprocal_expansion_index = k_reciprocal_index
for j in range(len(k_reciprocal_index)):
candidate = k_reciprocal_index[j]
candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 / 2.)) + 1]
candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,
:int(np.around(k1 / 2.)) + 1]
fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]
candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2. / 3 * len(
candidate_k_reciprocal_index):
k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
V[i, k_reciprocal_expansion_index] = 1. * weight / np.sum(weight)
original_dist = original_dist[:query_num, ]
if k2 != 1:
V_qe = np.zeros_like(V, dtype=np.float32)
for i in range(all_num):
V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
V = V_qe
del V_qe
del initial_rank
invIndex = []
for i in range(gallery_num):
invIndex.append(np.where(V[:, i] != 0)[0])
jaccard_dist = np.zeros_like(original_dist, dtype=np.float32)
for i in range(query_num):
temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float32)
indNonZero = np.where(V[i, :] != 0)[0]
indImages = []
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
V[indImages[j], indNonZero[j]])
jaccard_dist[i] = 1 - temp_min / (2. - temp_min)
final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
del original_dist
del V
del jaccard_dist
final_dist = final_dist[:query_num, query_num:]
return final_dist
def k_reciprocal_neigh(initial_rank, i, k1):
forward_k_neigh_index = initial_rank[i, :k1 + 1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = torch.nonzero(backward_k_neigh_index == i)[:, 0]
return forward_k_neigh_index[fi]
def compute_jaccard_dist(target_features, k1=20, k2=6, print_flag=True,
lambda_value=0, source_features=None, use_gpu=False):
end = time.time()
N = target_features.size(0)
if (use_gpu):
# accelerate matrix distance computing
target_features = target_features.cuda()
if (source_features is not None):
source_features = source_features.cuda()
if ((lambda_value > 0) and (source_features is not None)):
M = source_features.size(0)
sour_tar_dist = torch.pow(target_features, 2).sum(dim=1, keepdim=True).expand(N, M) + \
torch.pow(source_features, 2).sum(dim=1, keepdim=True).expand(M, N).t()
        sour_tar_dist.addmm_(target_features, source_features.t(), beta=1, alpha=-2)  # keyword form; positional beta/alpha was removed in recent PyTorch
sour_tar_dist = 1 - torch.exp(-sour_tar_dist)
sour_tar_dist = sour_tar_dist.cpu()
source_dist_vec = sour_tar_dist.min(1)[0]
del sour_tar_dist
source_dist_vec /= source_dist_vec.max()
source_dist = torch.zeros(N, N)
for i in range(N):
source_dist[i, :] = source_dist_vec + source_dist_vec[i]
del source_dist_vec
if print_flag:
print('Computing original distance...')
original_dist = torch.pow(target_features, 2).sum(dim=1, keepdim=True) * 2
original_dist = original_dist.expand(N, N) - 2 * torch.mm(target_features, target_features.t())
original_dist /= original_dist.max(0)[0]
original_dist = original_dist.t()
initial_rank = torch.argsort(original_dist, dim=-1)
original_dist = original_dist.cpu()
initial_rank = initial_rank.cpu()
all_num = gallery_num = original_dist.size(0)
del target_features
if (source_features is not None):
del source_features
if print_flag:
print('Computing Jaccard distance...')
nn_k1 = []
nn_k1_half = []
for i in range(all_num):
nn_k1.append(k_reciprocal_neigh(initial_rank, i, k1))
nn_k1_half.append(k_reciprocal_neigh(initial_rank, i, int(np.around(k1 / 2))))
V = torch.zeros(all_num, all_num)
for i in range(all_num):
k_reciprocal_index = nn_k1[i]
k_reciprocal_expansion_index = k_reciprocal_index
for candidate in k_reciprocal_index:
candidate_k_reciprocal_index = nn_k1_half[candidate]
if (len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 / 3 * len(
candidate_k_reciprocal_index)):
k_reciprocal_expansion_index = torch.cat((k_reciprocal_expansion_index, candidate_k_reciprocal_index))
k_reciprocal_expansion_index = torch.unique(k_reciprocal_expansion_index) ## element-wise unique
weight = torch.exp(-original_dist[i, k_reciprocal_expansion_index])
V[i, k_reciprocal_expansion_index] = weight / torch.sum(weight)
if k2 != 1:
k2_rank = initial_rank[:, :k2].clone().view(-1)
V_qe = V[k2_rank]
V_qe = V_qe.view(initial_rank.size(0), k2, -1).sum(1)
V_qe /= k2
V = V_qe
del V_qe
del initial_rank
invIndex = []
for i in range(gallery_num):
invIndex.append(torch.nonzero(V[:, i])[:, 0]) # len(invIndex)=all_num
jaccard_dist = torch.zeros_like(original_dist)
for i in range(all_num):
temp_min = torch.zeros(1, gallery_num)
indNonZero = torch.nonzero(V[i, :])[:, 0]
indImages = []
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + torch.min(V[i, indNonZero[j]],
V[indImages[j], indNonZero[j]])
jaccard_dist[i] = 1 - temp_min / (2 - temp_min)
del invIndex
del V
pos_bool = (jaccard_dist < 0)
jaccard_dist[pos_bool] = 0.0
if print_flag:
print("Time cost: {}".format(time.time() - end))
if (lambda_value > 0):
return jaccard_dist * (1 - lambda_value) + source_dist * lambda_value
else:
return jaccard_dist | 8,856 | 41.37799 | 119 | py |
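A usage sketch for the distance-matrix variant of re-ranking; the squared-Euclidean helper below exists only for the example, and the import path is assumed:

import numpy as np
from UDAsbs.utils.rerank import re_ranking  # assumed import path

def sq_dist(a, b):
    # pairwise squared Euclidean distances, shape (len(a), len(b))
    return np.power(a[:, None, :] - b[None, :, :], 2).sum(-1)

q = np.random.rand(10, 128).astype(np.float32)   # query features
g = np.random.rand(50, 128).astype(np.float32)   # gallery features
final = re_ranking(sq_dist(q, g), sq_dist(q, q), sq_dist(g, g), k1=20, k2=6, lambda_value=0.3)
print(final.shape)                               # (10, 50) re-ranked query-gallery distances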
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/meters.py | from __future__ import absolute_import
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count | 496 | 20.608696 | 59 | py |
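Typical use of AverageMeter inside a training loop (import path assumed):

from UDAsbs.utils.meters import AverageMeter  # assumed import path

losses = AverageMeter()
for step, batch_loss in enumerate([0.9, 0.7, 0.6]):
    losses.update(batch_loss, n=32)   # n is the batch size the value was averaged over
    print('step {}: cur {:.3f} avg {:.3f}'.format(step, losses.val, losses.avg))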
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/serialization.py | from __future__ import print_function, absolute_import
import json
import os.path as osp
import shutil
import torch
from torch.nn import Parameter
from .osutils import mkdir_if_missing
def read_json(fpath):
with open(fpath, 'r') as f:
obj = json.load(f)
return obj
def write_json(obj, fpath):
mkdir_if_missing(osp.dirname(fpath))
with open(fpath, 'w') as f:
json.dump(obj, f, indent=4, separators=(',', ': '))
def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'):
mkdir_if_missing(osp.dirname(fpath))
torch.save(state, fpath)
if is_best:
shutil.copy(fpath, osp.join(osp.dirname(fpath), 'model_best.pth.tar'))
def load_checkpoint(fpath):
if osp.isfile(fpath):
# checkpoint = torch.load(fpath)
checkpoint = torch.load(fpath, map_location=torch.device('cpu'))
print("=> Loaded checkpoint '{}'".format(fpath))
return checkpoint
else:
raise ValueError("=> No checkpoint found at '{}'".format(fpath))
def copy_state_dict(state_dict, model, strip=None):
tgt_state = model.state_dict()
copied_names = set()
for name, param in state_dict.items():
if strip is not None and name.startswith(strip):
name = name[len(strip):]
if name not in tgt_state:
continue
if isinstance(param, Parameter):
param = param.data
if param.size() != tgt_state[name].size():
print('mismatch:', name, param.size(), tgt_state[name].size())
continue
tgt_state[name].copy_(param)
copied_names.add(name)
missing = set(tgt_state.keys()) - copied_names
if len(missing) > 0:
print("missing keys in state_dict:", missing)
return model
| 1,758 | 27.370968 | 78 | py |
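A sketch of the usual save/load cycle; the paths and checkpoint keys are illustrative and the import path is assumed:

import torch
from UDAsbs.utils.serialization import save_checkpoint, load_checkpoint, copy_state_dict  # assumed path

model = torch.nn.Linear(10, 5)
save_checkpoint({'state_dict': model.state_dict(), 'epoch': 1},
                is_best=True, fpath='logs/checkpoint.pth.tar')   # also writes logs/model_best.pth.tar
ckpt = load_checkpoint('logs/model_best.pth.tar')
copy_state_dict(ckpt['state_dict'], model)   # tolerant load: skips missing or size-mismatched keys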
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/osutils.py | from __future__ import absolute_import
import os
import errno
def mkdir_if_missing(dir_path):
try:
os.makedirs(dir_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
| 214 | 16.916667 | 38 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/data/sampler.py | from __future__ import absolute_import
from collections import defaultdict
import math
import numpy as np
import copy
import random
import torch
from torch.utils.data.sampler import (
Sampler, SequentialSampler, RandomSampler, SubsetRandomSampler,
WeightedRandomSampler)
def No_index(a, b):
assert isinstance(a, list)
return [i for i, j in enumerate(a) if j != b]
class RandomIdentitySampler(Sampler):
def __init__(self, data_source, num_instances):
self.data_source = data_source
self.num_instances = num_instances
self.index_dic = defaultdict(list)
for index, (_, pid, _) in enumerate(data_source):
self.index_dic[pid].append(index)
self.pids = list(self.index_dic.keys())
self.num_samples = len(self.pids)
def __len__(self):
return self.num_samples * self.num_instances
def __iter__(self):
indices = torch.randperm(self.num_samples).tolist()
ret = []
for i in indices:
pid = self.pids[i]
t = self.index_dic[pid]
if len(t) >= self.num_instances:
t = np.random.choice(t, size=self.num_instances, replace=False)
else:
t = np.random.choice(t, size=self.num_instances, replace=True)
ret.extend(t)
return iter(ret)
class RandomMultipleGallerySampler(Sampler):
def __init__(self, data_source, num_instances=4, choice_c=0):
self.data_source = data_source
self.index_pid = defaultdict(int)
self.pid_cam = defaultdict(list)
self.pid_index = defaultdict(list)
self.num_instances = num_instances
self.choice_c=choice_c
for index, items in enumerate(data_source):# items: (_, pid, ..., pid2, cam)
self.index_pid[index] = items[self.choice_c+1]
self.pid_cam[items[self.choice_c+1]].append(items[-1])
self.pid_index[items[self.choice_c+1]].append(index)
self.pids = list(self.pid_index.keys())
self.num_samples = len(self.pids)
def __len__(self):
return self.num_samples * self.num_instances
def __iter__(self):
indices = torch.randperm(len(self.pids)).tolist()
ret = []
for kid in indices:
i = random.choice(self.pid_index[self.pids[kid]])
i_pid, i_cam = self.data_source[i][self.choice_c+1],self.data_source[i][-1]
ret.append(i)
pid_i = self.index_pid[i]
cams = self.pid_cam[pid_i]
index = self.pid_index[pid_i]
select_cams = No_index(cams, i_cam)
if select_cams:
if len(select_cams) >= self.num_instances:
cam_indexes = np.random.choice(select_cams, size=self.num_instances-1, replace=False)
else:
cam_indexes = np.random.choice(select_cams, size=self.num_instances-1, replace=True)
for kk in cam_indexes:
ret.append(index[kk])
else:
select_indexes = No_index(index, i)
if (not select_indexes): continue
if len(select_indexes) >= self.num_instances:
ind_indexes = np.random.choice(select_indexes, size=self.num_instances-1, replace=False)
else:
ind_indexes = np.random.choice(select_indexes, size=self.num_instances-1, replace=True)
for kk in ind_indexes:
ret.append(index[kk])
return iter(ret)
| 3,547 | 32.471698 | 108 | py |
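RandomMultipleGallerySampler expects records shaped like (fname, pid, ..., camid) and yields dataset indices grouped so that each identity contributes num_instances samples, preferably across cameras. A toy sketch, with the import path assumed:

from torch.utils.data import DataLoader
from UDAsbs.utils.data.sampler import RandomMultipleGallerySampler  # assumed import path

# Toy (fname, pid, camid) records: 4 identities spread over 3 cameras.
records = [('img_%d.jpg' % i, i % 4, i % 3) for i in range(32)]
sampler = RandomMultipleGallerySampler(records, num_instances=4)
loader = DataLoader(range(len(records)), batch_size=16, sampler=sampler, drop_last=True)
for batch_indices in loader:
    print(batch_indices)   # 16 indices, 4 per identity
    break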
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/data/base_dataset.py | # encoding: utf-8
"""
@author: sherlock
@contact: [email protected]
"""
import numpy as np
class BaseDataset(object):
"""
Base class of reid dataset
"""
def get_imagedata_info(self, data):
pids, cams = [], []
for item in data:
pids += [item[1]]
cams += [item[-1]]
pids = set(pids)
cams = set(cams)
num_pids = len(pids)
num_cams = len(cams)
num_imgs = len(data)
return num_pids, num_imgs, num_cams
def get_videodata_info(self, data, return_tracklet_stats=False):
pids, cams, tracklet_stats = [], [], []
for img_paths, pid, camid in data:
pids += [pid]
cams += [camid]
tracklet_stats += [len(img_paths)]
pids = set(pids)
cams = set(cams)
num_pids = len(pids)
num_cams = len(cams)
num_tracklets = len(data)
if return_tracklet_stats:
return num_pids, num_tracklets, num_cams, tracklet_stats
return num_pids, num_tracklets, num_cams
def print_dataset_statistics(self):
raise NotImplementedError
@property
def images_dir(self):
return None
class BaseImageDataset(BaseDataset):
"""
Base class of image reid dataset
"""
def print_dataset_statistics(self, train, query, gallery):
num_train_pids, num_train_imgs, num_train_cams = self.get_imagedata_info(train)
num_query_pids, num_query_imgs, num_query_cams = self.get_imagedata_info(query)
num_gallery_pids, num_gallery_imgs, num_gallery_cams = self.get_imagedata_info(gallery)
print("Dataset statistics:")
print(" ----------------------------------------")
print(" subset | # ids | # images | # cameras")
print(" ----------------------------------------")
print(" train | {:5d} | {:8d} | {:9d}".format(num_train_pids, num_train_imgs, num_train_cams))
print(" query | {:5d} | {:8d} | {:9d}".format(num_query_pids, num_query_imgs, num_query_cams))
print(" gallery | {:5d} | {:8d} | {:9d}".format(num_gallery_pids, num_gallery_imgs, num_gallery_cams))
print(" ----------------------------------------")
| 2,237 | 31.911765 | 112 | py |
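print_dataset_statistics only needs (path, pid, camid) style lists, so it can be exercised directly (import path assumed):

from UDAsbs.utils.data.base_dataset import BaseImageDataset  # assumed import path

train   = [('t1.jpg', 0, 0), ('t2.jpg', 0, 1), ('t3.jpg', 1, 1)]
query   = [('q1.jpg', 0, 0)]
gallery = [('g1.jpg', 0, 1), ('g2.jpg', 1, 2)]
BaseImageDataset().print_dataset_statistics(train, query, gallery)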
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/data/transformer.py | from __future__ import absolute_import
from torchvision.transforms import *
from PIL import Image
import random
import math
import numpy as np
class RectScale(object):
def __init__(self, height, width, interpolation=Image.BILINEAR):
self.height = height
self.width = width
self.interpolation = interpolation
def __call__(self, img):
w, h = img.size
if h == self.height and w == self.width:
return img
return img.resize((self.width, self.height), self.interpolation)
class RandomSizedRectCrop(object):
def __init__(self, height, width, interpolation=Image.BILINEAR):
self.height = height
self.width = width
self.interpolation = interpolation
def __call__(self, img):
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(0.64, 1.0) * area
aspect_ratio = random.uniform(2, 3)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
x1 = random.randint(0, img.size[0] - w)
y1 = random.randint(0, img.size[1] - h)
img = img.crop((x1, y1, x1 + w, y1 + h))
assert(img.size == (w, h))
return img.resize((self.width, self.height), self.interpolation)
# Fallback
scale = RectScale(self.height, self.width,
interpolation=self.interpolation)
return scale(img)
class RandomErasing(object):
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
Args:
probability: The probability that the Random Erasing operation will be performed.
sl: Minimum proportion of erased area against input image.
sh: Maximum proportion of erased area against input image.
r1: Minimum aspect ratio of erased area.
mean: Erasing value.
"""
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.4914, 0.4822, 0.4465)):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) >= self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img | 3,358 | 33.989583 | 96 | py |
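A typical re-ID augmentation pipeline built from the transforms above; RandomSizedRectCrop works on PIL images while RandomErasing operates on the tensor, so it goes after ToTensor/Normalize (import path assumed):

from PIL import Image
from torchvision import transforms as T
from UDAsbs.utils.data.transformer import RandomSizedRectCrop, RandomErasing  # assumed import path

train_transform = T.Compose([
    RandomSizedRectCrop(256, 128),   # (height, width) crop typical for person re-ID
    T.RandomHorizontalFlip(),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    RandomErasing(probability=0.5),
])
x = train_transform(Image.new('RGB', (64, 128)))   # dummy image -> tensor of shape (3, 256, 128)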
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/data/__init__.py | from __future__ import absolute_import
from .base_dataset import BaseImageDataset
from .preprocessor import Preprocessor
class IterLoader:
def __init__(self, loader, length=None):
self.loader = loader
self.length = length
self.iter = None
def __len__(self):
if (self.length is not None):
return self.length
return len(self.loader)
def new_epoch(self):
self.iter = iter(self.loader)
def next(self):
try:
return next(self.iter)
except:
self.iter = iter(self.loader)
return next(self.iter) | 619 | 23.8 | 44 | py |
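IterLoader gives a fixed number of iterations per epoch regardless of the underlying DataLoader length, restarting it transparently (import path assumed):

from torch.utils.data import DataLoader
from UDAsbs.utils.data import IterLoader  # assumed import path

loader = IterLoader(DataLoader(list(range(100)), batch_size=10, shuffle=True), length=30)
loader.new_epoch()
for _ in range(len(loader)):   # 30 iterations even though the DataLoader has only 10 batches
    batch = loader.next()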