repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses 1 value)
---|---|---|---|---|---|---
cmm_ts | cmm_ts-main/models/attention/InputAttention.py | import tensorflow as tf
import keras.backend as K
from keras.layers import *
class InputAttention(Layer):
def __init__(self, config, name = 'Attention'):
super(InputAttention, self).__init__(name = name)
self.config = config
def build(self, inputs):
# input_shape = batch x n_past x n_features
# T = window size : n_past
# n = number of driving series : n_features
# m = size of hidden state + cell state
input_shape = inputs[0]
T = input_shape[1]
n = input_shape[-1]
m = inputs[1][0] + inputs[2][0]
# Ve = T x 1
self.Ve = self.add_weight(name='Ve', shape=(T, 1),
initializer='random_normal',
trainable = True)
        # We = T x m  (m as defined above already covers hidden + cell sizes)
self.We = self.add_weight(name = 'We', shape = (T, m),
initializer='random_normal',
trainable = True)
# Ue = T x T
self.Ue = self.add_weight(name = 'Ue', shape = (T, T),
initializer = 'random_normal',
trainable = True)
super(InputAttention, self).build(input_shape)
def call(self, inputs):
x, past_h, past_c = inputs
# Hidden and cell states concatenation
conc = K.concatenate([past_h, past_c], axis = 0)
conc = K.concatenate([conc for _ in range(x.shape[-1])], axis = 1)
# print("[ht-1, ct-1] shape", conc.shape)
# Attention weights pre softmax
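        # i.e. e = Ve^T * tanh(We @ [h_{t-1}; c_{t-1}] + Ue @ x), one score per
        # driving series, built from the Ve/We/Ue weights created in build()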
e = tf.matmul(tf.transpose(self.Ve), K.tanh(tf.matmul(self.We, conc) + tf.matmul(self.Ue, x)))
# print("e shape", e.shape)
# Attention weights
alpha = tf.nn.softmax(e, axis = 2)
# print("alpha shape", alpha.shape)
# New state
x_tilde = tf.math.multiply(x, alpha)
# print("x_tilde shape", x_tilde.shape)
return x_tilde
| 1,990 | 31.639344 | 102 | py |
cmm_ts | cmm_ts-main/models/attention/SelfAttention.py | import tensorflow as tf
import numpy as np
from keras.layers import *
from models.Constraints import *
import models.Words as W
import keras.backend as K
class SelfAttention(Layer):
def __init__(self, config, causal_vec : np.array, name = 'Attention'):
super(SelfAttention, self).__init__(name = name)
self.config = config
self.causal_vec = causal_vec
self.Dg = Dense(self.config[W.ATTUNITS], activation = 'tanh', use_bias = True)
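        # Three modes for the causal prior on the alpha bias, depending on config:
        # fixed at `causal_vec` (Constant, not trainable), freely trainable
        # (no constraint), or trainable under a Between(...) constraint built
        # from `causal_vec` and TRAINTHRESH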
if self.config[W.USECAUSAL]:
if not self.config[W.CTRAINABLE]:
constraint = Constant(self.causal_vec)
elif self.config[W.CTRAINABLE] and not self.config[W.USECONSTRAINT]:
constraint = None
elif self.config[W.CTRAINABLE] and self.config[W.USECONSTRAINT]:
constraint = Between(self.causal_vec, self.config[W.TRAINTHRESH])
self.Dalpha = Dense(self.config[W.NFEATURES], activation = 'sigmoid',
use_bias = True,
bias_initializer = tf.initializers.Constant(self.causal_vec),
bias_constraint = constraint)
else:
self.Dalpha = Dense(self.config[W.NFEATURES], activation = 'sigmoid',
use_bias = True)
def call(self, x):
# Attention weights
g = self.Dg(x)
alpha = self.Dalpha(g)
# New state
x_tilde = tf.math.multiply(x, alpha)
        return x_tilde
| 1,508 | 38.710526 | 93 | py |
covidcast | covidcast-main/Python-packages/covidcast-py/setup.py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="covidcast",
version="0.2.1", # also update in docs/conf.py
author="Alex Reinhart",
author_email="[email protected]",
description="Access COVID-19 data through the Delphi COVIDcast API",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://cmu-delphi.github.io/covidcast/covidcast-py/html/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
install_requires=[
"pandas<2",
"requests",
"delphi-epidata>=4.1.1",
"geopandas",
"matplotlib",
"numpy",
"descartes",
"imageio-ffmpeg",
"imageio",
"tqdm",
"epiweeks"
],
package_data={"covidcast": ["shapefiles/*/*", "geo_mappings/*"]}
)
| 1,055 | 27.540541 | 72 | py |
covidcast | covidcast-main/Python-packages/covidcast-py/tests/test_geography.py | import pytest
from covidcast import geography
@pytest.mark.parametrize("test_key, test_kwargs, expected", [
(
"not a fips",
{},
[None]
),
(
"42003",
{},
["Allegheny County"],
),
(
"4200",
{"ties_method": "all"},
[{"42000": ["Pennsylvania"], "42001": ["Adams County"],
"42003": ["Allegheny County"], "42005": ["Armstrong County"],
"42007": ["Beaver County"], "42009": ["Bedford County"]}]
),
(
"4200",
{},
["Pennsylvania"]
)
])
def test_fips_to_name(test_key, test_kwargs, expected):
assert geography.fips_to_name(test_key, **test_kwargs) == expected
@pytest.mark.parametrize("test_key, test_kwargs, expected", [
(
"not a cbsa",
{},
[None]
),
(
"38300",
{},
["Pittsburgh, PA"]
),
(
"389",
{"ties_method": "all"},
[{"38900": ["Portland-Vancouver-Hillsboro, OR-WA"], "38940": ["Port St. Lucie, FL"]}]
),
(
["38300", "389"],
{"ties_method": "all"},
[
{"38300": ["Pittsburgh, PA"]},
{"38900": ["Portland-Vancouver-Hillsboro, OR-WA"], "38940": ["Port St. Lucie, FL"]}
]
)
])
def test_cbsa_to_name(test_key, test_kwargs, expected):
assert geography.cbsa_to_name(test_key, **test_kwargs) == expected
@pytest.mark.parametrize("test_key, test_kwargs, expected", [
(
"CA",
{},
["California"]
),
(
"CAA",
{},
[None]
),
(
["CA", "PA"],
{},
["California", "Pennsylvania"]
),
(
["CAAA", "PA"],
{},
[None, "Pennsylvania"]
),
])
def test_abbr_to_name(test_key, test_kwargs, expected):
assert geography.abbr_to_name(test_key, **test_kwargs) == expected
@pytest.mark.parametrize("test_key, test_kwargs, expected", [
(
"California",
{},
["CA"]
),
(
"Californiaaaaa",
{},
[None]
),
(
["California", "Pennsylvania"],
{},
["CA", "PA"]
),
(
["California", "Pennsylvaniaa"],
{},
["CA", None]
),
(
["California", "Pennsylvania", "California"],
{},
["CA", "PA", "CA"]
),
])
def test_name_to_abbr(test_key, test_kwargs, expected):
assert geography.name_to_abbr(test_key, **test_kwargs) == expected
@pytest.mark.parametrize("test_key, test_kwargs, expected", [
(
"12",
{},
["FL"]
),
(
"7",
{"ties_method": "all"},
[{'17000': ['IL'],
'27000': ['MN'],
'37000': ['NC'],
'47000': ['TN'],
'72000': ['PR']}]
),
(
["ABC"],
{},
[None]
),
])
def test_fips_to_abbr(test_key, test_kwargs, expected):
assert geography.fips_to_abbr(test_key, **test_kwargs) == expected
@pytest.mark.parametrize("test_key, test_kwargs, expected", [
(
"Pittsburgh",
{},
["38300"]
),
(
"New",
{"state": "CA"},
[None]
),
(
"New",
{},
["14460"]
)
])
def test_name_to_cbsa(test_key, test_kwargs, expected):
assert geography.name_to_cbsa(test_key, **test_kwargs) == expected
@pytest.mark.parametrize("test_key, test_kwargs, expected", [
(
"PA",
{},
["42000"]
),
(
"New",
{},
[None]
),
(
["PA", "ca"],
{"ignore_case": True},
["42000", "06000"]
)
])
def test_abbr_to_fips(test_key, test_kwargs, expected):
assert geography.abbr_to_fips(test_key, **test_kwargs) == expected
@pytest.mark.parametrize("test_key, test_kwargs, expected", [
(
"Allegheny",
{},
["42003"]
),
(
"Miami",
{},
["12086"]
),
(
"Miami",
{"ties_method": "all"},
[{"Miami-Dade County": ["12086"], "Miami County": ["18103", "20121", "39109"]}]
),
(
["Allegheny", "Miami", "New "],
{"ties_method": "all"},
[
{"Allegheny County": ["42003"]},
{"Miami-Dade County": ["12086"],
"Miami County": ["18103", "20121", "39109"]},
{"New Haven County": ["09009"], "New London County": ["09011"],
"New Castle County": ["10003"], "New Madrid County": ["29143"],
"New York County": ["36061"], "New Hanover County": ["37129"],
"New Kent County": ["51127"], "New Hampshire": ["33000"],
"New Jersey": ["34000"], "New Mexico": ["35000"],
"New York": ["36000"]}
]
),
(
["Allegheny", "Miami", "New "],
{},
["42003", "12086", "09009"]
),
(
"New ",
{"ties_method": "all", "state": "ny"},
[{"New York": ["36000"], "New York County": ["36061"]}]
),
])
def test_name_to_fips(test_key, test_kwargs, expected):
assert geography.name_to_fips(test_key, **test_kwargs) == expected
@pytest.mark.parametrize("test_args, test_kwargs, expected", [
(
(["a", "b"], ["a", "b", "c"], ["x", "y", "z"]),
{},
["x", "y"]
),
(
(["a", "b"], ["a", "a", "aa", "b"], ["w", "x", "y", "z"]),
{"ties_method": "all"},
[{"a": ["w", "x"], "aa": ["y"]}, {"b": ["z"]}]
),
(
(["a", "b"], ["A", "aa", "b"], ["x", "y", "z"]),
{"ignore_case": True},
["x", "z"]
),
(
(["a", "b"], ["a", "aa", "b"], ["x", "y", "z"]),
{"fixed": True},
["x", "z"]
),
(
(["a", "b"], ["a", "aa", "b"], ["x", "y", "z"]),
{"ties_method": "all", "fixed": True},
[{"a": ["x"]}, {"b": ["z"]}]
),
(
(["a", "b"], ["A", "aa", "b"], ["x", "y", "z"]),
{"ties_method": "all", "ignore_case": True},
[{"A": ["x"], "aa": ["y"]}, {"b": ["z"]}]
),
(
(["a", "b"], ["A", "aa", "b"], ["x", "y", "z"]),
{"ties_method": "all", "fixed": True, "ignore_case": True},
[{}, {"b": ["z"]}]
),
(
("A", ["aa", "a"], ["x", "y"]),
{},
[None]
),
(
("A", ["aa", "a"], ["x", "y"]),
{"ignore_case": True},
["x"]
),
(
(["A"], ["aa", "a"], ["x", "y"]),
{"ignore_case": True},
["x"]
),
(
("A", ["aa", "a"], ["x", "y"]),
{"ignore_case": True, "ties_method": "all"},
[{"aa": ["x"], "a": ["y"]}]
),
(
(["A"], ["aa", "a"], ["x", "y"]),
{"ignore_case": True, "ties_method": "all"},
[{"aa": ["x"], "a": ["y"]}]
),
(
("A", ["aa", "a"], ["x", "y"]),
{"ignore_case": True, "ties_method": "all"},
[{"aa": ["x"], "a": ["y"]}]
),
(
("A", ["aa", "a"], ["x", "y"]),
{"ignore_case": True},
["x"]
),
(
("a", ["a", "a"], ["x", "y"]),
{"ties_method": "all"},
[{"a": ["x", "y"]}]
),
(
("a", ["aa", "a"], ["x", "y"]),
{"ties_method": "all"},
[{"aa": ["x"], "a": ["y"]}]
),
(
("a", ["aa", "a"], ["x", "y"]),
{"ties_method": "all", "fixed": True},
[{"a": ["y"]}]
),
(
("A", ["aa", "a"], ["x", "y"]),
{"ties_method": "all", "fixed": True, "ignore_case": True},
[{}]
),
(
("A", ["a", "a"], ["x", "y"]),
{"ties_method": "all", "ignore_case": True},
[{"a": ["x", "y"]}]
)
])
def test__lookup(test_args, test_kwargs, expected):
assert geography._lookup(*test_args, **test_kwargs) == expected
with pytest.raises(ValueError):
geography._lookup(None, None, None, ties_method="not a real method")
@pytest.mark.parametrize("test_dict_list, expected_return, warn, expected_warning", [
(
[{"a": ["x", "y"]}],
["x"],
1,
"Some inputs were not uniquely matched; returning only the first match in "
"each case. To return all matches, set `ties_method='all'`"
),
(
[{"a": ["x", "y"], "b": ["i", "j", "k"]}],
["x"],
1,
"Some inputs were not uniquely matched; returning only the first match in "
"each case. To return all matches, set `ties_method='all'`"
),
(
[{"a": ["x", "y"]}, {"b": ["i", "j", "k"]}],
["x", "i"],
1,
"Some inputs were not uniquely matched; returning only the first match in "
"each case. To return all matches, set `ties_method='all'`"
),
(
[{"a": ["x"]}],
["x"],
0,
None
)
])
def test__get_first_tie(test_dict_list, expected_return, warn, expected_warning):
if warn:
with pytest.warns(UserWarning) as record:
assert geography._get_first_tie(test_dict_list) == expected_return
assert record[0].message.args[0] == expected_warning
else:
assert geography._get_first_tie(test_dict_list) == expected_return
| 9,873 | 25.330667 | 99 | py |
covidcast | covidcast-main/Python-packages/covidcast-py/tests/test_plotting.py | import os
from datetime import date
from unittest.mock import patch
import matplotlib
import platform
import geopandas as gpd
# for some reason gpd.testing gives "no attribute" so we'll import it explicitly
import geopandas.testing as gpd_testing
import numpy as np
import pandas as pd
import pytest
from covidcast import plotting
SHAPEFILE_PATHS = {
"county": "../covidcast/shapefiles/county/cb_2019_us_county_5m.shp",
"state": "../covidcast/shapefiles/state/cb_2019_us_state_5m.shp",
"msa": "../covidcast/shapefiles/msa/cb_2019_us_cbsa_5m.shp",
"hrr": "../covidcast/shapefiles/hrr/geo_export_ad86cff5-e5ed-432e-9ec2-2ce8732099ee.shp"}
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
NON_GEOMETRY_COLS = ["geo_value", "time_value", "direction", "issue", "lag", "value", "stderr",
"sample_size", "geo_type", "data_source", "signal", "state_fips"]
def _convert_to_array(fig: matplotlib.figure.Figure) -> np.array:
"""Covert Matplotlib Figure into an numpy array for comparison."""
return np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) # get np array representation
@pytest.mark.skipif(platform.system() != "Linux", reason="Linux specific plot rendering expected.")
@patch("covidcast.plotting._signal_metadata")
def test_plot(mock_metadata):
mock_metadata.side_effect = [
{"mean_value": 0.5330011, "stdev_value": 0.4683431},
{"mean_value": 0.5330011, "stdev_value": 0.4683431},
{"mean_value": 0.5330011, "stdev_value": 0.4683431},
{"mean_value": 0.5304083, "stdev_value": 0.235302},
{"mean_value": 0.5705364, "stdev_value": 0.4348706},
{"mean_value": 0.5705364, "stdev_value": 0.4348706},
]
matplotlib.use("agg")
# load expected choropleth as an array
expected = np.load(os.path.join(CURRENT_PATH, "reference_data/expected_plot_arrays.npz"))
# test county plots
test_county = pd.read_csv(
os.path.join(CURRENT_PATH, "reference_data/test_input_county_signal.csv"),
dtype=str,
parse_dates=["time_value"]
)
test_county["value"] = test_county.value.astype("float")
# w/o megacounties
no_mega_fig1 = plotting.plot(test_county,
time_value=date(2020, 8, 4),
combine_megacounties=False)
# give margin of +-2 for floating point errors and weird variations (1 isn't consistent)
assert np.allclose(_convert_to_array(no_mega_fig1), expected["no_mega_1"], atol=2, rtol=0)
no_mega_fig2 = plotting.plot_choropleth(test_county,
cmap="viridis",
figsize=(5, 5),
edgecolor="0.8",
combine_megacounties=False)
assert np.allclose(_convert_to_array(no_mega_fig2), expected["no_mega_2"], atol=2, rtol=0)
# w/ megacounties
mega_fig = plotting.plot_choropleth(test_county, time_value=date(2020, 8, 4))
# give margin of +-2 for floating point errors and weird variations (1 isn't consistent)
assert np.allclose(_convert_to_array(mega_fig), expected["mega"], atol=2, rtol=0)
# test state
test_state = pd.read_csv(
os.path.join(CURRENT_PATH, "reference_data/test_input_state_signal.csv"),
dtype = str,
converters = {"value": float},
parse_dates = ["time_value"],
)
state_fig = plotting.plot(test_state)
assert np.allclose(_convert_to_array(state_fig), expected["state"], atol=2, rtol=0)
# test MSA
test_msa = pd.read_csv(
os.path.join(CURRENT_PATH, "reference_data/test_input_msa_signal.csv"),
dtype = str,
converters = {"value": float},
parse_dates = ["time_value"],
)
msa_fig = plotting.plot(test_msa)
assert np.allclose(_convert_to_array(msa_fig), expected["msa"], atol=2, rtol=0)
# test bubble
msa_bubble_fig = plotting.plot(test_msa, plot_type="bubble")
assert np.allclose(_convert_to_array(msa_bubble_fig), expected["msa_bubble"], atol=2, rtol=0)
def test_get_geo_df():
test_input = pd.DataFrame({"geo_value": ["24510", "31169", "37000"],
"value": [1.5, 2.5, 3],
"geo_type": ["county", "county", "county"],
"signal": ["a", "a", "a"],
"data_source": ["b", "b", "b"]})
# test counties
output1 = plotting.get_geo_df(test_input)
expected1 = gpd.read_file(
os.path.join(CURRENT_PATH, "reference_data/expected_get_geo_df_right.gpkg"),
dtype={"geo_value": str})
pd.testing.assert_frame_equal(expected1, output1)
# test counties w/ left join
output2 = plotting.get_geo_df(test_input, join_type="left")
expected2 = gpd.read_file(
os.path.join(CURRENT_PATH, "reference_data/expected_get_geo_df_left.gpkg"),
dtype={"geo_value": str})
pd.testing.assert_frame_equal(expected2, output2)
# test states
test_input["geo_type"] = "state"
test_input["geo_value"] = ["24510", "31169", "ca"]
output3 = plotting.get_geo_df(test_input)
expected3 = gpd.read_file(
os.path.join(CURRENT_PATH, "reference_data/expected_get_geo_df_state.gpkg"),
dtype={"geo_value": str})
pd.testing.assert_frame_equal(expected3, output3)
# test MSAs
test_input["geo_type"] = "msa"
test_input["geo_value"] = ["10420", "10580", "ca"]
output4 = plotting.get_geo_df(test_input)
expected4 = gpd.read_file(
os.path.join(CURRENT_PATH, "reference_data/expected_get_geo_df_msa.gpkg"),
dtype={"geo_value": str})
pd.testing.assert_frame_equal(expected4, output4)
    # test HRRs
test_input["geo_type"] = "hrr"
test_input["geo_value"] = ["10420", "102", "96"]
output5 = plotting.get_geo_df(test_input)
expected5 = gpd.read_file(
os.path.join(CURRENT_PATH, "reference_data/expected_get_geo_df_hrr.gpkg"),
dtype={"geo_value": str})
pd.testing.assert_frame_equal(expected5, output5)
# test with sample signal
test_signal = pd.read_csv(
os.path.join(CURRENT_PATH, "reference_data/test_input_county_signal.csv"),
dtype={"geo_value": str}, parse_dates=["time_value", "issue"]
)
expected_geo_signal = gpd.read_file(os.path.join(CURRENT_PATH, "reference_data/expected_get_geo_df_right_2.gpkg"),dtype={"geo_value": str})
# geopandas reads file types slightly differently than pandas so need to recast
expected_geo_signal["time_value"] = expected_geo_signal.time_value.dt.tz_localize(None)
expected_geo_signal["issue"] = expected_geo_signal.issue.dt.tz_localize(None)
expected_geo_signal["direction"] = np.nan
output5 = plotting.get_geo_df(test_signal)
pd.testing.assert_frame_equal(expected_geo_signal, output5)
# test a non county or state geo_type
with pytest.raises(ValueError):
plotting.get_geo_df(pd.DataFrame(
{"geo_value": ["a"], "geo_type": ["b"], "signal": ["c"], "data_source": ["d"]}))
# test_duplicate_values
with pytest.raises(ValueError):
plotting.get_geo_df(pd.DataFrame({"geo_value": ["24510", "24510"]}))
def test__join_state_geo_df():
test_input = pd.DataFrame({"state_code": ["ca", "al", "ak"],
"value": [1.5, 2.5, 3]})
geo_info = gpd.read_file(os.path.join(CURRENT_PATH, SHAPEFILE_PATHS["state"]))
# test right join
output1 = plotting._join_state_geo_df(test_input, "state_code", geo_info)
assert type(output1) is gpd.GeoDataFrame
expected1 = gpd.read_file(
os.path.join(CURRENT_PATH, "reference_data/expected__join_state_geo_df_right.gpkg"),
dtype={"geo_value": str})
pd.testing.assert_frame_equal(expected1, output1)
# test left join
output2 = plotting._join_state_geo_df(test_input, "state_code", geo_info, "left")
expected2 = gpd.read_file(
os.path.join(CURRENT_PATH, "reference_data/expected__join_state_geo_df_left.gpkg"),
dtype={"geo_value": str})
pd.testing.assert_frame_equal(expected2, output2)
def test__join_county_geo_df():
test_input = pd.DataFrame({"county_code": ["24510", "31169", "37000"],
"test_value": [1.5, 2.5, 3],
"test_value2": [21.5, 32.5, 34]})
geo_info = gpd.read_file(os.path.join(CURRENT_PATH, SHAPEFILE_PATHS["county"]))
# test w/o megacounty combine
# test right join
no_mega_r = plotting._join_county_geo_df(test_input, "county_code", geo_info)
assert type(no_mega_r) is gpd.GeoDataFrame
expected_no_mega_r = gpd.read_file(
os.path.join(CURRENT_PATH,
"reference_data/expected__join_county_geo_df_no_mega_right.gpkg"),
dtype={"geo_value": str})
gpd_testing.assert_geodataframe_equal(expected_no_mega_r, no_mega_r)
# test left join
no_mega_l = plotting._join_county_geo_df(test_input, "county_code", geo_info, "left")
expected_no_mega_l = gpd.read_file(
os.path.join(CURRENT_PATH,
"reference_data/expected__join_county_geo_df_no_mega_left.gpkg"),
dtype={"geo_value": str})
gpd_testing.assert_geodataframe_equal(expected_no_mega_l, no_mega_l)
# test w/ megacounty combine
mega = plotting._join_county_geo_df(test_input, "county_code", geo_info, "left", True)
expected_mega = gpd.read_file(
os.path.join(CURRENT_PATH,
"reference_data/expected__join_county_geo_df_mega.gpkg"),
dtype={"geo_value": str})
gpd_testing.assert_geodataframe_equal(expected_mega, mega)
def test__join_msa_geo_df():
test_input = pd.DataFrame({"msa": ["10180", "10420", "10580"],
"test_value": [1.5, 2.5, 3],
"test_value2": [21.5, 32.5, 34]})
geo_info = gpd.read_file(os.path.join(CURRENT_PATH, SHAPEFILE_PATHS["msa"]))
# test right join
output1 = plotting._join_msa_geo_df(test_input, "msa", geo_info)
assert type(output1) is gpd.GeoDataFrame
# check that state parsing is working as intended
assert all(output1[output1.msa == "35620"].state_fips == "36")
expected1 = gpd.read_file(
os.path.join(CURRENT_PATH, "reference_data/expected__join_msa_geo_df_right.gpkg"),
dtype={"geo_value": str})
pd.testing.assert_frame_equal(expected1, output1)
# test left join
output2 = plotting._join_msa_geo_df(test_input, "msa", geo_info, "left")
expected2 = gpd.read_file(
os.path.join(CURRENT_PATH, "reference_data/expected__join_msa_geo_df_left.gpkg"),
dtype={"geo_value": str})
pd.testing.assert_frame_equal(expected2, output2)
def test__join_hrr_geo_df():
test_input = pd.DataFrame({"hrr": ["1", "102", "96"],
"test_value": [1.5, 2.5, 3],
"test_value2": [21.5, 32.5, 34]})
geo_info = gpd.read_file(os.path.join(CURRENT_PATH, SHAPEFILE_PATHS["hrr"]))
# test right join
output1 = plotting._join_hrr_geo_df(test_input, "hrr", geo_info)
assert type(output1) is gpd.GeoDataFrame
expected1 = gpd.read_file(
os.path.join(CURRENT_PATH, "reference_data/expected__join_hrr_geo_df_right.gpkg"),
dtype={"geo_value": str})
pd.testing.assert_frame_equal(expected1, output1)
# test left join
output2 = plotting._join_hrr_geo_df(test_input, "hrr", geo_info, "left")
expected2 = gpd.read_file(
os.path.join(CURRENT_PATH, "reference_data/expected__join_hrr_geo_df_left.gpkg"),
dtype={"geo_value": str})
pd.testing.assert_frame_equal(expected2, output2)
def test__is_megacounty():
assert plotting._is_megacounty("12000")
assert not plotting._is_megacounty("12001")
assert not plotting._is_megacounty("120000")
| 11,890 | 42.716912 | 143 | py |
covidcast | covidcast-main/Python-packages/covidcast-py/tests/test_covidcast.py | import warnings
from datetime import date, datetime
from unittest.mock import patch
# Force tests to use a specific backend, so they reproduce across platforms
import matplotlib
matplotlib.use("AGG")
import pandas as pd
import numpy as np
import pytest
from epiweeks import Week
from covidcast import covidcast
from covidcast.errors import NoDataWarning
def sort_df(df):
"""Helper function for sorting dfs for comparison."""
df = df.sort_index(axis=1)
df.sort_values(df.columns[0], inplace=True)
return df
@patch("covidcast.covidcast._signal_metadata")
@patch("delphi_epidata.Epidata.covidcast")
def test_signal(mock_covidcast, mock_metadata):
mock_covidcast.return_value = {"result": 1, # successful API response
"epidata": [{"time_value": 20200622,
"issue": 20200724,
"direction": None}],
"message": "success"}
mock_metadata.return_value = {"max_time": pd.Timestamp("2020-08-04 00:00:00"),
"min_time": pd.Timestamp("2020-08-03 00:00:00")}
return_rows = {"time_value": [datetime(2020, 6, 22)],
"issue": datetime(2020, 7, 24),
"geo_type": "county",
"data_source": "source",
"signal": "signal"
}
# test happy path with no start or end day and one geo_value
response = covidcast.signal("source", "signal", geo_values="CA")
expected = pd.DataFrame(return_rows, index=[0]*2)
assert sort_df(response).equals(sort_df(expected))
# test happy path with start and end day (8 days apart) and one geo_value
response = covidcast.signal("source", "signal", start_day=date(2020, 8, 1),
end_day=date(2020, 8, 8), geo_values="CA")
expected = pd.DataFrame(return_rows, index=[0]*8)
assert sort_df(response).equals(sort_df(expected))
# test duplicate geo values
response = covidcast.signal("source", "signal", start_day=date(2020, 8, 1),
end_day=date(2020, 8, 8), geo_values=["CA", "CA"])
expected = pd.DataFrame(return_rows, index=[0]*8)
assert sort_df(response).equals(sort_df(expected))
# test no df output
mock_covidcast.return_value = {"epidata": [],
"result": -2,
"message": "no results found"}
assert not covidcast.signal("source", "signal", geo_values=[])
# test incorrect geo
with pytest.raises(ValueError):
covidcast.signal("source", "signal", geo_type="not_a_real_geo")
# test invalid dates
with pytest.raises(ValueError):
covidcast.signal("source", "signal", geo_type="state",
start_day=date(2020, 4, 2), end_day=date(2020, 4, 1))
@patch("delphi_epidata.Epidata.covidcast_meta")
def test_metadata(mock_covidcast_meta):
    # not generating full DF since most attributes aren't used
mock_covidcast_meta.side_effect = [
{"result": 1, # successful API response
"epidata": [{"max_time": 20200622, "min_time": 20200421, "last_update": 12345, "time_type": "day"},
{"max_time": 20200724, "min_time": 20200512, "last_update": 99999, "time_type": "day"}],
"message": "success"},
{"result": 0, # unsuccessful API response
"epidata": [{"max_time": 20200622, "min_time": 20200421},
{"max_time": 20200724, "min_time": 20200512}],
"message": "error: failed"}]
# test happy path
response = covidcast.metadata()
expected = pd.DataFrame({
"max_time": [datetime(2020, 6, 22), datetime(2020, 7, 24)],
"min_time": [datetime(2020, 4, 21), datetime(2020, 5, 12)],
"last_update": [datetime(1970, 1, 1, 3, 25, 45), datetime(1970, 1, 2, 3, 46, 39)],
"time_type": ["day", "day"]})
assert sort_df(response).equals(sort_df(expected))
# test failed response raises RuntimeError
with pytest.raises(RuntimeError):
covidcast.metadata()
def test_aggregate_signals():
test_input1 = pd.DataFrame(
{"geo_value": ["a", "b", "c", "a"],
"time_value": [date(2020, 1, 1), date(2020, 1, 1), date(2020, 1, 1), date(2020, 1, 2)],
"value": [2, 4, 6, 8],
"signal": ["i", "i", "i", "i"],
"geo_type": ["state", "state", "state", "state"],
"data_source": ["x", "x", "x", "x"]})
test_input2 = pd.DataFrame(
{"geo_value": ["a", "b", "c", "d"],
"time_value": [date(2020, 1, 1), date(2020, 1, 1), date(2020, 1, 1), date(2020, 1, 1)],
"value": [1, 3, 5, 7],
"signal": ["j", "j", "j", "j"],
"geo_type": ["state", "state", "state", "state"],
"data_source": ["y", "y", "y", "y"],
"extra_col": ["0", "0", "0", "0"]})
test_input3 = pd.DataFrame(
{"geo_value": ["b", "c", "d", "b"],
"time_value": [date(2020, 1, 1), date(2020, 1, 1), date(2020, 1, 1), date(2020, 1, 2)],
"value": [0.5, 1.5, 2.5, 3.5],
"signal": ["k", "k", "k", "k"],
"geo_type": ["state", "state", "state", "state"],
"data_source": ["z", "z", "z", "z"]})
# test 3 signals from 3 sources with outer join
expected1 = pd.DataFrame(
{"geo_value": ["a", "b", "c", "d", "a", "b", "c", "d", "b"],
"time_value": [date(2020, 1, 1), date(2020, 1, 1), date(2020, 1, 1), date(2020, 1, 1),
date(2020, 1, 2), date(2020, 1, 2), date(2020, 1, 2), date(2020, 1, 2),
date(2020, 1, 3)],
"x_i_0_value": [2, 4, 6, np.nan, 8, np.nan, np.nan, np.nan, np.nan],
"y_j_1_value": [1, 3, 5, 7, np.nan, np.nan, np.nan, np.nan, np.nan],
"y_j_1_extra_col": ["0", "0", "0", "0", np.nan, np.nan, np.nan, np.nan, np.nan],
"z_k_2_value": [np.nan, np.nan, np.nan, np.nan, np.nan, 0.5, 1.5, 2.5, 3.5],
"geo_type": ["state"]*9})
assert covidcast.aggregate_signals(
[test_input1, test_input2, test_input3], dt=[0, 0, 1]).equals(expected1)
# test 3 signals from 3 sources with inner join has no intersection
assert covidcast.aggregate_signals(
[test_input1, test_input3], dt=[0, 1], join_type="inner").empty
# test 2 signals from same source (one lagged) with inner join
expected2 = pd.DataFrame(
{"geo_value": ["a"],
"time_value": [date(2020, 1, 2)],
"x_i_0_value": [8],
"x_i_1_value": [2],
"geo_type": ["state"]})
assert covidcast.aggregate_signals(
[test_input1, test_input1], dt=[0, 1], join_type="inner").equals(expected2)
# test same signal twice with a lag
expected3 = pd.DataFrame(
{"geo_value": ["a", "b", "c", "a", "b", "c", "a"],
"time_value": [date(2020, 1, 1), date(2020, 1, 1), date(2020, 1, 1), date(2020, 1, 2),
date(2020, 1, 2), date(2020, 1, 2), date(2020, 1, 3)],
"x_i_0_value": [2, 4, 6, 8, np.nan, np.nan, np.nan],
"x_i_1_value": [np.nan, np.nan, np.nan, 2, 4, 6, 8],
"geo_type": ["state"]*7})
assert covidcast.aggregate_signals([test_input1, test_input1], dt=[0, 1]).equals(expected3)
# test invalid lag length
with pytest.raises(ValueError):
covidcast.aggregate_signals([test_input1, test_input1], dt=[0])
# test mixed geo_types
test_input4 = pd.DataFrame(
{"geo_value": ["b", "c", "d", "b"],
"time_value": [date(2020, 1, 1), date(2020, 1, 1), date(2020, 1, 1), date(2020, 1, 2)],
"value": [0.5, 1.5, 2.5, 3.5],
"signal": ["k", "k", "k", "k"],
"geo_type": ["county", "county", "county", "county"],
"data_source": ["z", "z", "z", "z"]})
with pytest.raises(ValueError):
covidcast.aggregate_signals([test_input1, test_input4])
def test__parse_datetimes():
assert covidcast._parse_datetimes("202001", "week") == pd.Timestamp(Week(2020, 1).startdate())
assert covidcast._parse_datetimes("20200205", "day") == pd.Timestamp("20200205")
assert pd.isna(covidcast._parse_datetimes("2020", "test"))
def test__detect_metadata():
test_input = pd.DataFrame(
{"data_source": ["a", "a"], "signal": ["b", "b"], "geo_type": ["c", "c"]})
assert covidcast._detect_metadata(test_input) == ("a", "b", "c")
# test heterogenous cases error
test_bad_source = pd.DataFrame(
{"data_source": ["a", "d"], "signal": ["b", "b"], "geo_type": ["c", "c"]})
with pytest.raises(ValueError):
covidcast._detect_metadata(test_bad_source)
test_bad_signal = pd.DataFrame(
{"data_source": ["a", "a"], "signal": ["d", "b"], "geo_type": ["c", "c"]})
with pytest.raises(ValueError):
covidcast._detect_metadata(test_bad_signal)
test_bad_geo = pd.DataFrame(
{"data_source": ["a", "a"], "signal": ["b", "b"], "geo_type": ["c", "x"]})
with pytest.raises(ValueError):
covidcast._detect_metadata(test_bad_geo)
@patch("delphi_epidata.Epidata.covidcast")
def test__fetch_epidata(mock_covidcast):
mock_covidcast.return_value = {"message": "failed"}
# test warning when an unknown bad response is received
with warnings.catch_warnings(record=True) as w:
covidcast._fetch_epidata("source", "signal", date(2020, 4, 2), date(2020, 4, 2),
"*", None, None, None, None)
assert len(w) == 1
assert str(w[0].message) == \
"Problem obtaining source signal data on 20200402 for geography '*': failed"
assert w[0].category is RuntimeWarning
# test warning when a no data response is received
mock_covidcast.return_value = {"message": "no results"} # no data API response
with warnings.catch_warnings(record=True) as w:
covidcast._fetch_epidata("source", "signal", date(2020, 4, 2), date(2020, 4, 2),
"county", None, None, None, None)
assert len(w) == 1
assert str(w[0].message) == "No source signal data found on 20200402 for geography 'county'"
assert w[0].category is NoDataWarning
# test no epidata yields nothing
mock_covidcast.return_value = {"message": "success"} # no epidata
assert not covidcast._fetch_epidata(None, None, date(2020, 4, 1), date(2020, 4, 1),
None, None, None, None, None)
# test end_day < start_day yields nothing
mock_covidcast.return_value = {"message": "success"} # no epidata
assert not covidcast._fetch_epidata(None, None, date(2020, 4, 2), date(2020, 4, 1),
None, None, None, None, None)
    # not generating full DF since most attributes aren't used
mock_covidcast.side_effect = [{"result": 1, # successful API response
"epidata": [{"time_value": 20200622,
"issue": 20200724,
"direction": None}],
"message": "success"},
{"result": 1, # second successful API
"epidata": [{"time_value": 20200821,
"issue": 20200925}],
"message": "success"}]
# test happy path with 2 day range
response = covidcast._fetch_epidata(
None, None, date(2020, 4, 2), date(2020, 4, 3), None, None, None, None, None)
expected = [pd.DataFrame({"time_value": [20200622],
"issue": [20200724],
"direction": None
}),
pd.DataFrame({"time_value": [20200821],
"issue": [20200925],
}),
]
assert len(response) == 2
pd.testing.assert_frame_equal(response[0], expected[0])
pd.testing.assert_frame_equal(response[1], expected[1])
@patch("delphi_epidata.Epidata.async_epidata")
def test__async_fetch_epidata(mock_async_epidata):
mock_async_epidata.return_value = [({"message": "failed"}, {"time_values": 20200402})]
# test warning when an unknown bad response is received
with warnings.catch_warnings(record=True) as w:
covidcast._async_fetch_epidata("source", "signal", date(2020, 4, 2), date(2020, 4, 2),
"*", None, None, None, None)
assert len(w) == 1
assert str(w[0].message) == \
"Problem obtaining source signal data on 20200402 for geography '*': failed"
assert w[0].category is RuntimeWarning
# test warning when a no data response is received
mock_async_epidata.return_value = [({"message": "no results"}, {"time_values": 20200402})] # no data API response
with warnings.catch_warnings(record=True) as w:
covidcast._async_fetch_epidata("source", "signal", date(2020, 4, 2), date(2020, 4, 2),
"county", None, None, None, None)
assert len(w) == 1
assert str(w[0].message) == "No source signal data found on 20200402 for geography 'county'"
assert w[0].category is NoDataWarning
# test no epidata yields nothing
mock_async_epidata.return_value = [({"message": "success"}, None)] # no epidata
assert not covidcast._async_fetch_epidata(None, None, date(2020, 4, 1), date(2020, 4, 1),
None, None, None, None, None)
# test end_day < start_day yields nothing
mock_async_epidata.return_value = [({"message": "success"}, None)] # no epidata
assert not covidcast._async_fetch_epidata(None, None, date(2020, 4, 2), date(2020, 4, 1),
None, None, None, None, None)
    # not generating full DF since most attributes aren't used
mock_async_epidata.return_value = [
({"result": 1, # successful API response
"epidata": [{"time_value": 20200622,
"issue": 20200724,
"direction": None}],
"message": "success"}, {}),
({"result": 1, # second successful API
"epidata": [{"time_value": 20200821,
"issue": 20200925}],
"message": "success"}, {})
]
# test happy path with 2 day range
response = covidcast._async_fetch_epidata(
None, None, date(2020, 4, 2), date(2020, 4, 3), None, None, None, None, None)
expected = [pd.DataFrame({"time_value": [20200622],
"issue": [20200724],
"direction": None}),
pd.DataFrame({"time_value": [20200821],
"issue": [20200925]}),
]
assert len(response) == 2
pd.testing.assert_frame_equal(response[0], expected[0])
pd.testing.assert_frame_equal(response[1], expected[1])
@patch("covidcast.covidcast.metadata")
def test__signal_metadata(mock_metadata):
mock_metadata.return_value = pd.DataFrame({"data_source": ["usa-facts", "doctor-visits"],
"signal": ["raw_cli", "smooth_cli"],
"time_type": ["day", "day"],
"geo_type": ["hrr", "state"]})
# test happy path
assert covidcast._signal_metadata("usa-facts", "raw_cli", "hrr") == \
{"data_source": "usa-facts", "signal": "raw_cli", "time_type": "day", "geo_type": "hrr"}
# test no matches raises ValueError
with pytest.raises(ValueError):
covidcast._signal_metadata("not", "actual", "values")
def test__date_to_api_string():
# since the function just wraps strftime, this is just to ensure the format doesn't change
assert covidcast._date_to_api_string(date(2020, 4, 2)) == "20200402"
assert covidcast._date_to_api_string(date(2020, 4, 2), time_type="week") == "202014"
def test__dates_to_api_strings():
# test happy paths
assert covidcast._dates_to_api_strings(date(2020, 4, 2)) == "20200402"
assert covidcast._dates_to_api_strings([date(2020, 4, 2),
date(2020, 5, 2)]) == "20200402-20200502"
assert covidcast._dates_to_api_strings(date(2020, 4, 2), time_type="week") == "202014"
assert covidcast._dates_to_api_strings([date(2020, 4, 2),
date(2020, 5, 2)], time_type="week") == "202014-202018"
| 16,595 | 46.965318 | 118 | py |
covidcast | covidcast-main/Python-packages/covidcast-py/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'COVIDcast API client'
copyright = '2023, Delphi research group'
author = 'Delphi research group'
# The full version, including alpha/beta/rc tags
release = '0.2.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx_autodoc_typehints',
'matplotlib.sphinxext.plot_directive'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["videos"]
html_theme_options = {
"extra_nav_links": {
"Delphi group": "https://delphi.cmu.edu/",
"COVIDcast maps": "https://delphi.cmu.edu/covidcast/",
"COVIDcast API": "https://cmu-delphi.github.io/delphi-epidata/api/covidcast.html"
}
}
| 2,310 | 34.015152 | 89 | py |
covidcast | covidcast-main/Python-packages/covidcast-py/covidcast/plotting.py | """This contains the plotting and geo data management methods for the COVIDcast signals."""
import io
import warnings
from datetime import date, timedelta
from typing import Tuple, Any
import geopandas as gpd
import imageio
import numpy as np
import pandas as pd
import pkg_resources
from matplotlib import pyplot as plt
from matplotlib import figure, axes
from tqdm import tqdm
from .covidcast import _detect_metadata, _signal_metadata
SHAPEFILE_PATHS = {"county": "shapefiles/county/cb_2019_us_county_5m.shp",
"state": "shapefiles/state/cb_2019_us_state_5m.shp",
"msa": "shapefiles/msa/cb_2019_us_cbsa_5m.shp",
"hrr": "shapefiles/hrr/geo_export_ad86cff5-e5ed-432e-9ec2-2ce8732099ee.shp"}
STATE_ABBR_TO_FIPS = {"AL": "01", "MN": "27", "ME": "23", "WA": "53", "LA": "22", "PA": "42",
"MD": "24", "CO": "08", "TN": "47", "MI": "26", "FL": "12", "VA": "51",
"IN": "18", "AS": "60", "HI": "15", "AZ": "04", "MO": "29", "SC": "45",
"DC": "11", "NM": "35", "MA": "25", "OR": "41", "MS": "28", "WI": "55",
"PR": "72", "NH": "33", "NV": "32", "GA": "13", "KY": "21", "NE": "31",
"WY": "56", "AK": "02", "OK": "40", "GU": "66", "DE": "10", "IA": "19",
"CA": "06", "VI": "78", "OH": "39", "NY": "36", "CT": "09", "AR": "05",
"VT": "50", "MP": "69", "MT": "30", "RI": "44", "WV": "54", "IL": "17",
"TX": "48", "UT": "49", "ND": "38", "KS": "20", "SD": "46", "NC": "37",
"NJ": "34", "ID": "16"}
# states within the contiguous US
CONTIGUOUS_FIPS = {"01", "04", "05", "06", "08", "09", "10", "11", "12", "13", "16", "17", "18",
"19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31",
"32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "44", "45",
"46", "47", "48", "49", "50", "51", "53", "54", "55", "56"}
def plot(data: pd.DataFrame,
time_value: date = None,
plot_type: str = "choropleth",
combine_megacounties: bool = True,
**kwargs: Any) -> figure.Figure:
"""Given the output data frame of :py:func:`covidcast.signal`, plot a choropleth or bubble map.
Projections used for plotting:
- ESRI:102003 (USA Contiguous Albers Equal Area Conic) for the contiguous US
and Puerto Rico
- ESRI:102006 (Alaska Albers Equal Area Conic) for Alaska
    - ESRI:102007 (Hawaii Albers Equal Area Conic) for Hawaii
    For visual purposes, Alaska and Hawaii are moved to the lower left corner of the contiguous US
and Puerto Rico is moved closer to Florida.
By default, choropleths use the `colormap
<https://matplotlib.org/tutorials/colors/colormaps.html>`_
``YlOrRd``, with colors scaled between 0 and the signal's historical mean value + 3
standard deviations. Custom arguments can be passed in as ``kwargs`` for
customizability. These arguments will be passed to the GeoPandas ``plot``
method; more information on these arguments can be found in `the GeoPandas
documentation
<https://geopandas.org/reference.html#geopandas.GeoDataFrame.plot>`_.
Bubble maps use a purple bubble by default, with all values discretized into 8 bins between 0.1
and the signal's historical mean value + 3 standard deviations. Values below 0 have no
bubble but have the region displayed in white, and values above the mean + 3 std dev are binned
into the highest bubble. Bubbles are scaled by area.
:param data: Data frame of signal values, as returned from :py:func:`covidcast.signal`.
:param time_value: If multiple days of data are present in ``data``, map only values from this
day. Defaults to plotting the most recent day of data in ``data``.
:param combine_megacounties: For each state, display all counties without a signal value as a
single polygon with the megacounty value, as opposed to plotting all the county boundaries.
Defaults to `True`.
:param kwargs: Optional keyword arguments passed to ``GeoDataFrame.plot()``.
:param plot_type: Type of plot to create. Either choropleth (default) or bubble map.
:return: Matplotlib figure object.
"""
if plot_type not in {"choropleth", "bubble"}:
raise ValueError("`plot_type` must be 'choropleth' or 'bubble'.")
data_source, signal, geo_type = _detect_metadata(data) # pylint: disable=W0212
meta = _signal_metadata(data_source, signal, geo_type) # pylint: disable=W0212
# use most recent date in data if none provided
day_to_plot = time_value if time_value else max(data.time_value)
day_data = data.loc[data.time_value == pd.to_datetime(day_to_plot), :]
kwargs["vmax"] = kwargs.get("vmax", meta["mean_value"] + 3 * meta["stdev_value"])
kwargs["figsize"] = kwargs.get("figsize", (12.8, 9.6))
fig, ax = _plot_background_states(kwargs["figsize"])
ax.set_title(f"{data_source}: {signal}, {day_to_plot.strftime('%Y-%m-%d')}")
if plot_type == "choropleth":
_plot_choro(ax, day_data, combine_megacounties, **kwargs)
else:
_plot_bubble(ax, day_data, geo_type, **kwargs)
return fig
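# A minimal usage sketch (illustrative only, not part of the original module;
# assumes the fb-survey smoothed_cli signal is retrievable via covidcast.signal):
#
#   import covidcast
#   from datetime import date
#   data = covidcast.signal("fb-survey", "smoothed_cli",
#                           date(2020, 8, 4), date(2020, 8, 4), geo_type="county")
#   fig = plot(data, plot_type="choropleth", cmap="YlOrRd")
#   fig.savefig("cli_county_map.png")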
def plot_choropleth(data: pd.DataFrame,
time_value: date = None,
combine_megacounties: bool = True,
**kwargs: Any) -> figure.Figure:
"""Plot choropleths for a signal. This method is deprecated and has been generalized to plot().
:param data: Data frame of signal values, as returned from :py:func:`covidcast.signal`.
:param time_value: If multiple days of data are present in ``data``, map only values from this
day. Defaults to plotting the most recent day of data in ``data``.
:param kwargs: Optional keyword arguments passed to ``GeoDataFrame.plot()``.
:return: Matplotlib figure object.
"""
warnings.warn("Function `plot_choropleth` is deprecated. Use `plot()` instead.")
return plot(data, time_value, "choropleth", combine_megacounties, **kwargs)
def get_geo_df(data: pd.DataFrame,
geo_value_col: str = "geo_value",
geo_type_col: str = "geo_type",
join_type: str = "right",
combine_megacounties: bool = False) -> gpd.GeoDataFrame:
"""Augment a :py:func:`covidcast.signal` data frame with the shape of each geography.
This method takes in a pandas DataFrame object and returns a GeoDataFrame
object from the `GeoPandas package <https://geopandas.org/>`_. The
GeoDataFrame will contain the geographic shape corresponding to every row in
its ``geometry`` column; for example, a data frame of county-level signal
observations will be returned with the shape of every county.
After detecting the geography type (state, county, HRR, and MSA are currently
supported) of the input, this function builds a GeoDataFrame that contains
state and geometry information from the Census or CMS for that geography type. By
default, it will take the signal data (left side) and geo data (right side)
and right join them, so all states/counties will always be present
regardless of whether ``data`` contains values for those locations. ``left``,
``outer``, and ``inner`` joins are also supported and can be selected with
the ``join_type`` argument.
If ``combine_megacounties=False`` (default) all counties without a signal value will be
given the value of the megacounty if present. If ``combine_megacounties=True``, a left join
will be conducted and the megacounty rows will be given a polygon of the union of all
constituent counties without a value. Other joins will not use megacounties.
See the `geographic coding documentation
<https://cmu-delphi.github.io/delphi-epidata/api/covidcast_geography.html>`_
for information about megacounties.
By default, this function identifies the geography for each row of the input
data frame using its ``geo_value`` column, matching data frames returned by
:py:func:`covidcast.signal`, but the ``geo_value_col`` and ``geo_type_col``
arguments can be provided to match geographies for data frames with
different column names.
Geographic data is sourced from 1:5,000,000-scale shapefiles from the `2019
US Census Cartographic Boundary Files
<https://www.census.gov/geographies/mapping-files/time-series/geo/cartographic-boundary.html>`_
and the `CMS Data Website <https://data.cms.gov/widgets/ia25-mrsk>`_.
:param data: DataFrame of values and geographies.
:param geo_value_col: Name of column containing values of interest.
:param geo_type_col: Name of column containing geography type.
:param join_type: Type of join to do between input data (left side) and geo data (right side).
Must be one of `right` (default), `left`, `outer`, or `inner`.
:param combine_megacounties: For each state, return all counties without a signal value as a
single row and polygon with the megacounty value. Defaults to `False`.
:return: GeoDataFrame containing all columns from the input ``data``, along
with a ``geometry`` column (containing a polygon) and a ``state_fips``
column (a two-digit FIPS code identifying the US state containing this
geography). For MSAs that span multiple states, the first state in the MSA name is provided.
The geometry is given in the GCS NAD83 coordinate system for states, counties, and MSAs, and
WGS84 for HRRs.
"""
if join_type == "right" and any(data[geo_value_col].duplicated()):
raise ValueError("join_type `right` is incompatible with duplicate values in a "
"given region. Use `left` or ensure your input data is a single signal for"
" a single date and geography type. ")
geo_type = _detect_metadata(data, geo_type_col)[2] # pylint: disable=W0212
if geo_type not in ["state", "county", "msa", "hrr"]:
raise ValueError("Unsupported geography type; "
"only `state`, `county`, `hrr`, and `msa` supported.")
shapefile_path = pkg_resources.resource_filename(__name__, SHAPEFILE_PATHS[geo_type])
geo_info = gpd.read_file(shapefile_path)
if geo_type == "state":
output = _join_state_geo_df(data, geo_value_col, geo_info, join_type)
elif geo_type == "msa":
output = _join_msa_geo_df(data, geo_value_col, geo_info, join_type)
elif geo_type == "hrr":
geo_info["geometry"] = geo_info["geometry"].translate(0, -0.185) # fix projection shift bug
output = _join_hrr_geo_df(data, geo_value_col, geo_info, join_type)
else: # geo_type must be "county"
output = _join_county_geo_df(data, geo_value_col, geo_info, join_type, combine_megacounties)
return output
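# A minimal usage sketch (illustrative; the column values below are made up):
#
#   df = pd.DataFrame({"geo_value": ["42003"], "value": [1.0],
#                      "geo_type": ["county"], "signal": ["sig"],
#                      "data_source": ["src"]})
#   gdf = get_geo_df(df)       # right join: every county appears in the result
#   gdf.plot(column="value")   # standard GeoPandas plotting now works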
def animate(data: pd.DataFrame, filepath: str, fps: int = 3, dpi: int = 150, **kwargs: Any) -> None:
"""Generate an animated video file of a signal over time.
Given a signal DataFrame, generates the choropleth for each day to form an animation of the
signal. Accepts arguments for video parameters as well as optional plotting arguments.
Supported output formats are listed in the
`imageio ffmpeg documentation <https://imageio.readthedocs.io/en/stable/format_ffmpeg.html>`_.
:param data: DataFrame for a single signal over time.
:param filepath: Path where video will be saved. Filename must contain supported extension.
:param fps: Frame rate in frames per second for animation. Defaults to 3.
:param dpi: Dots per inch for output video. Defaults to 150 on a 12.8x9.6 figure (1920x1440).
:param kwargs: Optional keyword arguments passed to :py:func:`covidcast.plot`.
:return: None
"""
# probesize is set to avoid warning by ffmpeg on frame rate up to 4k resolution.
writer = imageio.get_writer(filepath, fps=fps, input_params=["-probesize", "75M"])
num_days = (max(data.time_value) - min(data.time_value)).days
day_list = [min(data.time_value) + timedelta(days=x) for x in range(num_days+1)]
for d in tqdm(day_list):
buf = io.BytesIO()
plot(data, time_value=d, **kwargs)
plt.savefig(buf, dpi=dpi)
plt.close()
buf.seek(0)
writer.append_data(imageio.imread(buf))
writer.close()
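# A minimal usage sketch (illustrative; assumes `data` covers multiple days and
# ffmpeg is available through imageio-ffmpeg):
#
#   animate(data, "signal.mp4", fps=5, cmap="viridis")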
def _plot_choro(ax: axes.Axes,
data: gpd.GeoDataFrame,
combine_megacounties: bool,
**kwargs: Any) -> None:
"""Generate a choropleth map on a given Figure/Axes from a GeoDataFrame.
:param ax: Matplotlib axes to plot on.
:param data: GeoDataFrame with information to plot.
:param kwargs: Optional keyword arguments passed to ``GeoDataFrame.plot()``.
:return: Matplotlib axes with the plot added.
"""
kwargs["vmin"] = kwargs.get("vmin", 0)
kwargs["cmap"] = kwargs.get("cmap", "YlOrRd")
data_w_geo = get_geo_df(data, combine_megacounties=combine_megacounties)
for shape in _project_and_transform(data_w_geo):
if not shape.empty:
shape.plot(column="value", ax=ax, **kwargs)
sm = plt.cm.ScalarMappable(cmap=kwargs["cmap"],
norm=plt.Normalize(vmin=kwargs["vmin"], vmax=kwargs["vmax"]))
# this is to remove the set_array error that occurs on some platforms
sm._A = [] # pylint: disable=W0212
plt.colorbar(sm, ticks=np.linspace(kwargs["vmin"], kwargs["vmax"], 8), ax=ax,
orientation="horizontal", fraction=0.045, pad=0.04, format="%.2f")
def _plot_bubble(ax: axes.Axes, data: gpd.GeoDataFrame, geo_type: str, **kwargs: Any) -> None:
"""Generate a bubble map on a given Figure/Axes from a GeoDataFrame.
The maximum bubble size is set to the figure area / 1.5, with a x3 multiplier if the geo_type
is ``state``.
:param ax: Matplotlib axes to plot on.
:param data: GeoDataFrame with information to plot.
:param kwargs: Optional keyword arguments passed to ``GeoDataFrame.plot()``.
:return: Matplotlib axes with the plot added.
"""
kwargs["vmin"] = kwargs.get("vmin", 0.1)
kwargs["color"] = kwargs.get("color", "purple")
kwargs["alpha"] = kwargs.get("alpha", 0.5)
data_w_geo = get_geo_df(data, join_type="inner")
label_bins = np.linspace(kwargs["vmin"], kwargs["vmax"], 8) # set bin labels
value_bins = list(label_bins) + [np.inf] # set ranges for bins by adding +inf for largest bin
# set max bubble size proportional to figure size, with a multiplier for state plots
state_multiple = 3 if geo_type == "state" else 1
bubble_scale = np.prod(kwargs["figsize"]) / 1.5 / kwargs["vmax"] * state_multiple
# discretize data and scale labels to correct sizes
data_w_geo["binval"] = pd.cut(data_w_geo.value, labels=label_bins, bins=value_bins, right=False)
data_w_geo["binval"] = data_w_geo.binval.astype(float) * bubble_scale
for shape in _project_and_transform(data_w_geo):
if not shape.empty and not shape.binval.isnull().values.all():
shape.plot(color="1", ax=ax, legend=True, edgecolor="0.8", linewidth=0.5)
shape["geometry"] = shape["geometry"].centroid # plot bubbles at each polgyon centroid
shape.plot(markersize="binval", color=kwargs["color"], ax=ax, alpha=kwargs["alpha"])
# to generate the legend, need to plot the reference points as scatter plots off the map
for b in label_bins:
ax.scatter([1e10], [1e10], color=kwargs["color"], alpha=kwargs["alpha"],
s=b*bubble_scale, label=round(b, 2))
ax.legend(frameon=False, ncol=8, loc="lower center", bbox_to_anchor=(0.5, -0.1))
def _plot_background_states(figsize: tuple) -> tuple:
"""Plot US states in light grey as the background for other plots.
:param figsize: Dimensions of plot.
:return: Matplotlib figure and axes.
"""
fig, ax = plt.subplots(1, figsize=figsize)
ax.axis("off")
state_shapefile_path = pkg_resources.resource_filename(__name__, SHAPEFILE_PATHS["state"])
state = gpd.read_file(state_shapefile_path)
for state in _project_and_transform(state, "STATEFP"):
state.plot(color="0.9", ax=ax, edgecolor="0.8", linewidth=0.5)
ax.set_xlim(plt.xlim())
ax.set_ylim(plt.ylim())
return fig, ax
def _project_and_transform(data: gpd.GeoDataFrame,
state_col: str = "state_fips") -> Tuple:
"""Segment and break GeoDF into Contiguous US, Alaska, Puerto Rico, and Hawaii for plotting.
Given GeoDF with state fips column, break into Contiguous US, Alaska, Puerto Rico, and Hawaii
GeoDFs with their own Albers Equal Area Conic Projections.
Also scales and translates so Alaska and Hawaii are in the bottom left corner and Puerto Rico
is closer to Hawaii.
:param data: GeoDF with shape info and a column designating the state.
:param state_col: Name of column with state FIPS codes.
:return: Tuple of four GeoDFs: Contiguous US, Alaska, Hawaii, and Puerto Rico.
"""
cont = data.loc[[i in CONTIGUOUS_FIPS for i in data[state_col]], :].to_crs("ESRI:102003")
alaska = data.loc[data[state_col] == "02", :].to_crs("ESRI:102006")
pr = data.loc[data[state_col] == "72", :].to_crs("ESRI:102003")
hawaii = data.loc[data[state_col] == "15", :].to_crs("ESRI:102007")
alaska.geometry = alaska.geometry.scale(0.35, 0.35, origin=(0, 0)).translate(-1.8e6, -1.6e6)
hawaii.geometry = hawaii.geometry.translate(-1e6, -2e6)
pr.geometry = pr.geometry.translate(-1.2e6, 0.5e6)
return cont, alaska, pr, hawaii
def _join_state_geo_df(data: pd.DataFrame,
state_col: str,
geo_info: gpd.GeoDataFrame,
join_type: str = "right") -> gpd.GeoDataFrame:
"""Join DF information to polygon information in a GeoDF at the state level.
:param data: DF with state info
:param state_col: name of column in `data` containing state info to join on
:param geo_info: GeoDF of state shape info read from Census shapefiles
:param join_type: Type of join to do between input data (left side) and geo data (right side).
Must be one of {‘left’, ‘right’, ‘outer’, ‘inner’}.
:return: ``data`` with state polygon and state FIPS joined.
"""
input_cols = list(data.columns)
geo_info.STUSPS = [i.lower() for i in geo_info.STUSPS] # lowercase for consistency
merged = data.merge(geo_info, how=join_type, left_on=state_col, right_on="STUSPS", sort=True)
# use full state list in the return
merged[state_col] = merged.STUSPS.combine_first(merged[state_col])
merged.rename(columns={"STATEFP": "state_fips"}, inplace=True)
return gpd.GeoDataFrame(merged[input_cols + ["state_fips", "geometry"]])
def _join_county_geo_df(data: pd.DataFrame,
county_col: str,
geo_info: gpd.GeoDataFrame,
join_type: str = "right",
combine_megacounties: bool = False) -> gpd.GeoDataFrame:
"""Join DF information to polygon information in a GeoDF at the county level.
Counties with no direct key in the data DF will have the megacounty value joined.
:param data: DF with county info.
:param county_col: name of column in `data` containing county info to join on.
:param geo_info: GeoDF of county shape info read from Census shapefiles.
:param join_type: Type of join to do between input data (left side) and geo data (right side).
Must be one of {‘left’, ‘right’, ‘outer’, ‘inner’}.
:param combine_megacounties: For each state, return all counties without a signal value as a
single polygon with the megacounty value.
:return: ``data`` with county polygon and state fips joined.
"""
input_cols = list(data.columns)
# create state FIPS code in copy, otherwise original gets modified
data = data.assign(state=[i[:2] for i in data[county_col]])
if combine_megacounties:
merged = _combine_megacounties(data, county_col, geo_info)
else:
merged = _distribute_megacounties(data, county_col, geo_info, join_type)
merged[county_col] = merged.GEOID.combine_first(merged[county_col])
merged.rename(columns={"STATEFP": "state_fips"}, inplace=True)
return gpd.GeoDataFrame(merged[input_cols + ["state_fips", "geometry"]])
def _combine_megacounties(data: pd.DataFrame,
county_col: str,
geo_info: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""Join a DataFrame of county signals with a GeoDataFrame of polygons for plotting.
Merges a DataFrame of counties and signals with a DataFrame of county polygons. Megacounties,
if present, are assigned a polygon which is the union of all counties in the state with no
signal value.
:param data: DataFrame of county signals.
:param county_col: Name of column containing county.
    :param geo_info: GeoDataFrame of counties and corresponding polygons.
    :return: ``data`` with county polygon and state fips joined. No polygon information is
provided for counties without a signal value since they are captured by the megacounty
polygon.
"""
merged = data.merge(geo_info, how="left", left_on=county_col, right_on="GEOID", sort=True)
missing = set(geo_info.GEOID) - set(data[county_col])
for i, row in merged.iterrows():
if _is_megacounty(row[county_col]):
state = row[county_col][:2]
state_missing = [j for j in missing if j.startswith(state)]
combined_poly = geo_info.loc[geo_info.GEOID.isin(state_missing), "geometry"].unary_union
# pandas has a bug when assigning MultiPolygons, so you need to do this weird workaround
# https://github.com/geopandas/geopandas/issues/992
merged.loc[[i], "geometry"] = gpd.GeoSeries(combined_poly).values
merged.loc[[i], "STATEFP"] = state
return merged
def _distribute_megacounties(data: pd.DataFrame,
county_col: str,
geo_info: gpd.GeoDataFrame,
join_type: str = "right") -> gpd.GeoDataFrame:
"""Join a DataFrame of county signals with a GeoDataFrame of polygons for plotting.
Merges a DataFrame of counties and signals with a DataFrame of county polygons. Counties
without a value but with a corresponding megacounty take on the megacounty value.
:param data: DataFrame of county signals.
:param county_col: Name of column containing county.
:param geo_info: GeoDataFrame of counties and corresponding polygons.
:param join_type: Type of join to do between input data (left side) and geo data (right side).
Must be one of {‘left’, ‘right’, ‘outer’, ‘inner’}.
    :return: ``data`` with county polygon and state fips joined. No polygon information is
provided for megacounties.
"""
# join all counties with valid FIPS
merged = data.merge(geo_info, how=join_type, left_on=county_col, right_on="GEOID", sort=True)
mega_df = data.loc[[_is_megacounty(i) for i in data[county_col]], :]
if not mega_df.empty and join_type == "right":
# if mega counties exist, join them on state
merged = merged.merge(mega_df, how="left", left_on="STATEFP", right_on="state", sort=True)
        # if no county value present, use the megacounty values
for c in data.columns:
merged[c] = merged[f"{c}_x"].combine_first(merged[f"{c}_y"])
return merged
def _join_msa_geo_df(data: pd.DataFrame,
msa_col: str,
geo_info: gpd.GeoDataFrame,
join_type: str = "right") -> gpd.GeoDataFrame:
"""Join DF information to polygon information in a GeoDF at the MSA level.
For MSAs which span multiple states, the first state in the name is returned for the state FIPS.
    :param data: DF with MSA info
    :param msa_col: name of column in `data` containing MSA info to join on
    :param geo_info: GeoDF of CBSA shape info read from Census shapefiles
:param join_type: Type of join to do between input data (left side) and geo data (right side).
Must be one of {‘left’, ‘right’, ‘outer’, ‘inner’}.
:return: ``data`` with cbsa polygon and state fips joined.
"""
geo_info = geo_info[geo_info.LSAD == "M1"] # only get metro and not micropolitan areas
input_cols = list(data.columns)
merged = data.merge(geo_info, how=join_type, left_on=msa_col, right_on="GEOID", sort=True)
# use full state list in the return
merged[msa_col] = merged.GEOID.combine_first(merged[msa_col])
# get the first state, which will be the first two characters after the comma and whitespace
merged["state_fips"] = [STATE_ABBR_TO_FIPS.get(i.split(",")[1][1:3]) for i in merged.NAME]
return gpd.GeoDataFrame(merged[input_cols + ["state_fips", "geometry"]])
def _join_hrr_geo_df(data: pd.DataFrame,
msa_col: str,
geo_info: gpd.GeoDataFrame,
join_type: str = "right") -> gpd.GeoDataFrame:
"""Join DF information to polygon information in a GeoDF at the HRR level.
    :param data: DF with HRR info
    :param msa_col: name of column in `data` containing HRR info to join on
    :param geo_info: GeoDF of HRR shape info
:param join_type: Type of join to do between input data (left side) and geo data (right side).
Must be one of {‘left’, ‘right’, ‘outer’, ‘inner’}.
:return: ``data`` with HRR polygon and state fips joined.
"""
geo_info["hrr_num"] = geo_info.hrr_num.astype("int").astype(str) # original col was a float
input_cols = list(data.columns)
merged = data.merge(geo_info, how=join_type, left_on=msa_col, right_on="hrr_num", sort=True)
# use full state list in the return
merged[msa_col] = merged.hrr_num.combine_first(merged[msa_col])
# get the first state, which will be the first two characters in the HRR name
merged["state_fips"] = [STATE_ABBR_TO_FIPS.get(i[:2]) for i in merged.hrr_name]
return gpd.GeoDataFrame(merged[input_cols + ["state_fips", "geometry"]])
def _is_megacounty(fips: str) -> bool:
"""Determine if a code is a megacounty.
:param fips: FIPS code to test.
:return: Boolean for if the input code is a megacounty or not.
"""
return fips.endswith("000") and len(fips) == 5
| 26,547 | 52.309237 | 100 | py |
covidcast | covidcast-main/Python-packages/covidcast-py/covidcast/errors.py | """Custom warnings and exceptions for covidcast functions."""
class NoDataWarning(Warning):
"""Warning raised when no data is returned on a given day by covidcast.signal()."""
| 182 | 29.5 | 87 | py |
covidcast | covidcast-main/Python-packages/covidcast-py/covidcast/covidcast.py | """This is the client side library for accessing the COVIDcast API."""
import warnings
from datetime import timedelta, date
from functools import reduce
from typing import Union, Iterable, Tuple, List
import pandas as pd
import numpy as np
from delphi_epidata import Epidata
from epiweeks import Week
from .errors import NoDataWarning
# Point API requests to the default endpoint
Epidata.BASE_URL = "https://api.covidcast.cmu.edu/epidata/api.php"
VALID_GEO_TYPES = {"county", "hrr", "msa", "dma", "state", "hhs", "nation"}
_ASYNC_CALL = False
def use_api_key(key):
"""Set the API key to use for all subsequent queries.
:param key: String containing the API key for you and/or your group.
Anyone may access the Epidata API anonymously without providing an API key.
Anonymous API access is currently rate-limited and with a maximum of two of
the requested parameters having multiple selections (signals, dates, issues,
regions, etc). To be exempt from these limits, use this function to apply an
API key to all subsequent queries. You can register for an API key at
<https://api.delphi.cmu.edu/epidata/admin/registration_form>.
Consult the `API documentation
<https://cmu-delphi.github.io/delphi-epidata/api/api_keys.html>`_
for details on our API key policies.
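
    Example (a minimal sketch; the key string below is a placeholder, not a
    real credential)::

        import covidcast
        covidcast.use_api_key("your-api-key")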
"""
Epidata.auth = ("epidata", key)
def signal(data_source: str,
signal: str, # pylint: disable=W0621
start_day: date = None,
end_day: date = None,
geo_type: str = "county",
geo_values: Union[str, Iterable[str]] = "*",
as_of: date = None,
issues: Union[date, Tuple[date], List[date]] = None,
lag: int = None,
time_type: str = "day") -> Union[pd.DataFrame, None]:
"""Download a Pandas data frame for one signal.
Obtains data for selected date ranges for all geographic regions of the
United States. Available data sources and signals are documented in the
`COVIDcast signal documentation
<https://cmu-delphi.github.io/delphi-epidata/api/covidcast_signals.html>`_.
Most (but not all) data sources are available at the county level, but the
API can also return data aggregated to metropolitan statistical areas,
hospital referral regions, or states, as desired, by using the ``geo_type``
argument.
The COVIDcast API tracks updates and changes to its underlying data, and
records the first date each observation became available. For example, a
data source may report its estimate for a specific state on June 3rd on June
5th, once records become available. This data is considered "issued" on June
5th. Later, the data source may update its estimate for June 3rd based on
revised data, creating a new issue on June 8th. By default, ``signal()``
returns the most recent issue available for every observation. The
``as_of``, ``issues``, and ``lag`` parameters allow the user to select
specific issues instead, or to see all updates to observations. These
options are mutually exclusive; if you specify more than one, ``as_of`` will
take priority over ``issues``, which will take priority over ``lag``.
Note that the API only tracks the initial value of an estimate and *changes*
to that value. If a value was first issued on June 5th and never updated,
asking for data issued on June 6th (using ``issues`` or ``lag``) would *not*
return that value, though asking for data ``as_of`` June 6th would.
Note also that the API enforces a maximum result row limit; results beyond
the maximum limit are truncated. This limit is sufficient to fetch
observations in all counties in the United States on one day. This client
automatically splits queries for multiple days across multiple API calls.
However, if data for one day has been issued many times, using the
``issues`` argument may return more results than the query limit. A warning
will be issued in this case. To see all results, split your query across
multiple calls with different ``issues`` arguments.
See the `COVIDcast API documentation
<https://cmu-delphi.github.io/delphi-epidata/api/covidcast.html>`_ for more
information on available geography types, signals, and data formats, and
further discussion of issue dates and data versioning.
:param data_source: String identifying the data source to query, such as
``"fb-survey"``.
:param signal: String identifying the signal from that source to query,
such as ``"smoothed_cli"``.
:param start_day: Query data beginning on this date. Provided as a
``datetime.date`` object. If ``start_day`` is ``None``, defaults to the
first day data is available for this signal. If ``time_type == "week"``, then
this is rounded to the epiweek containing the day (i.e. the previous Sunday).
:param end_day: Query data up to this date, inclusive. Provided as a
``datetime.date`` object. If ``end_day`` is ``None``, defaults to the most
recent day data is available for this signal. If ``time_type == "week"``, then
this is rounded to the epiweek containing the day (i.e. the previous Sunday).
:param geo_type: The geography type for which to request this data, such as
``"county"`` or ``"state"``. Available types are described in the
COVIDcast signal documentation. Defaults to ``"county"``.
:param geo_values: The geographies to fetch data for. The default, ``"*"``,
fetches all geographies. To fetch one geography, specify its ID as a
string; multiple geographies can be provided as an iterable (list, tuple,
...) of strings.
:param as_of: Fetch only data that was available on or before this date,
provided as a ``datetime.date`` object. If ``None``, the default, return
the most recent available data. If ``time_type == "week"``, then
this is rounded to the epiweek containing the day (i.e. the previous Sunday).
:param issues: Fetch only data that was published or updated ("issued") on
these dates. Provided as either a single ``datetime.date`` object,
indicating a single date to fetch data issued on, or a tuple or list
specifying (start, end) dates. In this case, return all data issued in
this range. There may be multiple rows for each observation, indicating
several updates to its value. If ``None``, the default, return the most
recently issued data. If ``time_type == "week"``, then these are rounded to
the epiweek containing the day (i.e. the previous Sunday).
:param lag: Integer. If, for example, ``lag=3``, fetch only data that was
published or updated exactly 3 days after the date. For example, a row
with ``time_value`` of June 3 will only be included in the results if its
data was issued or updated on June 6. If ``None``, the default, return the
most recently issued data regardless of its lag.
:param time_type: The temporal resolution to request this data. Most signals
are available at the "day" resolution (the default); some are only
available at the "week" resolution, representing an MMWR week ("epiweek").
:returns: A Pandas data frame with matching data, or ``None`` if no data is
returned. Each row is one observation on one day in one geographic location.
Contains the following columns:
``geo_value``
Identifies the location, such as a state name or county FIPS code. The
geographic coding used by COVIDcast is described in the `API
documentation here
<https://cmu-delphi.github.io/delphi-epidata/api/covidcast_geography.html>`_.
``signal``
Name of the signal, same as the value of the ``signal`` input argument. Used for
downstream functions to recognize where this signal is from.
``time_value``
Contains a `pandas Timestamp object
<https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Timestamp.html>`_
identifying the date this estimate is for. For data with ``time_type = "week"``, this
is the first day of the corresponding epiweek.
``issue``
Contains a `pandas Timestamp object
<https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Timestamp.html>`_
identifying the date this estimate was issued. For example, an estimate
with a ``time_value`` of June 3 might have been issued on June 5, after
the data for June 3rd was collected and ingested into the API.
``lag``
Integer giving the difference between ``issue`` and ``time_value``,
in days.
``value``
The signal quantity requested. For example, in a query for the
``confirmed_cumulative_num`` signal from the ``usa-facts`` source,
this would be the cumulative number of confirmed cases in the area, as
of the ``time_value``.
``stderr``
The value's standard error, if available.
``sample_size``
Indicates the sample size available in that geography on that day;
sample size may not be available for all signals, due to privacy or
other constraints.
``geo_type``
Geography type for the signal, same as the value of the ``geo_type`` input argument.
Used for downstream functions to parse ``geo_value`` correctly
``data_source``
Name of the signal source, same as the value of the ``data_source`` input argument. Used for
downstream functions to recognize where this signal is from.
Consult the `signal documentation
<https://cmu-delphi.github.io/delphi-epidata/api/covidcast_signals.html>`_
for more details on how values and standard errors are calculated for
specific signals.
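
    Example (a minimal sketch, mirroring the plotting examples shipped with
    this package; it fetches one day of the fb-survey smoothed_cli signal at
    the state level)::

        from datetime import date
        import covidcast

        data = covidcast.signal("fb-survey", "smoothed_cli",
                                start_day=date(2020, 8, 4),
                                end_day=date(2020, 8, 4),
                                geo_type="state")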
"""
if geo_type not in VALID_GEO_TYPES:
raise ValueError("geo_type must be one of " + ", ".join(VALID_GEO_TYPES))
if start_day is None or end_day is None:
signal_meta = _signal_metadata(data_source, signal, geo_type)
start_day = signal_meta["min_time"].to_pydatetime().date() \
if start_day is None else start_day
end_day = signal_meta["max_time"].to_pydatetime().date() \
if end_day is None else end_day
if start_day > end_day:
raise ValueError("end_day must be on or after start_day, but "
f"start_day = '{start_day}', end_day = '{end_day}'")
if _ASYNC_CALL:
dfs = _async_fetch_epidata(
data_source, signal, start_day, end_day, geo_type,
geo_values, as_of, issues, lag, time_type
)
else:
dfs = _fetch_epidata(
data_source, signal, start_day, end_day, geo_type,
geo_values, as_of, issues, lag, time_type
)
if len(dfs) > 0:
out = pd.concat(dfs)
out.drop("direction", axis=1, inplace=True)
out["time_value"] = out["time_value"].apply(lambda x: _parse_datetimes(x, time_type))
out["issue"] = out["issue"].apply(lambda x: _parse_datetimes(x, time_type))
out["geo_type"] = geo_type
out["data_source"] = data_source
out["signal"] = signal
return out
return None
def metadata() -> pd.DataFrame:
"""Fetch COVIDcast surveillance stream metadata.
Obtains a data frame of metadata describing all publicly available data
streams from the COVIDcast API. See the `data source and signals
documentation
<https://cmu-delphi.github.io/delphi-epidata/api/covidcast_signals.html>`_
for descriptions of the available sources.
:returns: A data frame containing one row per available signal, with the
following columns:
``data_source``
Data source name.
``signal``
Signal name.
``time_type``
Temporal resolution at which this signal is reported. "day", for
example, means the signal is reported daily.
``geo_type``
Geographic level for which this signal is available, such as county,
state, msa, hss, hrr, or nation. Most signals are available at multiple geographic
levels and will hence be listed in multiple rows with their own
metadata.
``min_time``
First day for which this signal is available. For weekly signals, will be
the first day of the epiweek.
``max_time``
Most recent day for which this signal is available. For weekly signals, will be
the first day of the epiweek.
``num_locations``
Number of distinct geographic locations available for this signal. For
example, if `geo_type` is county, the number of counties for which this
signal has ever been reported.
``min_value``
The smallest value that has ever been reported.
``max_value``
The largest value that has ever been reported.
``mean_value``
The arithmetic mean of all reported values.
``stdev_value``
The sample standard deviation of all reported values.
``last_update``
The UTC datetime for when the signal value was last updated.
``max_issue``
Most recent date data was issued.
``min_lag``
Smallest lag from observation to issue, in days.
``max_lag``
Largest lag from observation to issue, in days.
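
    Example (a small sketch of how the returned frame is typically filtered)::

        meta = covidcast.metadata()
        daily_signals = meta[meta.time_type == "day"]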
"""
meta = Epidata.covidcast_meta()
if meta["result"] != 1:
# Something failed in the API and we did not get real metadata
raise RuntimeError("Error when fetching metadata from the API",
meta["message"])
meta_df = pd.DataFrame.from_dict(meta["epidata"])
meta_df["min_time"] = meta_df.apply(lambda x: _parse_datetimes(x.min_time, x.time_type), axis=1)
meta_df["max_time"] = meta_df.apply(lambda x: _parse_datetimes(x.max_time, x.time_type), axis=1)
meta_df["last_update"] = pd.to_datetime(meta_df["last_update"], unit="s")
return meta_df
def aggregate_signals(signals: list, dt: list = None, join_type: str = "outer") -> pd.DataFrame:
"""Given a list of DataFrames, [optionally] lag each one and join them into one DataFrame.
This method takes a list of DataFrames containing signal information for
geographic regions across time, and outputs a single DataFrame with a column
for each signal value for each region/time. The ``data_source``,
``signal``, and index of each DataFrame in ``signals`` are appended to the
front of each output column name separated by underscores (e.g.
``source_signal_0_inputcolumn``), and the original data_source and signal
columns will be dropped. The input DataFrames must all be of the same
geography type, and a single ``geo_type`` column will be returned in the final
DataFrame.
Each signal's time value can be shifted for analysis on lagged signals using the ``dt``
argument, which takes a list of integer days to lag each signal's date. Lagging a signal by +1
day means that all the dates get shifted forward by 1 day (e.g. Jan 1 becomes Jan 2).
:param signals: List of DataFrames to join.
:param dt: List of lags in days for each of the input DataFrames in ``signals``.
Defaults to ``None``. When provided, must be the same length as ``signals``.
:param join_type: Type of join to be done between the DataFrames in ``signals``.
Defaults to ``"outer"``, so the output DataFrame contains all region/time
combinations at which at least one signal was observed.
:return: DataFrame of aggregated signals.
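
    Example (a sketch; assumes ``cli`` and ``cases`` are two county-level
    frames previously returned by ``signal()``)::

        joined = covidcast.aggregate_signals([cli, cases], dt=[-7, 0])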
"""
if dt is not None and len(dt) != len(signals):
raise ValueError("Length of `dt` must be same as length of `signals`")
dt = [0] * len(signals) if not dt else dt
join_cols = ["time_value", "geo_value"]
dt_dfs = []
first_geo_type = _detect_metadata(signals[0])[2]
for i, (df, lag) in enumerate(zip(signals, dt)):
df_c = df.copy() # make a copy so we don't modify originals
source, sig_type, geo_type = _detect_metadata(df_c)
if geo_type != first_geo_type:
raise ValueError("Multiple geo_types detected. "
"All signals must have the same geo_type to be aggregated.")
df_c["time_value"] = [day + timedelta(lag) for day in df_c["time_value"]] # lag dates
df_c.drop(["signal", "data_source", "geo_type"], axis=1, inplace=True)
df_c.rename(
columns={j: f"{source}_{sig_type}_{i}_{j}" for j in df_c.columns if j not in join_cols},
inplace=True)
dt_dfs.append(df_c)
joined_df = reduce(lambda x, y: pd.merge(x, y, on=join_cols, how=join_type, sort=True), dt_dfs)
joined_df["geo_type"] = geo_type
return joined_df
def _parse_datetimes(date_int: int,
                     time_type: str,
                     date_format: str = "%Y%m%d") -> Union[pd.Timestamp, float]:  # float covers the nan case
"""Convert a date or epiweeks string into timestamp objects.
Datetimes (length 8) are converted to their corresponding date, while epiweeks (length 6)
    are converted to the date of the start of the week. Returns nan otherwise.
    Epiweeks use the CDC format.
    :param date_int: Int representation of date.
    :param time_type: Either "day" or "week", determining how ``date_int`` is interpreted.
    :param date_format: String of the date format to parse.
:returns: Timestamp.
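
    Example::

        _parse_datetimes(20200604, "day")   # Timestamp for 2020-06-04
        _parse_datetimes(202023, "week")    # Timestamp for the start of epiweek 23 of 2020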
"""
date_str = str(date_int)
if time_type == "day":
return pd.to_datetime(date_str, format=date_format)
if time_type == "week":
epiwk = Week(int(date_str[:4]), int(date_str[-2:]))
return pd.to_datetime(epiwk.startdate())
return np.nan
def _detect_metadata(data: pd.DataFrame,
data_source_col: str = "data_source",
signal_col: str = "signal",
geo_type_col: str = "geo_type") -> Tuple:
"""Given a DataFrame, return the signal attributes of that DataFrame.
Raises ValueError if any of the columns are heterogeneous.
Inputs must have all three of the relevant columns: data source, signal, and geography type.
    :param data: DataFrame with data_source, signal, and geo_type columns.
:param data_source_col: name of column with data source info
:param signal_col: name of column with signal info
:param geo_type_col: name of column with geography type
:return: tuple of the three types
"""
unique_data_source_vals = data[data_source_col].unique()
unique_signal_col_vals = data[signal_col].unique()
unique_geo_type_vals = data[geo_type_col].unique()
if len(unique_data_source_vals) > 1:
raise ValueError("Multiple data sources detected.")
if len(unique_signal_col_vals) > 1:
raise ValueError("Multiple signals detected.")
if len(unique_geo_type_vals) > 1:
raise ValueError("Multiple geography types detected.")
return unique_data_source_vals[0], unique_signal_col_vals[0], unique_geo_type_vals[0]
def _fetch_epidata(data_source: str,
signal: str, # pylint: disable=W0621
start_day: date,
end_day: date,
geo_type: str,
geo_value: Union[str, Iterable[str]],
as_of: date,
issues: Union[date, tuple, list],
lag: int,
time_type: str = "day") -> Union[pd.DataFrame, None]:
"""Fetch data from Epidata API.
signal() wraps this to support fetching data over a range of dates
and stacks the resulting data frames.
    Days with no data are skipped entirely, so signal() only concatenates
    data frames that actually contain results.
"""
as_of_str = _date_to_api_string(as_of, time_type) if as_of is not None else None
issues_strs = _dates_to_api_strings(issues, time_type) if issues is not None else None
cur_day = start_day
dfs = []
while cur_day <= end_day:
day_str = _date_to_api_string(cur_day, time_type)
day_data = Epidata.covidcast(data_source, signal, time_type=time_type,
geo_type=geo_type, time_values=day_str,
geo_value=geo_value, as_of=as_of_str,
issues=issues_strs, lag=lag)
# Two possible error conditions: no data or too much data.
if day_data["message"] == "no results":
warnings.warn(f"No {data_source} {signal} data found on {day_str} "
f"for geography '{geo_type}'",
NoDataWarning)
if day_data["message"] not in {"success", "no results"}:
warnings.warn(f"Problem obtaining {data_source} {signal} data on {day_str} "
f"for geography '{geo_type}': {day_data['message']}",
RuntimeWarning)
# In the too-much-data case, we continue to try putting the truncated
# data in our results. In the no-data case, skip this day entirely,
# since there is no "epidata" in the response.
if day_data.get("epidata"):
dfs.append(pd.DataFrame.from_dict(day_data["epidata"]))
cur_day += timedelta(1) if time_type == "day" else timedelta(7)
return dfs
def _async_fetch_epidata(data_source: str,
signal: str, # pylint: disable=W0621
start_day: date,
end_day: date,
geo_type: str,
geo_value: Union[str, Iterable[str]],
as_of: date,
issues: Union[date, tuple, list],
lag: int,
time_type: str = "day") -> Union[pd.DataFrame, None]:
"""Fetch data from Epidata API asynchronously.
signal() wraps this to support fetching data over a range of dates
and stacks the resulting data frames.
    Days with no data are skipped entirely, so signal() only concatenates
    data frames that actually contain results.
"""
dfs = []
params = []
date_range = pd.date_range(start_day, end_day, freq="D" if time_type == "day" else "W")
for day in date_range:
day_param = {
"source": "covidcast",
"data_source": data_source,
"signals": signal,
"time_type": "day",
"geo_type": geo_type,
"geo_value": geo_value,
"time_values": _date_to_api_string(day, time_type),
}
if as_of:
day_param["as_of"] = _date_to_api_string(as_of, time_type)
if issues:
day_param["issues"] = _dates_to_api_strings(issues, time_type)
if lag:
day_param["lag"] = lag
params.append(day_param)
output = Epidata.async_epidata(params, batch_size=100)
for day_data, params in output:
if day_data["message"] == "no results":
warnings.warn(f"No {data_source} {signal} data found on {params['time_values']} "
f"for geography '{geo_type}'", NoDataWarning)
if day_data["message"] not in {"success", "no results"}:
warnings.warn(f"Problem obtaining {data_source} {signal} "
f"data on {params['time_values']} "
f"for geography '{geo_type}': {day_data['message']}", RuntimeWarning)
if day_data.get("epidata"):
dfs.append(pd.DataFrame.from_dict(day_data["epidata"]))
return dfs
def _signal_metadata(data_source: str,
signal: str, # pylint: disable=W0621
geo_type: str) -> dict:
"""Fetch metadata for a single signal as a dict."""
meta = metadata()
mask = ((meta.data_source == data_source) &
(meta.signal == signal) &
(meta.time_type == "day") &
(meta.geo_type == geo_type))
matches = meta[mask]
if matches.shape[0] == 0:
raise ValueError(f"Unable to find metadata for source '{data_source}', "
f"signal '{signal}', at '{geo_type}' resolution.")
assert matches.shape[0] == 1, "it should be impossible to have two identical signals"
output: dict = matches.to_dict("records")[0]
return output
def _date_to_api_string(date: date, time_type: str = "day") -> str: # pylint: disable=W0621
"""Convert a date object to a YYYYMMDD or YYYYMM string expected by the API."""
if time_type == "day":
date_str = date.strftime("%Y%m%d")
elif time_type == "week":
date_str = Week.fromdate(date).cdcformat()
return date_str
def _dates_to_api_strings(dates: Union[date, list, tuple], time_type: str = "day") -> str:
"""Convert a date object, or pair of (start, end) objects, to YYYYMMDD strings."""
if not isinstance(dates, (list, tuple)):
return _date_to_api_string(dates, time_type)
return "-".join(_date_to_api_string(date, time_type) for date in dates)
| 24,833 | 44.566972 | 100 | py |
covidcast | covidcast-main/Python-packages/covidcast-py/covidcast/__init__.py | """Fetch data from Delphi's COVIDcast API.
The COVIDcast API provides daily updated data on the COVID-19 pandemic in the
United States, including cases, deaths, medical records, nationwide symptom
surveys, and other data collated by the Delphi research group at Carnegie Mellon
University.
Functions:
* signal - Fetch a Pandas data frame for one signal.
* metadata - Fetch metadata for all available signals.
* aggregate_signals - Lag and join multiple signal data frames into one.
* use_api_key - Set an API key used by all subsequent queries.
"""
from .covidcast import signal, metadata, aggregate_signals, use_api_key
from .plotting import plot, plot_choropleth, get_geo_df, animate
from .geography import (fips_to_name, cbsa_to_name, abbr_to_name,
name_to_abbr, name_to_cbsa, name_to_fips,
fips_to_abbr, abbr_to_fips)
| 739 | 36 | 80 | py |
covidcast | covidcast-main/Python-packages/covidcast-py/covidcast/geography.py | """Functions for converting and mapping between geographic types."""
import re
import warnings
from typing import Union, Iterable
import pandas as pd
import pkg_resources
COUNTY_CENSUS = pd.read_csv(
pkg_resources.resource_filename(__name__, "geo_mappings/county_census.csv"), dtype=str)
MSA_CENSUS = pd.read_csv(
pkg_resources.resource_filename(__name__, "geo_mappings/msa_census.csv"), dtype=str)
STATE_CENSUS = pd.read_csv(
pkg_resources.resource_filename(__name__, "geo_mappings/state_census.csv"), dtype=str)
# Filter undesired rows from CSVs.
# They're not removed from the files to keep them identical to rda files.
STATE_CENSUS = STATE_CENSUS.loc[STATE_CENSUS.STATE != "0"]
# pad to 2 characters with leading 0s
STATE_CENSUS["STATE"] = STATE_CENSUS["STATE"].str.zfill(2)
# add 000 to the end to get a 5 digit code
STATE_CENSUS["STATE"] = STATE_CENSUS["STATE"].str.pad(width=5, fillchar="0", side="right")
# filter out micropolitan areas
MSA_CENSUS = MSA_CENSUS.loc[MSA_CENSUS.LSAD == "Metropolitan Statistical Area"]
def fips_to_name(code: Union[str, Iterable[str]],
ignore_case: bool = False,
fixed: bool = False,
ties_method: str = "first") -> list:
"""Look up county names by FIPS codes with regular expression support.
Given an individual or list of FIPS codes or regular expressions, look up the corresponding
county names.
:param code: Individual or list of FIPS codes or regular expressions.
:param ignore_case: Boolean for whether or not to be case insensitive in the regular expression.
If ``fixed=True``, this argument is ignored. Defaults to ``False``.
:param fixed: Conduct an exact case sensitive match with the input string.
Defaults to ``False``.
:param ties_method: Method for determining how to deal with multiple outputs for a given input.
Must be one of ``"all"`` or ``"first"``. If ``"first"``, then only the first match for each
code is returned. If ``"all"``, then all matches for each code are returned.
Defaults to ``first``.
:return: If ``ties_method="first"``, returns a list of the first value found for each input key.
If ``ties_method="all"``, returns a list of dicts, one for each input, with keys
corresponding to all matched input keys and values corresponding to the list of county names.
The returned list will be the same length as the input, with ``None`` or ``{}`` if no values
are found for ``ties_method="first"`` and ``ties_method="all"``, respectively.
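
    Example (a sketch; the codes are illustrative)::

        fips_to_name("42003")                  # first matching county name
        fips_to_name("42", ties_method="all")  # regex match over many codes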
"""
return _lookup(code, COUNTY_CENSUS.FIPS, COUNTY_CENSUS.CTYNAME, ignore_case, fixed, ties_method)
def cbsa_to_name(code: Union[str, Iterable[str]],
ignore_case: bool = False,
fixed: bool = False,
ties_method: str = "first") -> list:
"""Look up MSA names by codes with regular expression support.
Given an individual or list of FIPS codes or regular expressions, look up the corresponding
MSA names.
:param code: Individual or list of FIPS codes or regular expressions.
:param ignore_case: Boolean for whether or not to be case insensitive in the regular expression.
If ``fixed=True``, this argument is ignored. Defaults to ``False``.
:param fixed: Conduct an exact case sensitive match with the input string.
Defaults to ``False``.
:param ties_method: Method for determining how to deal with multiple outputs for a given input.
Must be one of ``"all"`` or ``"first"``. If ``"first"``, then only the first match for each
code is returned. If ``"all"``, then all matches for each code are returned.
Defaults to ``first``.
:return: If ``ties_method="first"``, returns a list of the first value found for each input key.
If ``ties_method="all"``, returns a list of dicts, one for each input, with keys
corresponding to all matched input keys and values corresponding to the list of MSA names.
The returned list will be the same length as the input, with ``None`` or ``{}`` if no values
are found for ``ties_method="first"`` and ``ties_method="all"``, respectively.
"""
return _lookup(code, MSA_CENSUS.CBSA, MSA_CENSUS.NAME, ignore_case, fixed, ties_method)
def abbr_to_name(abbr: Union[str, Iterable[str]],
ignore_case: bool = False,
fixed: bool = False,
ties_method: str = "first") -> list:
"""Look up state name by abbreviation with regular expression support.
Given an individual or list of state abbreviations or regular expressions, look up the
corresponding state names.
:param abbr: Individual or list of state abbreviations or regular expressions.
:param ignore_case: Boolean for whether or not to be case insensitive in the regular expression.
If ``fixed=True``, this argument is ignored. Defaults to ``False``.
:param fixed: Conduct an exact case sensitive match with the input string.
Defaults to ``False``.
:param ties_method: Method for determining how to deal with multiple outputs for a given input.
Must be one of ``"all"`` or ``"first"``. If ``"first"``, then only the first match for each
code is returned. If ``"all"``, then all matches for each code are returned.
Defaults to ``first``.
:return: If ``ties_method="first"``, returns a list of the first value found for each input key.
If ``ties_method="all"``, returns a list of dicts, one for each input, with keys
corresponding to all matched input keys and values corresponding to the list of state names.
The returned list will be the same length as the input, with ``None`` or ``{}`` if no values
are found for ``ties_method="first"`` and ``ties_method="all"``, respectively.
"""
return _lookup(abbr, STATE_CENSUS.ABBR, STATE_CENSUS.NAME, ignore_case, fixed, ties_method)
def name_to_abbr(name: Union[str, Iterable[str]],
ignore_case: bool = False,
fixed: bool = False,
ties_method: str = "first") -> list:
"""Look up state abbreviation by name with regular expression support.
Given an individual or list of state names or regular expressions, look up the
corresponding state abbreviations.
:param name: Individual or list of state names or regular expressions.
:param ignore_case: Boolean for whether or not to be case insensitive in the regular expression.
If ``fixed=True``, this argument is ignored. Defaults to ``False``.
:param fixed: Conduct an exact case sensitive match with the input string.
Defaults to ``False``.
:param ties_method: Method for determining how to deal with multiple outputs for a given input.
Must be one of ``"all"`` or ``"first"``. If ``"first"``, then only the first match for each
code is returned. If ``"all"``, then all matches for each code are returned.
Defaults to ``first``.
:return: If ``ties_method="first"``, returns a list of the first value found for each input key.
If ``ties_method="all"``, returns a list of dicts, one for each input, with keys
corresponding to all matched input keys and values corresponding to the list of
state abbreviations. The returned list will be the same length as the input, with ``None`` or
``{}`` if no values are found for ``ties_method="first"`` and ``ties_method="all"``,
respectively.
"""
return _lookup(name, STATE_CENSUS.NAME, STATE_CENSUS.ABBR, ignore_case, fixed, ties_method)
def fips_to_abbr(code: Union[str, Iterable[str]],
ignore_case: bool = False,
fixed: bool = False,
ties_method: str = "first") -> list:
"""Look up state abbreviation by FIPS codes with regular expression support.
Given an individual or list of FIPS codes or regular expressions, look up the corresponding
state abbreviation. FIPS codes can be the 2 digit code (``covidcast.fips_to_abbr("12")``) or
    the 2 digit code with 000 appended to the end (``covidcast.fips_to_abbr("12000")``).
:param code: Individual or list of FIPS codes or regular expressions.
:param ignore_case: Boolean for whether or not to be case insensitive in the regular expression.
If ``fixed=True``, this argument is ignored. Defaults to ``False``.
:param fixed: Conduct an exact case sensitive match with the input string.
Defaults to ``False``.
:param ties_method: Method for determining how to deal with multiple outputs for a given input.
Must be one of ``"all"`` or ``"first"``. If ``"first"``, then only the first match for each
code is returned. If ``"all"``, then all matches for each code are returned.
Defaults to ``first``.
:return: If ``ties_method="first"``, returns a list of the first value found for each input key.
If ``ties_method="all"``, returns a list of dicts, one for each input, with keys
corresponding to all matched input keys and values corresponding to the list of county names.
The returned list will be the same length as the input, with ``None`` or ``{}`` if no values
are found for ``ties_method="first"`` and ``ties_method="all"``, respectively.
"""
return _lookup(code, STATE_CENSUS.STATE, STATE_CENSUS.ABBR, ignore_case, fixed, ties_method)
def name_to_cbsa(name: Union[str, Iterable[str]],
ignore_case: bool = False,
fixed: bool = False,
ties_method: str = "first",
state: str = None) -> list:
"""Look up MSA codes by names with regular expression support.
Given an individual or list of names or regular expressions, look up the corresponding
MSA codes.
:param name: Individual or list of MSA names or regular expressions.
:param ignore_case: Boolean for whether or not to be case insensitive in the regular expression.
If ``fixed=True``, this argument is ignored. Defaults to ``False``.
:param fixed: Conduct an exact case sensitive match with the input string.
Defaults to ``False``.
:param ties_method: Method for determining how to deal with multiple outputs for a given input.
Must be one of ``"all"`` or ``"first"``. If ``"first"``, then only the first match for each
code is returned. If ``"all"``, then all matches for each code are returned.
Defaults to ``first``.
:param state: 2 letter state code, case insensitive, to restrict results to.
:return: If ``ties_method="first"``, returns a list of the first value found for each input key.
If ``ties_method="all"``, returns a list of dicts, one for each input, with keys
corresponding to all matched input keys and values corresponding to the list of MSA codes.
The returned list will be the same length as the input, with ``None`` or ``{}`` if no values
are found for ``ties_method="first"`` and ``ties_method="all"``, respectively.
"""
if state:
state = state.upper()
df = MSA_CENSUS.loc[MSA_CENSUS.STATE == state]
else:
df = MSA_CENSUS
return _lookup(name, df.NAME, df.CBSA, ignore_case, fixed, ties_method)
def abbr_to_fips(code: Union[str, Iterable[str]],
ignore_case: bool = False,
fixed: bool = False,
ties_method: str = "first") -> list:
"""Look up state FIPS codes by abbreviation with regular expression support.
Given an individual or list of state abbreviations or regular expressions,
look up the corresponding state FIPS codes. The returned codes are 5 digits: the
2 digit state FIPS with 000 appended to the end.
:param code: Individual or list of abbreviations or regular expressions.
:param ignore_case: Boolean for whether or not to be case insensitive in the regular expression.
If ``fixed=True``, this argument is ignored. Defaults to ``False``.
:param fixed: Conduct an exact case sensitive match with the input string.
Defaults to ``False``.
:param ties_method: Method for determining how to deal with multiple outputs for a given input.
Must be one of ``"all"`` or ``"first"``. If ``"first"``, then only the first match for each
code is returned. If ``"all"``, then all matches for each code are returned.
Defaults to ``first``.
:return: If ``ties_method="first"``, returns a list of the first value found for each input key.
If ``ties_method="all"``, returns a list of dicts, one for each input, with keys
corresponding to all matched input keys and values corresponding to the list of county names.
The returned list will be the same length as the input, with ``None`` or ``{}`` if no values
are found for ``ties_method="first"`` and ``ties_method="all"``, respectively.
"""
return _lookup(code, STATE_CENSUS.ABBR, STATE_CENSUS.STATE, ignore_case, fixed, ties_method)
def name_to_fips(name: Union[str, Iterable[str]],
ignore_case: bool = False,
fixed: bool = False,
ties_method: str = "first",
state: str = None) -> list:
"""Look up FIPS codes by county names with regular expression support.
Given an individual or list of county names or regular expressions, look up the corresponding
FIPS codes.
:param name: Individual or list of county names or regular expressions.
:param ignore_case: Boolean for whether or not to be case insensitive in the regular expression.
If ``fixed=True``, this argument is ignored. Defaults to ``False``.
:param fixed: Conduct an exact case sensitive match with the input string.
Defaults to ``False``.
:param ties_method: Method for determining how to deal with multiple outputs for a given input.
Must be one of ``"all"`` or ``"first"``. If ``"first"``, then only the first match for each
code is returned. If ``"all"``, then all matches for each code are returned.
Defaults to ``first``.
:param state: 2 letter state code, case insensitive, to restrict results to.
:return: If ``ties_method="first"``, returns a list of the first value found for each input key.
If ``ties_method="all"``, returns a list of dicts, one for each input, with keys
corresponding to all matched input keys and values corresponding to the list of FIPS.
The returned list will be the same length as the input, with ``None`` or ``{}`` if no values
are found for ``ties_method="first"`` and ``ties_method="all"``, respectively.
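
    Example (a sketch; the names are illustrative)::

        name_to_fips("Allegheny")                    # first matching FIPS code
        name_to_fips("Lincoln", ties_method="all")   # all matching counties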
"""
if state:
state = state.upper()
df = COUNTY_CENSUS.loc[COUNTY_CENSUS.STNAME == abbr_to_name(state)[0]]
else:
df = COUNTY_CENSUS
return _lookup(name, df.CTYNAME, df.FIPS, ignore_case, fixed, ties_method)
def _lookup(key: Union[str, Iterable[str]],
keys: Iterable,
values: Iterable,
ignore_case: bool = False,
fixed: bool = False,
ties_method: str = "first") -> list:
"""Given an input, search for it in a list of keys and return the corresponding values.
:param key: Individual or list of search strings or regular expression patterns.
:param keys: List of keys to be searched
:param values: List of values that correspond to keys.
:param ignore_case: Boolean for whether or not to be case insensitive in the regular expression.
If ``fixed=True``, this argument is ignored. Defaults to ``False``.
:param fixed: Conduct an exact case sensitive match with the input string.
Defaults to ``False``.
:param ties_method: Method for determining how to deal with multiple outputs for a given input.
Must be one of ``"all"`` or ``"first"``. If ``"first"``, then only the first match for each
code is returned. If `"all"`, then all matches for each code are returned.
Defaults to ``first``.
:return: If ``ties_method="first"``, returns a list of the first value found for each input key.
If ``ties_method="all"``, returns a list of dicts, one for each input, with keys
corresponding to all matched input keys and values corresponding to the list of values.
The returned list will be the same length as the input, with None or {} if no values are found
for ``ties_method="first"`` and ``ties_method="all"``, respectively.
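
    Example (a sketch of the matching behavior)::

        _lookup("ab", ["abc", "zab"], [1, 2])                     # [1] (first match, with a warning)
        _lookup("ab", ["abc", "zab"], [1, 2], ties_method="all")  # [{"abc": [1], "zab": [2]}]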
"""
if ties_method not in ("first", "all"):
raise ValueError("Invalid `ties_method`. Must be one of `first` or `all`.")
key = [key] if isinstance(key, str) else key
case = re.IGNORECASE if (ignore_case and not fixed) else 0
output = []
for i in key:
result: dict = {}
for k, v in zip(keys, values):
if i == k if fixed else re.search(i, k, case):
result[k] = result.get(k, []) + [v]
output.append(result)
if ties_method == "first":
return _get_first_tie(output)
return output
def _get_first_tie(dict_list: list) -> list:
"""Return a list with the first value for the first key for each of the input dicts.
Needs to be Python 3.6+ for this to work, since earlier versions don't preserve insertion order.
:param dict_list: List of str:list dicts.
:return: list of the first key and first value for that key for each of the input dicts.
"""
not_unique = False
for d in dict_list:
if len(d) > 1 or any(len(val) > 1 for val in d.values()):
not_unique = True
if not_unique:
warnings.warn("Some inputs were not uniquely matched; returning only the first match "
"in each case. To return all matches, set `ties_method='all'`")
# first entry of first value
return [list(d.values())[0][0] if d else None for d in dict_list]
| 17,755 | 54.836478 | 100 | py |
covidcast | covidcast-main/docs/covidcast-py/plot_directive/plot_examples-2.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day = date(2020,8,4), end_day = date(2020,8,4), geo_type = "state")
covidcast.plot(data)
plt.show() | 238 | 38.833333 | 126 | py |
covidcast | covidcast-main/docs/covidcast-py/plot_directive/plot_examples-7.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day=date(2020, 8, 4), end_day=date(2020, 8, 4), geo_type="county")
geo_data = covidcast.get_geo_df(data)
CA = geo_data.loc[geo_data.state_fips == "06",:]
CA = CA.to_crs("EPSG:3395")
CA.plot(column="value", figsize=(5,5), legend=True)
plt.axis("off")
plt.show() | 399 | 39 | 125 | py |
covidcast | covidcast-main/docs/covidcast-py/plot_directive/plot_examples-1.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day = date(2020,8,4), end_day = date(2020,8,4), geo_type = "county")
covidcast.plot(data)
plt.show() | 239 | 39 | 127 | py |
covidcast | covidcast-main/docs/covidcast-py/plot_directive/plot_examples-5.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day = date(2020,8,4), end_day = date(2020,8,4), geo_type = "msa")
covidcast.plot(data, plot_type="bubble")
plt.show() | 256 | 41.833333 | 124 | py |
covidcast | covidcast-main/docs/covidcast-py/plot_directive/plot_examples-3.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day = date(2020,8,4), end_day = date(2020,8,4), geo_type = "msa")
covidcast.plot(data)
plt.show() | 236 | 38.5 | 124 | py |
covidcast | covidcast-main/docs/covidcast-py/plot_directive/plot_examples-4.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day = date(2020,8,4), end_day = date(2020,8,4), geo_type = "hrr")
covidcast.plot(data)
plt.show() | 236 | 38.5 | 124 | py |
covidcast | covidcast-main/docs/covidcast-py/plot_directive/plot_examples-6.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day=date(2020,8,3), end_day=date(2020,8,4), geo_type="county")
covidcast.plot(data, cmap="viridis", edgecolor="0.8")
plt.show() | 266 | 43.5 | 121 | py |
covidcast | covidcast-main/docs/covidcast-py/html/_downloads/ff9d0adfb7c8ffca0588503bd7c99492/plot_examples-6.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day=date(2020,8,3), end_day=date(2020,8,4), geo_type="county")
covidcast.plot(data, cmap="viridis", edgecolor="0.8")
plt.show() | 266 | 43.5 | 121 | py |
covidcast | covidcast-main/docs/covidcast-py/html/_downloads/e0a8df2a9fc8b7b132f415bd8ef2bdc4/plot_examples-7.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day=date(2020, 8, 4), end_day=date(2020, 8, 4), geo_type="county")
geo_data = covidcast.get_geo_df(data)
CA = geo_data.loc[geo_data.state_fips == "06",:]
CA = CA.to_crs("EPSG:3395")
CA.plot(column="value", figsize=(5,5), legend=True)
plt.axis("off")
plt.show() | 399 | 39 | 125 | py |
covidcast | covidcast-main/docs/covidcast-py/html/_downloads/45737588e68d40bc70776db507250bec/plot_examples-2.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day = date(2020,8,4), end_day = date(2020,8,4), geo_type = "state")
covidcast.plot(data)
plt.show() | 238 | 38.833333 | 126 | py |
covidcast | covidcast-main/docs/covidcast-py/html/_downloads/571325b5380a37ed852fc9bdad934c81/plot_examples-3.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day = date(2020,8,4), end_day = date(2020,8,4), geo_type = "msa")
covidcast.plot(data)
plt.show() | 236 | 38.5 | 124 | py |
covidcast | covidcast-main/docs/covidcast-py/html/_downloads/f1cd4c7dca77dd16a6bb13e5255f47b8/plot_examples-4.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day = date(2020,8,4), end_day = date(2020,8,4), geo_type = "hrr")
covidcast.plot(data)
plt.show() | 236 | 38.5 | 124 | py |
covidcast | covidcast-main/docs/covidcast-py/html/_downloads/c366b60b64ba94fc749687d0d5a05bf0/plot_examples-1.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day = date(2020,8,4), end_day = date(2020,8,4), geo_type = "county")
covidcast.plot(data)
plt.show() | 239 | 39 | 127 | py |
covidcast | covidcast-main/docs/covidcast-py/html/_downloads/376487b264df9d30227aca49c0e60421/plot_examples-5.py | import covidcast
from datetime import date
from matplotlib import pyplot as plt
data = covidcast.signal("fb-survey", "smoothed_cli", start_day = date(2020,8,4), end_day = date(2020,8,4), geo_type = "msa")
covidcast.plot(data, plot_type="bubble")
plt.show() | 256 | 41.833333 | 124 | py |
UL-CALC | UL-CALC-master/upper_limit.py | '''
Program to create artificial halo into visibility and estimate upper limit to halo flux
NOTE: Currently this program needs visibilities with a single spectral window
--------------------------------------------------------
Main Program
STAGES:
1) Estimate RMS : BANE is used to estimate the rms in a defined region of the image.
This value will be used to estimate the threshold of cleaning
as well as the flux of the halo to be injected.
2) Create Halo : Halo image is created at given position based on certain parameters
3) Add to MS file : Halo image is extrapolated to all BW frequencies, Fourier transformed
then added to input visibilities in a new MS file
4) Run CLEAN again : The CASA task tclean is run on new MS file
5) Convolve : Both the original image and the newly created image are convolved.
Beam parameters have to be either provided or a certain factor times
original beam is taken
6) Upper limits : Calculate upper limits using the excess flux estimated between original
image and injected halo image
--------------------------------------------------------
'''
import os, sys, shutil, subprocess, glob
from astropy.modeling.models import Gaussian2D, Polynomial1D, ExponentialCutoffPowerLaw1D
from astropy.cosmology import Planck15 as cosmo
from astroquery.ned import Ned
import numpy as np
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
execfile('modules.py')
logger.info('Running upper limit estimator...')
print('Output log file: {}'.format(logname))
# Set Halo location (based on cluster location or manually)
if cluster != '': x0,y0 = getCoords(imgpath, cluster)
else: x0,y0 = imhead(imgpath)['refpix'][0], imhead(imgpath)['refpix'][1]
# Calculate image RMS
img_rms = estimateRMS(imgpath, x0, y0, rms_reg)
#thresh = thresh_f * img_rms
# Calculate flux list (based on RMS or manually)
if do_fac: flx_list = [f * img_rms for f in flx_fac]
else: flx_list = flx_lst
# Convolve original image
logger.info('Convolving original image and getting statistics...')
i1_conv = '.'.join(imgpath.split('.')[:-1]) + '.conv'
i1_conv = myConvolve(imgpath, i1_conv, bopts)
i1_stats = getStats(i1_conv, x0, y0, radius)
# logger.info('Done!')
if i1_stats['flux'][0] < 0.:
    logger.info('NOTE: Estimated flux density in original image is negative. '
                'Alternate method will be used to estimate excess flux.\n')
# Calculate upper limits
c = 0
for flux in flx_list:
recovery = recoveredFlux(i1_stats, flux, img_rms)
if recovery > recv_th:
logger.info('\n#####\nRecovery threshold reached. Repeating process for new flux values...\n#####')
break
c = c+1
# Fine tuning
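# Once the recovery threshold is crossed, re-sample the flux interval between
# the last two trial values (or below the first value) on a finer grid to
# tighten the upper limit.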
if recovery > recv_th:
if 0 < c < len(flx_list):
new_flx_list = np.linspace(flx_list[c], flx_list[c - 1], num=n_split, endpoint=False)
new_flx_list = new_flx_list[1:]
for flux in new_flx_list:
recoveredFlux(i1_stats, flux, img_rms)
elif c == 0:
new_flx_list = [f * img_rms for f in np.arange(0, flx_fac[c], n_split)]
new_flx_list = new_flx_list[:0:-1]
for flux in new_flx_list:
recoveredFlux(i1_stats, flux, img_rms)
####--------------------------------XXXX------------------------------------#### | 3,300 | 39.753086 | 106 | py |
UL-CALC | UL-CALC-master/modules.py | execfile('params.py')
execfile('mylogging.py')
def prompt_with_timeout(t):
import sys
from select import select
print('Press Enter to continue...')
rlist, _, _ = select([sys.stdin], [], [], t)
if rlist:
s = sys.stdin.readline()
else:
print("No input received. Moving on...")
def replaceCheck(dir1, dir2):
if os.path.exists(dir1):
ovr = raw_input('Directory already exists. Overwrite? (y/n): ')
if ovr in ('y', 'Y'):
shutil.rmtree(dir1)
shutil.copytree(dir2, dir1)
elif ovr in ('n', 'N'):
print('Exiting now.')
sys.exit(1)
else:
shutil.copytree(dir2, dir1)
def getCoords(image, cluster):
ia.open(image)
ra = np.deg2rad(float(Ned.query_object(cluster)['RA']))
dec = np.deg2rad(float(Ned.query_object(cluster)['DEC']))
w = [ra, dec]
x0 = np.round(ia.topixel(w)['numeric'][0])
y0 = np.round(ia.topixel(w)['numeric'][1])
ia.close()
return x0, y0
def freqInfo(visfile):
'''
Function to print all the frequencies in a visibility spectral window
INPUT : visibility file
OUTPUT: array of all frequencies in the spectral window
'''
msmd.open(visfile)
freq = msmd.chanfreqs(0)
msmd.done()
return freq
def createHalo(ref_image, centre_x, centre_y, size, totflux, ftype):
'''
Function to create a halo of given flux at reference frequency of input image
INPUT:
ref_image: input image without halo
centre_x: x position where halo should be added
centre_y: y position where halo should be added
size : size of the halo (in pixels)
totflux : total flux of the halo
ftype : type of spatial distribution in halo
            'gaussian'    : I(r) = I0/(sig*sqrt(2*pi))*exp(-((x-x0)**2+(y-y0)**2)/(2*sig**2))
(sig=FWHM/(2*sqrt(2ln2)) where FWHM=Halo Diameter)
'polynomial' : I(r) = -0.719*r**2 + 1.867*r - 0.141 (r = Normalized Distance)
'exponential' : I(r) = I0 * exp(r/re) [re = 2.6 * Rh] (Rh = Halo Radius)
OUTPUT:
output image with halo
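    EXAMPLE (sketch only, run inside a CASA session; the file name and
    values are placeholders):
        halo = createHalo('cluster.image', 512., 512., 100., 0.005, 'E')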
'''
ref_halo = '.'.join(ref_image.split('.')[:-1]) + '_reffreq_flux_{:f}Jy_{}.image'.\
format(totflux, ftype)
logger.info('Creating halo image - {}'.format(ref_halo.split('/')[-1]))
replaceCheck(ref_halo, ref_image)
ia.open(ref_halo)
image_x = imhead(ref_halo)['shape'][0]
image_y = imhead(ref_halo)['shape'][1]
newim = np.zeros([image_x, image_y])
Y, X = np.meshgrid(np.arange(image_y), np.arange(image_x))
if ftype == 'G':
rh = (size/2.0)/(2.0*np.sqrt(2.0*np.log(2.0)))
g = Gaussian2D(totflux/(rh*np.sqrt(2*np.pi)),
centre_x, centre_y, rh, rh)
newim = g(X, Y)
elif ftype == 'P':
rh = size/2.
p = Polynomial1D(2, c0=-0.141, c1=1.867, c2=-0.719)
Z = np.sqrt((X - centre_x)**2 + (Y - centre_y)**2)
r = Z/rh
newim = totflux * np.where(r<1.0, p(1-r)-p(0), 0)
elif ftype == 'E':
rh = size/2.
e = ExponentialCutoffPowerLaw1D(
amplitude=totflux, alpha=0.0, x_cutoff=rh/2.6)
Z = np.sqrt((X - centre_x)**2 + (Y - centre_y)**2)
newim = e(Z)
logger.debug('{: <30s}{: >15f}'.format('Unnormalised Total flux:',np.sum(newim)))
ratio = totflux/np.sum(newim)
beam2 = ratio*newim
logger.debug('{: <30s}{: >15f}'.format('Scaled Total Flux:',np.sum(beam2)))
ia.putchunk(beam2)
# logger.info('Created halo with total flux density [[{:f} mJy]] and profile [[{}]] \
# at redshift [[z={}]] with size [[{:.2f} Mpc]].\n'.format(totflux*1.e3, ftype, z, l/1.e3))
logger.info('Created halo image with total flux density [{:.2f} mJy]'.format(totflux*1.e3))
ia.close()
return ref_halo
def addHaloVis(visfile, halofile, flux, spix):
'''
Function to add artificial halo to source visibilities
INPUT:
        visfile : visibility file
        halofile: halo image file
        flux    : total flux of the halo (used to name the output file)
        spix    : spectral index of halo to be assumed
OUTPUT:
visibility file with halo added
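    EXAMPLE (sketch only; the paths and values are placeholders):
        newvis = addHaloVis('target.MS', halo, 0.005, -1.3)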
'''
freq = freqInfo(visfile)
myms = '.'.join(visfile.split('.')[:-1]) + \
'_wHalo_flux_{:f}.MS'.format(flux)
logger.info('Creating modified visibility file - {}'.format(myms.split('/')[-1]))
replaceCheck(myms, visfile)
reffreq = np.max([imhead(imgpath)['refval'][2],
imhead(imgpath)['refval'][3]])
logger.debug('Halo Reference frequency = {:.2f} MHz'.format(reffreq/1.e6))
logger.info('Scaling halo flux to spw frequencies...')
for j, f in enumerate(freq):
try:
newhalo = 'haloimg_freq_{:.2f}_flux_{:.1f}.image'.format(
f/1.e6, flux)
expr = 'IM0*' + str(f/reffreq) + '^' + str(spix)
immath(imagename=halofile, expr=expr, outfile=newhalo)
default(ft)
# ft(vis=myms, model=newhalo, spw='0:'+str(j), incremental=True, usescratch=True)
ft(vis=myms, model=newhalo, spw='0:'+str(j), usescratch=True)
shutil.rmtree(newhalo)
except Exception as e:
logger.error('Something went wrong. Check for error!')
logger.error(e)
break
default(uvsub)
uvsub(vis=myms, reverse=True)
# logger.info('Done!')
logger.info('Visibility file with halo created!')
return myms
def cleanup(loc):
from glob import glob
extns = ['psf', 'flux', 'pb', 'sumwt', 'mask', 'model']
to_be_deleted = [fname for extn in extns for fname in glob(loc+'/*.'+extn)]
for f in to_be_deleted:
try:
shutil.rmtree(f)
except Exception as e:
logger.error(e)
def getStats(inpimage, x0, y0, radius):
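    '''
    Return imstat statistics (flux, rms, ...) inside a circle of `radius`
    arcsec centred on pixel (x0, y0) of the input image.
    '''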
r = str(radius) + 'arcsec'
ia.open(inpimage)
ra = ia.toworld([x0, y0], 's')['string'][0]
dec = ia.toworld([x0, y0], 's')['string'][1]
reg = 'circle[[{}, {}], {}]'.format(ra, dec, r)
stats = imstat(imagename=inpimage, region=reg, axes=[0, 1])
ia.close()
return stats
def myConvolve(inpimage, output, bopts):
inp_beam = imhead(inpimage)['restoringbeam']
if inp_beam['major']['unit'] == 'arcsec':
inp_bmaj = inp_beam['major']['value']
inp_bmin = inp_beam['minor']['value']
elif inp_beam['major']['unit'] == 'deg':
inp_bmaj = inp_beam['major']['value']*3600.
inp_bmin = inp_beam['minor']['value']*3600.
if bopts == 'beam':
bmaj = bparams[0]
bmin = bparams[1]
bpa = bparams[2]
elif bopts == 'factor':
bmaj = smooth_f * inp_bmaj
bmin = smooth_f * inp_bmin
bpa = imhead(inpimage)['restoringbeam']['positionangle']['value']
elif bopts == 'num_of_beams':
bmaj = np.round(np.sqrt(np.pi*(radius)**2/nbeams))
if bmaj < inp_bmaj:
            logger.info('Chosen beam size is smaller than input beam. Increasing beam size appropriately.')
bmaj = bmaj * np.ceil(2*inp_bmaj/bmaj)
bmin = bmaj
bpa = 0.0
default(imsmooth)
imsmooth(imagename=inpimage, targetres=True, major=qa.quantity(bmaj, 'arcsec'),
minor=qa.quantity(bmin, 'arcsec'), pa=qa.quantity(bpa, 'deg'), outfile=output, overwrite=True)
return output
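# Hedged worked example for bopts='num_of_beams': the target beam is sized so
# that roughly nbeams circular beams tile the halo area (made-up numbers).
def _beam_size_demo(radius_arcsec=150.0, nbeams=100):
    bmaj = np.round(np.sqrt(np.pi * radius_arcsec**2 / nbeams))
    return bmaj   # ~27 arcsec for a 150 arcsec halo and 100 beams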
def estimateRMS(inpimage, x0, y0, radius):
logger.info('Estimating RMS in {} around ({}, {}) with radius {:.2f}\'...'.
format(inpimage.split('/')[-1], x0, y0, radius/60.))
fitsfile = '.'.join(inpimage.split('.')[:-1]) + '.fits'
exportfits(imagename=inpimage, fitsimage=fitsfile, overwrite=True)
subprocess.call([bane_pth, fitsfile])
rmsfile = '.'.join(inpimage.split('.')[:-1]) + '_rms.fits'
bkgfile = '.'.join(inpimage.split('.')[:-1]) + '_bkg.fits'
rmsimage = '.'.join(inpimage.split('.')[:-1]) + '_rms.image'
importfits(fitsimage=rmsfile, imagename=rmsimage, overwrite=True)
rms = getStats(rmsimage, x0, y0, radius)['rms'][0]
os.remove(fitsfile)
os.remove(rmsfile)
os.remove(bkgfile)
shutil.rmtree(rmsimage)
logger.info('RMS estimated to be {:.3f} mJy/beam.'.format(rms*1.e3))
return rms
def run_imaging(visfile, task, output, rms):
logger.info('Running deconvolution using task {}:'.format(task))
if task == 'tclean':
default(tclean)
tclean(vis=visfile, imagename=output, niter=N, threshold=thresh_f*rms, deconvolver=dcv,
scales=scle, imsize=isize, cell=csize, weighting=weight, robust=rbst,
gridder=grdr, wprojplanes=wproj,
savemodel='modelcolumn', aterm=False, pblimit=0.0, wbawp=False)
elif task == 'wsclean':
chgc_command = 'chgcentre -f -minw -shiftback {}'.format(visfile)
subprocess.call(chgc_command.split())
clean_command = 'wsclean -mem 25 -name {} -weight {} {} -size {} {} -scale {} -niter {} -auto-threshold {} -multiscale -multiscale-scale-bias 0.7 -pol RR {}'.format(
output, weight, rbst, isize, isize, cell/3600, N, thresh_f, visfile)
subprocess.call(clean_command.split())
def calculateExcess(stats, output, x, y, r, bopts):
# Convolve new images (Set last parameter as either 'factor' OR 'beam')
logger.info('Convolving new image and getting statistics...')
if cln_task == 'wsclean':
importfits(fitsimage=output + '-image.fits', imagename=output + '.image')
i2_conv = output + '.conv'
i2_conv = myConvolve(output + '.image', i2_conv, bopts)
i2_stats = getStats(i2_conv, x, y, r)
# logger.info('Done!\n')
excessFlux = i2_stats['flux'][0] - stats['flux'][0]
recovery = excessFlux / stats['flux'][0] * 100.
logger.info('Excess flux in central {:.2f}\' region = {:.2f} mJy'.format(
theta / 60., excessFlux * 1.e3))
logger.info('Halo flux recovered = {:.2f}%\n----\n'.format(recovery))
return recovery
def recoveredFlux(stats, flux, rms):
haloimg = createHalo(imgpath, x0, y0, hsize, flux, ftype)
newvis = addHaloVis(vispath, haloimg, flux, alpha)
otpt = '.'.join(imgpath.split('.')[:-1]) + '_wHalo_flux_{:f}'.format(flux)
while True:
try:
run_imaging(newvis, cln_task, otpt, rms)
# logger.info('Done!')
break
except Exception as e:
logger.error('Something went wrong. Please try again!')
logger.error(e)
sys.exit(1)
recv = calculateExcess(stats, otpt, x0, y0, radius, bopts)
cleanup(srcdir)
clearcal(vis=vispath)
clearstat()
if do_cntrs:
execfile('create_contours.py')
prompt_with_timeout(60)
return recv
####--------------------------------XXXX------------------------------------####
| 10,795 | 37.834532 | 173 | py |
UL-CALC | UL-CALC-master/create_contours.py | import sys, os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse,Rectangle
from astropy.wcs import WCS
from astropy.visualization import ZScaleInterval,PercentileInterval,MinMaxInterval
from astropy.io import fits
from astropy.nddata.utils import Cutout2D
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
from astroquery.ned import Ned
from astropy.coordinates import SkyCoord
from astropy.cosmology import Planck15 as cosmo
ll      = 2000                  # Physical size of the cutout (kpc)
itvl = 'PercentileInterval(99)' # Colorbar interval
cmap = 'gist_heat_r' # Color map
num_cont= 10 # Number of contours
fname = otpt + '.fits'
exportfits(imagename=otpt+'.image', fitsimage=fname, overwrite=True)
hdu = fits.open(fname)[0]
wcs = WCS(hdu.header, naxis=2)
newdata = np.squeeze(hdu.data)
lvls = np.array([-1.0])
for i in np.arange(num_cont):
lvls = np.append(lvls, np.sqrt(2**i))
lvls = 3 * img_rms * lvls
try:
# Creating cutouts
s1 = (ll/cosmo.kpc_proper_per_arcmin(z).value)
size = u.Quantity((s1,s1), u.arcmin)
x0 = hdu.header['CRPIX1']
y0 = hdu.header['CRPIX2']
ra = float(Ned.query_object(cluster)['RA'])
dec = float(Ned.query_object(cluster)['DEC'])
ra0 = hdu.header['CRVAL1']
dec0 = hdu.header['CRVAL2']
del_a = hdu.header['CDELT1']
del_d = hdu.header['CDELT2']
x = int(np.round(x0 + (ra-ra0)*np.cos(np.deg2rad(np.mean((dec,dec0))))/del_a))
y = int(np.round(y0 + (dec-dec0)/del_d))
pos = (x,y)
cutout = Cutout2D(newdata, pos, size, wcs=wcs)
newx0 = cutout.wcs.wcs.crpix[0]
newy0 = cutout.wcs.wcs.crpix[1]
newx = len(cutout.data)
newy = len(cutout.data)
# Setting colorbar levels
interval= eval(itvl)
vmin, vmax = interval.get_limits(cutout.data)
# Plotting
im = plt.imshow(cutout.data, vmin=vmin, vmax=vmax, origin='lower',cmap=cmap)
plt.contour(cutout.data, levels=lvls, colors='blue', alpha=0.5)
plt.grid(color='white', ls='dashed', alpha=0.5)
freq = hdu.header['CRVAL3']
plt.title('{:s} ({:.0f} MHz) [rms={:.2f} mJy/beam]'.format(cluster, freq/1.e6, img_rms))
plt.colorbar(im)
plt.pause(10)
plt.close("all")
except Exception as e:
print('ERROR for {}:\n{}\n'.format(cluster,e))
####--------------------------------XXXX------------------------------------####
| 2,501 | 33.75 | 92 | py |
UL-CALC | UL-CALC-master/mylogging.py | import logging
import datetime
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logger = logging.getLogger()
now = datetime.datetime.now()
t = now.strftime("%Y%m%d-%H%M%S")
logname = 'ulc-{}-{}.log'.format(cluster.replace(' ',''), t)
logging.basicConfig(
filename=srcdir + '/' + logname,
level=logging.INFO,
format="%(asctime)s - %(name)s - %(message)s",
datefmt='%m/%d/%Y %I:%M:%S %p',
)
# set up logging to console
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(process)d:[%(levelname)s]: %(message)s')
console.setFormatter(formatter)
# add the handler to the logger
logger.addHandler(console)
####--------------------------------XXXX------------------------------------#### | 816 | 29.259259 | 80 | py |
UL-CALC | UL-CALC-master/params.py | # INPUT PARAMETERS (Descriptions at the end)
bane_pth= '/path/to/BANE'
srcdir = '/path/to/source/directory'
visname = '<source>.MS'
imgname = '<source>.IMAGE'
vispath = os.path.join(srcdir, visname)
imgpath = os.path.join(srcdir, imgname)
cluster = '<source-name>'
if cluster != '':
z = float(Ned.query_object(cluster)['Redshift'])
else:
z = 0.1
l = 1000
alpha = -1.3
ftype = 'E'
# ESTIMATED PARAMETERS
theta = (l/cosmo.kpc_proper_per_arcmin(z).value)*60.
cell = np.rad2deg(imhead(imgpath)['incr'][1])*3600
hsize = theta/cell
# FLUX LEVELS
do_fac = True
flx_fac = [50, 100, 200, 300, 500, 1000, 2000]
flx_lst = []
# CLEAN PARAMETERS
cln_task= 'tclean'
N = 1000000
isize = imhead(imgpath)['shape'][0]
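# Halo injection position in pixels (assumption: image centre; adjust if the
# halo should be injected at a different location)
x0 = int(isize / 2)
y0 = int(isize / 2)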
csize = str(cell) + 'arcsec'
weight = 'briggs'
rbst = 0.0
grdr = 'widefield'
wproj = -1
dcv = 'multiscale'
scle = [0,5,15,30]
thresh_f= 3
# REGION SIZE (in arcsec)
radius = theta/2.
rms_reg = 3 * radius
# SMOOTH PARAMETERS
bopts = 'num_of_beams'
nbeams = 100
bparams = (20.0, 20.0, 0.0)
smooth_f= 2
# RECOVERY PARAMETERS
recv_th = 10.0
n_split = 6
do_cntrs= True
####
# bane_pth = Path to BANE executable
# srcdir = Source Directory
# visname = Reference visibility file
# imgname = Reference image file made from 'visname'
# z = Redshift of source
# cluster = Cluster name (optional)
# l = Size of halo to be injected (kpc)
# alpha = Spectral index for frequency scaling (S = k*v^(-a))
# ftype = Radial profile of halo. Options: (G)aussian, (P)olynomial, (E)xponential (Currently E and G work best)
# theta = Angular size (in arcsec) for halo (size=l) at redshift z
# x0, y0 = Halo injection position
# cell = Pixel separation (in arcsec)
# hsize = Size of halo (in pixels)
# do_fac = Whether to use flux factors or flux list
# flx_fac = Flux level factors
# flx_lst = Manually provided flux list
# cln_task = Clean task to use ('tclean', 'wsclean')
# N = No. of iterations
# csize = Cell size
# weight = Weighting to be used
# dcv = Deconvolver to use
# scle = Multi-scale options
# thresh_f = Cleaning threshold factor
# radius = Radius of halo (in arcsec)
# rms_reg = Region from which to estimate rms
# bopts = Smoothing option ('num_of_beams', 'factor', 'beam')
# nbeams = No. of synthesized beams in halo
# bparams = Beam size (bmaj("), bmin("), bpa(deg))
# smooth_f = Factor to smooth input beam
# recv_th = Threshold of Excess flux recovery at which to fine tune (in percent)
# n_split = Number of levels to split during fine tuning (Default: 6)
# do_cntrs = Whether to do create python images with contours or not
####--------------------------------XXXX------------------------------------####
| 2,663 | 28.6 | 113 | py |
TreEnhance | TreEnhance-master/ptcolor.py | """Pytorch routines for color conversions and management.
All color arguments are given as 4-dimensional tensors representing
batch of images (Bx3xHxW). RGB values are supposed to be in the
range 0-1 (but values outside the range are tolerated).
Some examples:
>>> rgb = torch.tensor([0.8, 0.4, 0.2]).view(1, 3, 1, 1)
>>> lab = rgb2lab(rgb)
>>> print(lab.view(-1))
tensor([54.6400, 36.9148, 46.1227])
>>> rgb2 = lab2rgb(lab)
>>> print(rgb2.view(-1))
tensor([0.8000, 0.4000, 0.2000])
>>> rgb3 = torch.tensor([0.1333,0.0549,0.0392]).view(1, 3, 1, 1)
>>> lab3 = rgb2lab(rgb3)
>>> print(lab3.view(-1))
tensor([6.1062, 9.3593, 5.2129])
"""
import torch
def _t(data):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return torch.tensor(data, requires_grad=False, dtype=torch.float32, device=device)
def _mul(coeffs, image):
coeffs = coeffs.to(image.device).view(3, 3, 1, 1)
return torch.nn.functional.conv2d(image, coeffs)
_RGB_TO_XYZ = {
"srgb": _t([[0.4124564, 0.3575761, 0.1804375],
[0.2126729, 0.7151522, 0.0721750],
[0.0193339, 0.1191920, 0.9503041]]),
"prophoto": _t([[0.7976749, 0.1351917, 0.0313534],
[0.2880402, 0.7118741, 0.0000857],
[0.0000000, 0.0000000, 0.8252100]])
}
_XYZ_TO_RGB = {
"srgb": _t([[3.2404542, -1.5371385, -0.4985314],
[-0.9692660, 1.8760108, 0.0415560],
[0.0556434, -0.2040259, 1.0572252]]),
"prophoto": _t([[1.3459433, -0.2556075, -0.0511118],
[-0.5445989, 1.5081673, 0.0205351],
[0.0000000, 0.0000000, 1.2118128]])
}
WHITE_POINTS = {item[0]: _t(item[1:]).view(1, 3, 1, 1) for item in [
("a", 1.0985, 1.0000, 0.3558),
("b", 0.9807, 1.0000, 1.1822),
("e", 1.0000, 1.0000, 1.0000),
("d50", 0.9642, 1.0000, 0.8251),
("d55", 0.9568, 1.0000, 0.9214),
("d65", 0.9504, 1.0000, 1.0888),
("icc", 0.9642, 1.0000, 0.8249)
]}
_EPSILON = 0.008856
_KAPPA = 903.3
_XYZ_TO_LAB = _t([[0.0, 116.0, 0.], [500.0, -500.0, 0.], [0.0, 200.0, -200.0]])
_LAB_TO_XYZ = _t([[1.0 / 116.0, 1.0 / 500.0, 0], [1.0 / 116.0, 0, 0], [1.0 / 116.0, 0, -1.0 / 200.0]])
_LAB_OFF = _t([16.0, 0.0, 0.0]).view(1, 3, 1, 1)
def apply_gamma(rgb, gamma="srgb"):
"""Linear to gamma rgb.
Assume that rgb values are in the [0, 1] range (but values outside are tolerated).
gamma can be "srgb", a real-valued exponent, or None.
>>> apply_gamma(torch.tensor([0.5, 0.4, 0.1]).view([1, 3, 1, 1]), 0.5).view(-1)
tensor([0.2500, 0.1600, 0.0100])
"""
if gamma == "srgb":
T = 0.0031308
rgb1 = torch.max(rgb, rgb.new_tensor(T))
return torch.where(rgb < T, 12.92 * rgb, (1.055 * torch.pow(torch.abs(rgb1), 1 / 2.4) - 0.055))
elif gamma is None:
return rgb
else:
return torch.pow(torch.max(rgb, rgb.new_tensor(0.0)), 1.0 / gamma)
def remove_gamma(rgb, gamma="srgb"):
"""Gamma to linear rgb.
Assume that rgb values are in the [0, 1] range (but values outside are tolerated).
gamma can be "srgb", a real-valued exponent, or None.
>>> remove_gamma(apply_gamma(torch.tensor([0.001, 0.3, 0.4])))
tensor([0.0010, 0.3000, 0.4000])
>>> remove_gamma(torch.tensor([0.5, 0.4, 0.1]).view([1, 3, 1, 1]), 2.0).view(-1)
tensor([0.2500, 0.1600, 0.0100])
"""
if gamma == "srgb":
T = 0.04045
rgb1 = torch.max(rgb, rgb.new_tensor(T))
return torch.where(rgb < T, rgb / 12.92, torch.pow(torch.abs(rgb1 + 0.055) / 1.055, 2.4))
elif gamma is None:
return rgb
else:
res = torch.pow(torch.max(rgb, rgb.new_tensor(0.0)), gamma) + \
torch.min(rgb, rgb.new_tensor(0.0))
return res
def rgb2xyz(rgb, gamma_correction="srgb", clip_rgb=False, space="srgb"):
"""sRGB to XYZ conversion.
rgb: Bx3xHxW
return: Bx3xHxW
>>> rgb2xyz(torch.tensor([0., 0., 0.]).view(1, 3, 1, 1)).view(-1)
tensor([0., 0., 0.])
>>> rgb2xyz(torch.tensor([0., 0.75, 0.]).view(1, 3, 1, 1)).view(-1)
tensor([0.1868, 0.3737, 0.0623])
>>> rgb2xyz(torch.tensor([0.4, 0.8, 0.2]).view(1, 3, 1, 1), gamma_correction=None).view(-1)
tensor([0.4871, 0.6716, 0.2931])
>>> rgb2xyz(torch.ones(2, 3, 4, 5)).size()
torch.Size([2, 3, 4, 5])
>>> rgb2xyz(torch.tensor([0.4, 0.8, 0.2]).view(1, 3, 1, 1), gamma_correction=None, space='prophoto').view(-1)
tensor([0.4335, 0.6847, 0.1650])
"""
if clip_rgb:
rgb = torch.clamp(rgb, 0, 1)
rgb = remove_gamma(rgb, gamma_correction)
return _mul(_RGB_TO_XYZ[space], rgb)
def xyz2rgb(xyz, gamma_correction="srgb", clip_rgb=False, space="srgb"):
"""XYZ to sRGB conversion.
rgb: Bx3xHxW
return: Bx3xHxW
>>> xyz2rgb(torch.tensor([0., 0., 0.]).view(1, 3, 1, 1)).view(-1)
tensor([0., 0., 0.])
>>> xyz2rgb(torch.tensor([0.04, 0.02, 0.05]).view(1, 3, 1, 1)).view(-1)
tensor([0.3014, 0.0107, 0.2503])
>>> xyz2rgb(torch.ones(2, 3, 4, 5)).size()
torch.Size([2, 3, 4, 5])
>>> xyz2rgb(torch.tensor([-1, 2., 0.]).view(1, 3, 1, 1), clip_rgb=True).view(-1)
tensor([0.0000, 1.0000, 0.0000])
"""
rgb = _mul(_XYZ_TO_RGB[space], xyz)
if clip_rgb:
rgb = torch.clamp(rgb, 0, 1)
rgb = apply_gamma(rgb, gamma_correction)
return rgb
def _lab_f(x):
x1 = torch.max(x, x.new_tensor(_EPSILON))
return torch.where(x > _EPSILON, torch.pow(x1, 1.0 / 3), (_KAPPA * x + 16.0) / 116.0)
def xyz2lab(xyz, white_point="d65"):
"""XYZ to Lab conversion.
xyz: Bx3xHxW
return: Bx3xHxW
>>> xyz2lab(torch.tensor([0., 0., 0.]).view(1, 3, 1, 1)).view(-1)
tensor([0., 0., 0.])
>>> xyz2lab(torch.tensor([0.4, 0.2, 0.1]).view(1, 3, 1, 1)).view(-1)
tensor([51.8372, 82.3018, 26.7245])
>>> xyz2lab(torch.tensor([1., 1., 1.]).view(1, 3, 1, 1), white_point="e").view(-1)
tensor([100., 0., 0.])
"""
xyz = xyz / WHITE_POINTS[white_point].to(xyz.device)
f_xyz = _lab_f(xyz)
return _mul(_XYZ_TO_LAB, f_xyz) - _LAB_OFF.to(xyz.device)
def _inv_lab_f(x):
x3 = torch.max(x, x.new_tensor(_EPSILON)) ** 3
return torch.where(x3 > _EPSILON, x3, (116.0 * x - 16.0) / _KAPPA)
def lab2xyz(lab, white_point="d65"):
"""lab to XYZ conversion.
lab: Bx3xHxW
return: Bx3xHxW
>>> lab2xyz(torch.tensor([0., 0., 0.]).view(1, 3, 1, 1)).view(-1)
tensor([0., 0., 0.])
>>> lab2xyz(torch.tensor([100., 0., 0.]).view(1, 3, 1, 1), white_point="e").view(-1)
tensor([1., 1., 1.])
>>> lab2xyz(torch.tensor([50., 25., -30.]).view(1, 3, 1, 1)).view(-1)
tensor([0.2254, 0.1842, 0.4046])
"""
f_xyz = _mul(_LAB_TO_XYZ, lab + _LAB_OFF.to(lab.device))
xyz = _inv_lab_f(f_xyz)
return xyz * WHITE_POINTS[white_point].to(lab.device)
def rgb2lab(rgb, white_point="d65", gamma_correction="srgb", clip_rgb=False, space="srgb"):
"""sRGB to Lab conversion."""
lab = xyz2lab(rgb2xyz(rgb, gamma_correction, clip_rgb, space), white_point)
return lab
def lab2rgb(lab, white_point="d65", gamma_correction="srgb", clip_rgb=False, space="srgb"):
    """Lab to sRGB conversion."""
    return xyz2rgb(lab2xyz(lab, white_point), gamma_correction, clip_rgb, space)
def lab2lch(lab):
"""Lab to LCH conversion."""
l = lab[:, 0, :, :]
c = torch.norm(lab[:, 1:, :, :], 2, 1)
h = torch.atan2(lab[:, 2, :, :], lab[:, 1, :, :])
h = h * (180 / 3.141592653589793)
h = torch.where(h >= 0, h, 360 + h)
return torch.stack([l, c, h], 1)
def rgb2lch(rgb, white_point="d65", gamma_correction="srgb", clip_rgb=False, space="srgb"):
"""sRGB to LCH conversion."""
lab = rgb2lab(rgb, white_point, gamma_correction, clip_rgb, space)
return lab2lch(lab)
def squared_deltaE(lab1, lab2):
"""Squared Delta E (CIE 1976).
lab1: Bx3xHxW
lab2: Bx3xHxW
return: Bx1xHxW
"""
return torch.sum((lab1 - lab2) ** 2, 1, keepdim=True)
def deltaE(lab1, lab2):
"""Delta E (CIE 1976).
lab1: Bx3xHxW
lab2: Bx3xHxW
return: Bx1xHxW
>>> lab1 = torch.tensor([100., 75., 50.]).view(1, 3, 1, 1)
>>> lab2 = torch.tensor([50., 50., 100.]).view(1, 3, 1, 1)
>>> deltaE(lab1, lab2).item()
75.0
"""
return torch.norm(lab1 - lab2, 2, 1, keepdim=True)
def squared_deltaE94(lab1, lab2):
"""Squared Delta E (CIE 1994).
Default parameters for the 'Graphic Art' version.
lab1: Bx3xHxW (reference color)
lab2: Bx3xHxW (other color)
return: Bx1xHxW
"""
diff_2 = (lab1 - lab2) ** 2
dl_2 = diff_2[:, 0:1, :, :]
c1 = torch.norm(lab1[:, 1:3, :, :], 2, 1, keepdim=True)
c2 = torch.norm(lab2[:, 1:3, :, :], 2, 1, keepdim=True)
dc_2 = (c1 - c2) ** 2
dab_2 = torch.sum(diff_2[:, 1:3, :, :], 1, keepdim=True)
dh_2 = torch.abs(dab_2 - dc_2)
de_2 = (dl_2 +
dc_2 / ((1 + 0.045 * c1) ** 2) +
dh_2 / ((1 + 0.015 * c1) ** 2))
return de_2
def deltaE94(lab1, lab2):
"""Delta E (CIE 1994).
Default parameters for the 'Graphic Art' version.
lab1: Bx3xHxW (reference color)
lab2: Bx3xHxW (other color)
return: Bx1xHxW
>>> lab1 = torch.tensor([100., 0., 0.]).view(1, 3, 1, 1)
>>> lab2 = torch.tensor([80., 0., 0.]).view(1, 3, 1, 1)
>>> deltaE94(lab1, lab2).item()
20.0
>>> lab1 = torch.tensor([100., 0., 0.]).view(1, 3, 1, 1)
>>> lab2 = torch.tensor([100., 20., 0.]).view(1, 3, 1, 1)
>>> deltaE94(lab1, lab2).item()
20.0
>>> lab1 = torch.tensor([100., 0., 10.]).view(1, 3, 1, 1)
>>> lab2 = torch.tensor([100., 0., 0.]).view(1, 3, 1, 1)
>>> round(deltaE94(lab1, lab2).item(), 4)
6.8966
>>> lab1 = torch.tensor([100., 75., 50.]).view(1, 3, 1, 1)
>>> lab2 = torch.tensor([50., 50., 100.]).view(1, 3, 1, 1)
>>> round(deltaE94(lab1, lab2).item(), 4)
54.7575
"""
sq = torch.nn.functional.relu(squared_deltaE94(lab1, lab2))
return torch.sqrt(sq)
def _check_conversion(**opts):
"""Verify the conversions on the RGB cube.
>>> _check_conversion(white_point='d65', gamma_correction='srgb', clip_rgb=False, space='srgb')
True
>>> _check_conversion(white_point='d50', gamma_correction=1.8, clip_rgb=False, space='prophoto')
True
"""
for r in range(0, 256, 15):
for g in range(0, 256, 15):
for b in range(0, 256, 15):
rgb = torch.tensor([r / 255.0, g / 255.0, b / 255.0]).view(1, 3, 1, 1)
lab = rgb2lab(rgb, **opts)
rgb2 = lab2rgb(lab, **opts)
de = deltaE(rgb, rgb2).item()
if de > 2e-4:
print("Conversion failed for RGB:", r, g, b, " deltaE", de)
return False
return True
def _check_gradients():
"""Verify some borderline gradient computation
>>> a = torch.zeros(1, 3, 1, 1, requires_grad=True)
>>> b = torch.zeros(1, 3, 1, 1, requires_grad=True)
>>> deltaE(a, b).backward()
>>> torch.any(torch.isnan(a.grad)).item()
0
>>> torch.any(torch.isnan(b.grad)).item()
0
>>> deltaE94(a, b).backward()
>>> torch.any(torch.isnan(a.grad)).item()
0
>>> torch.any(torch.isnan(b.grad)).item()
0
"""
return True
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
print("Test completed")
| 11,528 | 28.561538 | 113 | py |
TreEnhance | TreEnhance-master/data_Prep.py | import torch
import torch.utils
import torch.utils.data
import os
from torchvision import transforms
from random import random, sample
from PIL import Image
class Dataset_LOL(torch.utils.data.Dataset):
def __init__(self, raw_dir, exp_dir, subset_img=None, size=None, training=True):
self.raw_dir = raw_dir
self.exp_dir = exp_dir
self.subset_img = subset_img
self.size = size
if subset_img is not None:
self.listfile = sample(os.listdir(raw_dir), self.subset_img)
else:
self.listfile = os.listdir(raw_dir)
transformation = []
if training:
transformation.append(transforms.RandomHorizontalFlip(0.5))
if size is not None:
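            # NOTE: this coin flip happens once per dataset construction, so
            # either every sample gets the random crop or none does.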
if random() > 0.5:
transformation.append(transforms.RandomResizedCrop((size, size)))
if size is not None:
transformation.append(transforms.Resize((size, size)))
self.transforms = transforms.Compose(transformation)
def __len__(self):
return len(self.listfile)
def __getitem__(self, index):
raw = transforms.ToTensor()(Image.open(self.raw_dir + self.listfile[index]))
expert = transforms.ToTensor()(Image.open(self.exp_dir + self.listfile[index]))
if raw.shape != expert.shape:
raw = transforms.Resize((self.size, self.size))(raw)
expert = transforms.Resize((self.size, self.size))(expert)
raw_exp = self.transforms(torch.stack([raw, expert]))
return raw_exp[0], raw_exp[1]
class LoadDataset(torch.utils.data.Dataset):
def __init__(self, raw_list, prob_list, win):
self.raw_list = raw_list
self.prob_list = prob_list
self.win = win
self.indices = []
def __len__(self):
return len(self.raw_list)
def __getitem__(self, index):
return self.raw_list[index], self.prob_list[index], torch.tensor(self.win[index])
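if __name__ == "__main__":
    # Minimal usage sketch (hypothetical paths): each item pairs a low-light
    # image with its reference, passed through the same shared transform.
    ds = Dataset_LOL("data/low/", "data/high/", size=256, training=True)
    raw, expert = ds[0]
    print(raw.shape, expert.shape)   # torch.Size([3, 256, 256]) twice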
| 1,956 | 32.169492 | 89 | py |
TreEnhance | TreEnhance-master/training.py | #!/usr/bin/env python3
import torch
import torch.utils
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
import collections
import numpy as np
import random
from data_Prep import LoadDataset, Dataset_LOL
import Actions as Actions
import warnings
import Network
import os
from matplotlib import pyplot as plt
from metrics import PSNR
import tqdm
import mcts
import argparse
import ptcolor
# TODO: promote these constants to command-line arguments
MAX_DEPTH = 10
STOP_ACTION = 36
def parse_args():
parser = argparse.ArgumentParser("TreEnhance Hyperparams")
a = parser.add_argument
a("basedir", help="BASE DIRECTORY")
a("expname",help="Name of the run")
a("dropout", type=float, default=0.6, help="Dropout")
a("num_images", type=int, default=100, help="number of Images")
a("num_steps", type=int, default=1000, help="number of steps")
a("val_images", type=int, default=100, help="number of val images")
a("lr", type=float, default=0.001, help="learning rate")
a("size", type=int, default=256, help="image size")
a("num_gen", type=float, default=256, help="number of generation")
a("bs", type=int, default=256, help="batch size")
a("lambd", type=int, default=20, help="lambda in the loss function")
a("loops", type=int, default=5, help="number of optimization loops")
return parser.parse_args()
def init_weights(m):
if isinstance(m, torch.nn.Linear):
torch.nn.init.zeros_(m.weight)
torch.nn.init.zeros_(m.bias)
def add_plot(x, y, writer, step):
plt.scatter(x, y, edgecolors='b')
plt.xticks(np.arange(0, 1, 0.1))
plt.yticks(np.arange(0, 1, 0.1))
plt.xlabel('z')
plt.ylabel('y')
plt.title('outcome plot')
plt.grid(True)
writer.add_figure('Fig1', plt.gcf(), step)
TrainingSample = collections.namedtuple("TrainingSample", ["image", "return_", "probabilities"])
def compute_error(x, y):
labx = ptcolor.rgb2lab(x.unsqueeze(0))
laby = ptcolor.rgb2lab(y.unsqueeze(0))
de = ptcolor.deltaE94(labx, laby)
return de
def train(samples, res, optimizer, step, device, writer,
train_loss_history, train_L1_history, train_L2_history, args, lambd=10):
img = [s.image.unsqueeze(0) for s in samples]
prob = [s.probabilities for s in samples]
win = [s.return_ for s in samples]
DS = LoadDataset(img, torch.tensor(prob), win)
L = torch.utils.data.DataLoader(DS, batch_size=64, drop_last=False,
shuffle=True, num_workers=0)
res.train()
loops = args.loops
for loop in tqdm.tqdm(range(loops)):
z_x, v_y = [], []
for img_prob in L:
outcome = img_prob[2].to(device)
optimizer.zero_grad()
pred, v = res(img_prob[0][:, 0, :, :, :].to(device))
z_x += outcome.unsqueeze(1)
v_y += v
l1 = lambd * ((outcome.unsqueeze(1) - v) ** 2)
            l2 = -((img_prob[1].to(device) *
                    torch.log(torch.clamp(pred, min=1e-8))).sum(1))
loss = ((l1 + l2.unsqueeze(1)).mean())
train_loss_history.append(loss.item())
train_L1_history.append(l1.mean().item())
train_L2_history.append(l2.mean().item())
loss.backward()
optimizer.step()
step += 1
if step % 10 == 0:
mean_loss = (sum(train_loss_history) /
max(1, len(train_loss_history)))
mean_L1 = sum(train_L1_history) / max(1, len(train_L1_history))
mean_L2 = sum(train_L2_history) / max(1, len(train_L2_history))
writer.add_scalar('Loss', mean_loss, step)
writer.add_scalar('L1', mean_L1, step)
writer.add_scalar('L2', mean_L2, step)
tqdm.tqdm.write(f"{step} {mean_L1} + {mean_L2} = {mean_loss}")
z_x = torch.cat(z_x, dim=0)
v_y = torch.cat(v_y, dim=0)
add_plot(z_x.cpu().detach().numpy(), v_y.cpu().detach().numpy(),
writer, step)
writer.add_scalar('Average return', z_x.mean().item(), step)
return res, step
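# Hedged sketch of the objective combined in train() above (dummy tensors,
# not part of the training flow): an AlphaZero-style value term
# lambd * (z - v)**2 plus a policy cross-entropy -sum(pi * log p).
def _loss_demo(lambd=20):
    z = torch.tensor([[1.0]])            # observed return
    v = torch.tensor([[0.8]])            # value-head prediction
    pi = torch.tensor([[0.7, 0.3]])      # MCTS visit distribution
    p = torch.tensor([[0.6, 0.4]])       # policy-head prediction
    l1 = lambd * (z - v) ** 2
    l2 = -(pi * torch.log(torch.clamp(p, min=1e-8))).sum(1, keepdim=True)
    return (l1 + l2).mean()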
class TrainingState:
def __init__(self, image, target, depth=0):
self.image = image
self.target = target
self.depth = depth
self.stopped = False
def transition(self, action):
new_image = Actions.select(self.image[None], action)[0]
new_state = type(self)(new_image, self.target, self.depth + 1)
new_state.stopped = (action == STOP_ACTION)
return new_state
def terminal(self):
return self.depth >= MAX_DEPTH or self.stopped
def compute_return(self):
if self.depth >= MAX_DEPTH:
return 0.0
elif self.stopped:
d = torch.dist(self.image, self.target, 2)
return torch.exp(-0.05 * d).item()
else:
raise ValueError("This state has not return!")
def play_tree(net, images, targets, device, steps):
actions = STOP_ACTION + 1
def transition(states, actions):
return [s.transition(a) for s, a in zip(states, actions)]
def evaluation(states):
t = [s.terminal() for s in states]
batch = torch.stack([s.image for s in states], 0)
batch = batch.to(device)
with torch.no_grad():
pi, values = net(batch)
pi = pi.cpu().numpy()
if np.all([v.depth == 0 for v in states]):
eps = 0.25
pi = (1 - eps) * pi + eps * np.random.dirichlet([0.03 for i in range(STOP_ACTION + 1)],
pi.shape[0])
r = [(s.compute_return() if s.terminal() else v.item())
for (v, s) in zip(values, states)]
return t, r, pi
root_states = [TrainingState(im, tgt) for im, tgt in zip(images, targets)]
trees = mcts.MCTS(root_states, actions, transition, evaluation, exploration=8, initial_q=1.0)
states = []
probs = []
samples = []
while not np.all(trees.T[:trees.roots]):
trees.grow(steps)
states.append(trees.x[:trees.roots])
tau = 1.0
numerator = trees.N[:trees.roots, :] ** (1 / tau)
denominator = np.maximum(1, numerator.sum(1, keepdims=True))
probs.append(numerator / denominator)
actions = trees.sample_path()[1]
trees.descend_tree(actions[:, 0])
errors = []
psnrs = []
for r in range(trees.roots):
z = trees.R[r]
for s, p in zip(states, probs):
if s[r].terminal():
errors.append(torch.dist(s[r].image, s[r].target, 2).item())
psnrs.append(PSNR(s[r].image, s[r].target).item())
break
samples.append(TrainingSample(s[r].image, z, p[r, :]))
return samples, errors, psnrs
def generation(res, loader, steps, device):
samples = []
errors = []
psnrs = []
res.eval()
for images, targets in tqdm.tqdm(loader):
s, e, p = play_tree(res, images, targets, device, steps)
samples.extend(s)
errors.extend(e)
psnrs.extend(p)
return samples, np.mean(errors), np.mean(psnrs)
def validation(val_loader, res, device, writer, step):
res.eval()
loss = []
Psnr_list = []
    grid_images = []
with torch.no_grad():
for img, exp in tqdm.tqdm(val_loader):
img = img.to(device)
            exp = exp.to(device)
for it in range(MAX_DEPTH):
with torch.no_grad():
prob, z = res(img)
                action = torch.argmax(prob)
if action == STOP_ACTION:
break
                img = Actions.select(img, action).to(device)
loss.append(torch.dist(img, exp, 2))
Psnr_list.append(PSNR(img, exp))
            if len(grid_images) < 16:
                grid_images.append(img.squeeze(0).cpu())
vpsnr = sum(Psnr_list) / len(Psnr_list)
if writer is not None:
        writer.add_images('VAL IMAGE', torch.stack(grid_images, 0), step)
writer.add_scalar('L2 Validation Loss', sum(loss) / len(loss), step)
writer.add_scalar('PSNR Validation Loss', vpsnr, step)
print('L2 Validation Loss', (sum(loss) / len(loss)).item())
res.train()
return vpsnr
def main():
args = parse_args()
BASEDIR = args.basedir
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("Using device:", device)
warnings.filterwarnings("ignore")
raw_dir = BASEDIR+'/TRAIN/low/'
exp_dir = BASEDIR+'/TRAIN/high/'
val_dirR = BASEDIR+'/VAL/low/'
val_dirE = BASEDIR+'/VAL/high/'
expname = args.expname
weightfile = os.path.join("./", expname + ".pt")
tblocation = os.path.join("./tensor/", expname)
res = Network.ModifiedResnet(STOP_ACTION + 1, Dropout=args.dropout)
res.to(device)
images = args.num_images
steps = args.num_steps
val_images = args.val_images
param = res.parameters()
optimizer = torch.optim.AdamW(param, lr=args.lr)
dataset = Dataset_LOL(raw_dir, exp_dir, size=args.size, training=True)
val_set = Dataset_LOL(val_dirR, val_dirE, size=args.size, training=False)
indices = random.sample(list(range(len(val_set))), val_images)
val_set = torch.utils.data.Subset(val_set, indices)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=1,
drop_last=False,
shuffle=True, num_workers=0)
writer = SummaryWriter(tblocation)
train_loss_history = collections.deque(maxlen=100)
train_L1_history = collections.deque(maxlen=100)
train_L2_history = collections.deque(maxlen=100)
numGeneration = args.num_gen
step = 0
max_psnr = 0.0
for gen_count in range(0, numGeneration + 1):
        indices = random.sample(list(range(len(dataset))), images)
        subset = torch.utils.data.Subset(dataset, indices)
        loader = torch.utils.data.DataLoader(subset, batch_size=args.bs, drop_last=False, shuffle=True, num_workers=0)
        print('GENERATION', gen_count)
        samples, mean_error, psnr = generation(res, loader, steps, device)
writer.add_scalar('L2 train Loss', mean_error, gen_count)
writer.add_scalar('PSNR train Loss', psnr, gen_count)
print('TRAIN')
res, step = train(samples, res, optimizer, step, device,
writer, train_loss_history, train_L1_history,
train_L2_history,args, lambd=args.lambd)
torch.save(res.state_dict(), weightfile)
print('VALIDATION')
if gen_count % 1 == 0:
act_psnr = validation(val_loader, res, device, writer, gen_count)
if act_psnr >= max_psnr:
max_psnr = act_psnr
best_model = res.state_dict()
print('Best model updated', max_psnr)
torch.save(best_model, './' + expname + '_best_model.pt')
| 11,048 | 36.327703 | 118 | py |
TreEnhance | TreEnhance-master/Actions.py | import torch
import torch.utils
import torch.utils.data
from ColorAlgorithms import Gray_World, MaxRGB, saturation, hue
from torchvision import transforms
from PIL import ImageFilter
def select(img, act):
if act == 0:
return gamma_corr(img, 0.6, 0)
elif act == 1:
return gamma_corr(img, 0.6, 1)
elif act == 2:
return gamma_corr(img, 0.6, 2)
elif act == 3:
return gamma_corr(img, 1.1, 0)
elif act == 4:
return gamma_corr(img, 1.1, 1)
elif act == 5:
return gamma_corr(img, 1.1, 2)
elif act == 6:
return gamma_corr(img, 0.6)
elif act == 7:
return gamma_corr(img, 1.1)
elif act == 8:
return brightness(img, 0.1, 0)
elif act == 9:
return brightness(img, 0.1, 1)
elif act == 10:
return brightness(img, 0.1, 2)
elif act == 11:
return brightness(img, -0.1, 0)
elif act == 12:
return brightness(img, -0.1, 1)
elif act == 13:
return brightness(img, -0.1, 2)
elif act == 14:
return brightness(img, 0.1)
elif act == 15:
return brightness(img, -0.1)
elif act == 16:
return contrast(img, 0.8, 0)
elif act == 17:
return contrast(img, 0.8, 1)
elif act == 18:
return contrast(img, 0.8, 2)
elif act == 19:
return contrast(img, 2, 0)
elif act == 20:
return contrast(img, 2, 1)
elif act == 21:
return contrast(img, 2, 2)
elif act == 22:
return contrast(img, 0.8)
elif act == 23:
return contrast(img, 2)
elif act == 24:
return saturation(img, 0.5)
elif act == 25:
return saturation(img, 2)
elif act == 26:
return hue(img, 0.05)
elif act == 27:
return hue(img, -0.05)
elif act == 28:
return Gray_World(img)
elif act == 29:
return MaxRGB(img)
elif act == 30:
return apply_filter(img, ImageFilter.MedianFilter)
elif act == 31:
return apply_filter(img, ImageFilter.SHARPEN)
elif act == 32:
return apply_filter(img, ImageFilter.GaussianBlur)
elif act == 33:
return apply_filter(img, ImageFilter.EDGE_ENHANCE)
elif act == 34:
return apply_filter(img, ImageFilter.DETAIL)
elif act == 35:
return apply_filter(img, ImageFilter.SMOOTH)
elif act == 36:
return img
def gamma_corr(image, gamma, channel=None):
mod = image.clone()
if channel is not None:
mod[:, channel, :, :] = mod[:, channel, :, :] ** gamma
else:
mod = mod ** gamma
return mod
def brightness(image, bright, channel=None):
mod = image.clone()
if channel is not None:
mod[:, channel, :, :] = torch.clamp(mod[:, channel, :, :] + bright, 0, 1)
else:
mod = torch.clamp(mod + bright, 0, 1)
return mod
def apply_filter(image, filter):
mod = image.clone()
mod = (transforms.ToPILImage()(mod.squeeze(0)))
mod = transforms.ToTensor()(mod.filter(filter))
return mod.unsqueeze(0)
def contrast(image, alpha, channel=None):
mod = image.clone()
if channel is not None:
mod[:, channel, :, :] = torch.clamp(
torch.mean(mod[:, channel, :, :]) + alpha * (mod[:, channel, :, :] - torch.mean(mod[:, channel, :, :])), 0,
1)
else:
mod = torch.clamp(torch.mean(mod) + alpha * (mod - torch.mean(mod)), 0, 1)
return mod
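if __name__ == "__main__":
    # Smoke test (illustrative only): every action maps a [1, 3, H, W] batch
    # in [0, 1] to another batch of the same shape, roughly within [0, 1].
    x = torch.rand(1, 3, 64, 64)
    for a in range(37):
        y = select(x, a)
        assert y.shape == x.shape, a
        assert y.min() >= -1e-5 and y.max() <= 1.0 + 1e-5, a
    print("all 37 actions OK")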
| 3,418 | 27.491667 | 119 | py |
TreEnhance | TreEnhance-master/mcts.py | """Implementation of Monte Carlo Tree Search algorithm.
This implementation allows "parallel" multi-root trees. This does not
enable parallel computation, but reduces significantly the python
overhead thanks to numpy optimized operations.
The search can be configured by providing custom transition and
evaluation functions.
"""
import numpy as np
class MCTS:
"""A multi-rooted tree grown with the Monte Carlo Tree Search algorithm.
Attributes:
roots (int): number of roots in the tree
nodes (int): number of nodes
height (int): height of the tree
actions (int): number of actions
exploration (float): exploration coefficient
x (ndarray[object]): states
Q (ndarray[float]): matrix of state-action values
N (ndarray[int]): matrix of visit counters (nodes, actions)
C (ndarray[int]): matrix of indices of children (nodes, actions) (-1 if missing)
T (ndarray[bool]): flags for terminal nodes
R (ndarray[float]): returns of nodes
pi (ndarray[float]): prior probabilities (nodes, actions)
"""
def __init__(self, root_states, actions, transition_function,
evaluation_function, exploration=1, initial_q=0.0):
"""Create and initialize the tree.
The tree will be initialized with one root for each initial state.
Args:
root_states (iterable): initial states
actions (int): number of actions in each non-terminal node
transition_function (callable): function computing new states
evaluation_function (callable): function computing info from states
exploration (float): exploration coefficient
initial_q (float): value for non-visited nodes
The transition function will be called with an array of states
and one of actions. The result must be the array of states
obtained by performing the actions in the given states.
The evaluation function will be called with an array of
states. The result must be a triplet of arrays. The first
tells which states are terminal; the second provides the
return for the states; the third is a matrix containing the
prior probabilities for the states.
        The node states are not directly processed by this class
        and can be objects of any kind.
"""
root_states = list(root_states)
roots = len(root_states)
self.roots = roots
self.nodes = roots
self.height = 1
self.actions = actions
self.exploration = exploration
self.initial_q = initial_q
self.transition_function = transition_function
self.evaluation_function = evaluation_function
self.x = np.empty(roots, dtype=object)
self.x[:] = root_states
self.Q = np.full((roots, actions), initial_q, dtype=np.float32)
self.N = np.zeros((roots, actions), dtype=np.int32)
self.C = np.full((roots, actions), -1, dtype=np.int32)
T, R, pi = evaluation_function(self.x)
self.T = np.array(T, dtype=bool)
self.R = np.array(R, dtype=np.float32)
self.pi = np.array(pi, dtype=np.float32)
def select_by_policy(self, policy):
"""Select leaves in the tree.
Descend the trees starting from the roots, by applying the
given policy function until final (terminal or non expanded)
nodes are found.
Args:
policy (callable): the policy used to select the nodes
Returns:
paths (ndarray[int]): matrix with indices of the nodes visited
actions (ndarray[int]): matrix with the actions taken at each step
            lengths (ndarray[int]): array with the lengths of the paths
The policy will be called with an array of indices of nodes.
It must return an array of actions.
paths[i, j] is the j-th node visited starting from the i-th
root and actions[i, j] is the action taken from it. paths[i,
lengths[i] - 1] is the final node reached from the i-th root.
        actions[i, j] is the action that is taken from paths[i, j] to
        reach paths[i, j + 1].  actions[i, lengths[i] - 1] is the
        action that would be taken from the last node in the path
        according to the policy and is used by MCTS to decide how to
        expand the leaf.
"""
idx = np.arange(self.roots)
active = np.arange(self.roots)
paths = np.full((self.roots, self.height), -1, dtype=int)
actions = np.full((self.roots, self.height), -1, dtype=int)
lengths = np.zeros(self.roots, dtype=int)
t = 0
while active.size > 0:
a = policy(idx)
paths[active, t] = idx
actions[active, t] = a
lengths[active] += 1
idx = self.C[idx, a]
sub = (idx >= 0)
idx = idx[sub]
active = active[sub]
t += 1
return paths, actions, lengths
def _ucb_policy(self, nodes):
frac = np.sqrt(np.maximum(1e-8, self.N[nodes].sum(1, keepdims=True))) / (self.N[nodes] + 1)
ucb = self.Q[nodes] + self.exploration * self.pi[nodes] * frac
return ucb.argmax(1)
def select(self):
"""Select the nodes by maximizing the UCB."""
return self.select_by_policy(self._ucb_policy)
def expand(self, leaves, actions, lengths):
"""Expand the given leaves by applying the actions.
Args:
leaves (ndarray[int]): indices of the nodes to expand
actions (ndarray[int]): actions to be performed at the leaves
lengths (ndarray[int]): length of the paths obtained to reach the leaves
Returns:
The indices of the new nodes added to the tree.
Note:
Leaves that are terminal are not expanded. In that case
the index of the leaf itself is returned.
"""
nodes = np.empty(leaves.size, dtype=int)
term = self.T[leaves]
nodes[term] = leaves[term]
non_terminal = leaves[~term]
if non_terminal.size > 0:
assert self.nodes + non_terminal.size <= self.x.size
new_nodes = np.arange(self.nodes, self.nodes + non_terminal.size)
nodes[~term] = new_nodes
self.C[non_terminal, actions[~term]] = new_nodes
new_states = self.transition_function(self.x[non_terminal], actions[~term])
new_T, new_R, new_pi = self.evaluation_function(new_states)
self.x[new_nodes] = new_states
self.T[new_nodes] = new_T
self.R[new_nodes] = new_R
self.pi[new_nodes, :] = new_pi
self.nodes += new_nodes.size
self.height = max(self.height, lengths[~term].max() + 1)
return nodes
def evaluate(self, nodes):
"""Evaluate the nodes.
Args:
nodes (ndarray[int]): indices of the nodes to evaluate
Returns:
An array with the evaluations.
"""
return self.R[nodes]
def backup(self, paths, actions, depths, values):
"""Update the values and the counters in the tree.
Args:
paths (ndarray[int]): indices of the nodes visited
actions (ndarray[int]): actions performed at the nodes
depths (ndarray[int]): length of the paths from each root
values (ndarray[int]): evaluation of the leaves (one for each path)
"""
p = np.concatenate([paths[i, :d] for i, d in enumerate(depths)])
a = np.concatenate([actions[i, :d] for i, d in enumerate(depths)])
v = np.concatenate([[v] * d for v, d in zip(values, depths)])
self.Q[p, a] = (self.Q[p, a] * self.N[p, a] + v) / (self.N[p, a] + 1)
self.N[p, a] += 1
def _grow_array(self, a, n, val):
b = np.full((n, *a.shape[1:]), val, dtype=a.dtype)
return np.append(a, b, 0)
def grow(self, steps):
"""Grow the tree with additional nodes.
Args:
steps (int): number of nodes to add under each root
"""
k = steps * self.roots
self.Q = self._grow_array(self.Q, k, self.initial_q)
self.N = self._grow_array(self.N, k, 0)
self.pi = self._grow_array(self.pi, k, 1 / self.actions)
self.C = self._grow_array(self.C, k, -1)
self.T = self._grow_array(self.T, k, False)
self.R = self._grow_array(self.R, k, np.nan)
self.x = self._grow_array(self.x, k, None)
ran = np.arange(self.roots)
for step in range(steps):
paths, actions, depths = self.select()
nodes = self.expand(paths[ran, depths - 1], actions[ran, depths - 1], depths)
values = self.evaluate(nodes)
self.backup(paths, actions, depths, values)
if self.nodes == self.x.size:
return
self.Q = self.Q[:self.nodes, :]
self.N = self.N[:self.nodes, :]
self.pi = self.pi[:self.nodes, :]
self.C = self.C[:self.nodes, :]
self.T = self.T[:self.nodes]
self.R = self.R[:self.nodes]
self.x = self.x[:self.nodes]
def most_visited(self):
"""Select nodes by choosing those with the highest visit count."""
return self.select_by_policy(lambda nodes: self.N[nodes].argmax(1))
def _sample_policy(self, nodes, eps=1e-6):
c = self.N[nodes] + eps
p = c / c.sum(1, keepdims=True)
a = [np.random.choice(self.actions, p=p[i]) for i in range(nodes.size)]
return np.array(a)
def sample_path(self):
"""Select nodes by randomly descending the trees."""
return self.select_by_policy(self._sample_policy)
def descend_tree(self, actions):
"""Move down each root by applying an action.
Args:
actions (ndarray): action to apply (one per root).
Note that roots that are also terminal are not changed.
"""
new_roots = self.C[np.arange(self.roots), actions]
active = (new_roots >= 0)
new_roots[~active] = np.arange(self.roots)[~active]
descendants = [new_roots]
active = new_roots[active]
while active.size:
active = self.C[active, :].flatten()
active = active[active >= 0]
descendants.append(active)
new_to_old = np.concatenate(descendants)
self.nodes = new_to_old.size
self.height = len(descendants) - (descendants[-1].size == 0)
old_to_new = np.full(self.x.size + 1, -1)
old_to_new[new_to_old] = np.arange(self.nodes)
self.x = self.x[new_to_old]
self.Q = self.Q[new_to_old, :]
self.N = self.N[new_to_old, :]
self.C = old_to_new[self.C[new_to_old, :]]
self.T = self.T[new_to_old]
self.R = self.R[new_to_old]
self.pi = self.pi[new_to_old, :]
def dump(self, filename, root=None):
"""Dump the content of the tree to a dot file.
Args:
filename (str): name of the output file
root (int or None): root of the subtree to dump (or the full tree when None)
"""
fmtnode = (' N{} [shape=rect, color="{}", ' +
'label="N{}\\n#{:d} / q{:.3f} / r{:.3f}"]')
fmtedge = ' N{} -- N{} [label="{} [{:.3f}]"]'
with open(filename, "w") as f:
print("graph {", file=f)
print(' rankdir="LR"', file=f)
roots = (range(self.roots) if root is None else [root])
for n in roots:
tot = 1 + self.N[n, :].sum()
avgq = (self.R[n] + self.Q[n, :] @ self.N[n, :]) / tot
color = ("red" if self.T[n] else "blue")
line = fmtnode.format(n, color, n, tot, avgq, self.R[n])
print(line, file=f)
nodes = (range(self.nodes) if root is None else self.subtree(root))
for n in nodes:
for a in range(self.N.shape[1]):
c = self.C[n, a]
if c < 0:
continue
color = ("red" if self.T[c] else "blue")
line = fmtnode.format(c, color, c, self.N[n, a],
self.Q[n, a], self.R[c])
print(line, file=f)
line = fmtedge.format(n, c, a, self.pi[n, a])
print(line, file=f)
print("}", file=f)
def subtree(self, root_index):
"""Return the indices of nodes descending from root_index."""
nodes = [root_index]
subtree = []
while nodes:
n = nodes.pop()
subtree.append(n)
nodes.extend(c for c in self.C[n, :] if c >= 0)
subtree.sort()
return subtree
def _test():
def evaluation(state):
state = np.array(state)
return state == 0, -np.abs(state), np.ones((state.size, 2)) / 2
def transition(state, actions):
return state + (2 * actions - 1)
tree = MCTS(np.arange(-3, 4), 2, transition, evaluation)
tree.grow(100)
p, a, d = tree.most_visited()
for aa, dd in zip(a, d):
print(aa[:dd])
print()
for aa, dd in zip(p, d):
print(aa[:dd])
print(tree.nodes, tree.height)
tree.descend_tree(a[:, 0])
print(tree.nodes, tree.height)
if __name__ == "__main__":
_test()
| 13,523 | 37.862069 | 99 | py |
TreEnhance | TreEnhance-master/Network.py | import torch
from torchvision.models import resnet18
import torch.nn as nn
class ModifiedResnet(nn.Module):
def __init__(self, n_actions, Dropout=None):
super(ModifiedResnet, self).__init__()
self.model = resnet18(pretrained=False)
num_ftrs = self.model.fc.in_features
self.model.fc = nn.Identity()
self.fc1 = nn.Sequential(nn.Linear(num_ftrs, n_actions), nn.Softmax(dim=1))
if Dropout is not None:
self.fc2 = nn.Sequential(nn.Linear(num_ftrs, 256), nn.ReLU(), torch.nn.Dropout(Dropout), nn.Linear(256, 1),
nn.Sigmoid())
else:
self.fc2 = nn.Sequential(nn.Linear(num_ftrs, 256), nn.ReLU(), nn.Linear(256, 1), nn.Sigmoid())
def forward(self, x):
x = self.model(x)
out1 = self.fc1(x)
out2 = self.fc2(x)
return out1, out2
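if __name__ == "__main__":
    # Smoke test (illustrative only): the two heads return a softmax over the
    # action set and a value squashed into (0, 1).
    net = ModifiedResnet(37, Dropout=0.5)
    net.eval()
    probs, value = net(torch.rand(2, 3, 64, 64))
    print(probs.shape, value.shape)                 # [2, 37] and [2, 1]
    print(float(probs.sum(1)[0]), float(value[0]))  # ~1.0 and a value in (0, 1)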
| 879 | 34.2 | 119 | py |
TreEnhance | TreEnhance-master/metrics.py | import torch
def PSNR(img, gt):
    mseL = torch.nn.MSELoss()
    mse = mseL(img, gt)
    if mse == 0:
        # identical images: cap the MSE instead of taking log10 of infinity
        mse = torch.tensor(1e-9)
    return 20 * torch.log10(1 / torch.sqrt(mse))
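if __name__ == "__main__":
    # Worked example: a constant offset of 0.1 gives MSE = 0.01, hence
    # PSNR = 20*log10(1/0.1) = 20 dB for images normalised to [0, 1].
    a = torch.zeros(1, 3, 8, 8)
    b = torch.full_like(a, 0.1)
    print(PSNR(a, b))   # tensor(20.)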
| 275 | 24.090909 | 63 | py |
TreEnhance | TreEnhance-master/evaluation.py | #!/usr/bin/env python3
import warnings
import torch
import torch.utils
import torch.utils.data
import numpy as np
from data_Prep import Dataset_LOL
import Actions as Actions
import Network
import tqdm
import mcts
import argparse
from ptcolor import deltaE94, rgb2lab
warnings.filterwarnings("ignore")
NUM_ACTIONS = 37
MAX_DEPTH = 10
STOP_ACTION = NUM_ACTIONS - 1
IMAGE_SIZE = 256
def parse_args():
parser = argparse.ArgumentParser("Compute performace statistics.")
a = parser.add_argument
a("base_dir", help="dataset BASE Directory")
a("weight_file", help="File storing the weights of the CNN")
a("-s", "--steps", type=int, default=1000, help="Number of MCTS steps")
a("-e", "--exploration", type=float, default=10, help="Exploration coefficient")
a("-q", "--initial-q", type=float, default=0.5, help="Value for non-visited nodes")
a("-b", "--batch-size", type=int, default=30, help="Size of the mini batches")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
a("-d", "--device", default=device, help="Computing device")
return parser.parse_args()
class EvalState:
def __init__(self, image, depth=0):
self.image = image
self.depth = depth
self.stopped = False
def transition(self, action):
new_image = Actions.select(self.image[None], action)[0]
new_state = type(self)(new_image, self.depth + 1)
new_state.stopped = (action == STOP_ACTION)
return new_state
def terminal(self):
return self.depth >= MAX_DEPTH or self.stopped
def play_tree(net, images, device, steps, initial_q, exploration):
actions = STOP_ACTION + 1
def transition(states, actions):
return [s.transition(a) for s, a in zip(states, actions)]
def evaluation(states):
t = [s.terminal() for s in states]
batch = torch.stack([s.image for s in states], 0)
batch = batch.to(device)
with torch.no_grad():
pi, values = net(batch)
pi = pi.cpu().numpy()
values = values.squeeze(1).cpu().numpy()
return t, values, pi
root_states = [EvalState(im) for im in images]
trees = mcts.MCTS(root_states, actions, transition, evaluation,
exploration=exploration, initial_q=initial_q)
trees.grow(steps)
return trees
def mse_error(x, y):
diff = (x - y).reshape(x.size(0), -1)
return (diff ** 2).mean(1)
def average_psnr(mses):
mses = np.maximum(np.array(mses), 1e-6)
return (-10 * np.log10(mses)).mean()
def eval_closest_node(trees, targets):
mses = []
for n in range(trees.roots):
sub = trees.subtree(n)
images = torch.stack([s.image for s in trees.x[sub]], 0)
mse = mse_error(images, targets[n:n + 1]).min()
mses.append(mse)
return mses
def eval_most_valuable_node(trees, targets):
mses = []
def key(i):
return trees.R[i] if trees.T[i] else -1
for n in range(trees.roots):
sub = trees.subtree(n)
best = max(sub, key=key)
image = trees.x[best].image[None]
mse = mse_error(image, targets[n:n + 1])
mses.append(mse.item())
return mses
def evaluation(val_loader, res, args):
res.eval()
mses = []
closest_mses = []
valuable_mses = []
l2s = []
diz = {k: 0 for k in range(NUM_ACTIONS)}
diz[-1] = 0
    for img, target in tqdm.tqdm(val_loader):
trees = play_tree(res, img, args.device, args.steps, args.initial_q, args.exploration)
paths, actions, depths = trees.most_visited()
leaves = paths[np.arange(depths.size), depths - 1]
enhanced = torch.stack([s.image for s in trees.x[leaves]], 0)
for i in range(enhanced.shape[0]):
act = actions[i]
for ac in act:
diz[ac] += 1
if ac == STOP_ACTION:
break
l2s.append(torch.dist(enhanced[i], target[i], 2))
        mse = mse_error(enhanced, target)
        mses.extend(mse.tolist())
        deltae = deltaE94(rgb2lab(enhanced), rgb2lab(target))
        closest_mses.extend(eval_closest_node(trees, target))
        valuable_mses.extend(eval_most_valuable_node(trees, target))
    mean_l2 = torch.stack(l2s, 0).mean()   # average once, after all batches
    print(diz)
    print(f"Mean L2 {mean_l2:.3f}")
    print(f"PSNR {average_psnr(mses):.3f}")
def main():
args = parse_args()
print('STEPS:', args.steps)
    BASEDIR = args.base_dir
raw_dir = BASEDIR+'TEST/low/'
exp_dir = BASEDIR+'TEST/high/'
res = Network.ModifiedResnet(NUM_ACTIONS, 0.0)
res.to(args.device)
print("Loading", args.weight_file)
weights = torch.load(args.weight_file, map_location=args.device)
res.load_state_dict(weights)
val_set = Dataset_LOL(raw_dir, exp_dir, size=IMAGE_SIZE, training=False)
val_loader = torch.utils.data.DataLoader(val_set,
batch_size=args.batch_size,
drop_last=False,
shuffle=False,
num_workers=1)
import time
start = time.time()
evaluation(val_loader, res, args)
print('ELAPSED:', time.time() - start)
if __name__ == '__main__':
import resource
GB = (2 ** 30)
mem = 30 * GB
resource.setrlimit(resource.RLIMIT_DATA, (mem, resource.RLIM_INFINITY))
main()
| 5,424 | 30.358382 | 94 | py |
TreEnhance | TreEnhance-master/ColorAlgorithms.py | import torch
from torchvision.transforms.functional import adjust_saturation, adjust_hue
def Gray_World(img):
m = img.mean(-2, True).mean(-1, True)
img = img / torch.clamp(m, min=1e-3)
ma = img.max(-1, True).values.max(-2, True).values.max(-3, True).values
return img / torch.clamp(ma, min=1e-3)
def MaxRGB(img):
maxs = img.max(-1, True).values.max(-2, True).values.max(-3, True).values
return img / torch.clamp(maxs, min=1e-3)
def saturation(img, param):
return adjust_saturation(img, param)
def hue(img, param):
return adjust_hue(img, param)
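if __name__ == "__main__":
    # Illustrative check: after Gray_World the three channel means are equal
    # (the white-balance assumption), up to the final max-normalisation.
    img = torch.rand(1, 3, 32, 32)
    wb = Gray_World(img)
    print(wb.mean(-1).mean(-1))   # three (near-)identical values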
| 584 | 24.434783 | 77 | py |
uSDN | uSDN-master/utils.py | import numpy as np
import scipy.io as sio
import collections
import scipy.misc
import scipy.ndimage
def loadData(mname):
return sio.loadmat(mname)
def readData(filename,num=10):
input = loadData(filename)
    data = collections.namedtuple('data', ['hyperLR', 'multiHR', 'hyperHR',
                                           'dimLR', 'dimHR', 'srf', 'srfactor',
                                           'colLR', 'meanLR', 'reducedLR',
                                           'sphere', 'num'])
data.hyperLR = np.array(input['hyperLR']).astype(np.float32)
data.multiHR = np.array(input['multiHR']).astype(np.float32)
data.hyperHR = np.array(input['hyperHR']).astype(np.float32)
data.hyperLRI = np.array(input['hyperLRI']).astype(np.float32)
data.dimLR = data.hyperLR.shape
data.dimHR = data.multiHR.shape
data.num = num
    # spectral response function: 3 MSI bands x 31 HSI bands
srf = [[0.005, 0.007, 0.012, 0.015, 0.023, 0.025, 0.030, 0.026, 0.024, 0.019,
0.010, 0.004, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.001, 0.002, 0.003, 0.005, 0.007,
0.012, 0.013, 0.015, 0.016, 0.017, 0.02, 0.013, 0.011, 0.009, 0.005,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.002, 0.002, 0.003],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.001, 0.003, 0.010, 0.012, 0.013, 0.022,
0.020, 0.020, 0.018, 0.017, 0.016, 0.016, 0.014, 0.014, 0.013]]
srf = np.array(srf).astype(np.float32)
data.srf = srf.astype(np.float32)
    data.sr_factor = np.divide(data.dimHR[0], data.dimLR[0]).astype(int)
data.col_lr_hsi = np.reshape(data.hyperLR,[data.dimLR[0]*data.dimLR[1],data.dimLR[2]])
data.mean_lr_hsi = np.mean(data.col_lr_hsi,axis=0,keepdims=True)
data.rcol_lr_hsi = np.subtract(data.col_lr_hsi,data.mean_lr_hsi)
data.img_lr_hsi = np.reshape(data.rcol_lr_hsi,[data.dimLR[0],data.dimLR[1],data.dimLR[2]])
data.col_hr_msi = np.reshape(data.multiHR, [data.dimHR[0] * data.dimHR[1], data.dimHR[2]])
data.mean_hr_msi = np.mean(data.col_hr_msi, axis=0, keepdims=True)
data.rcol_hr_msi = np.subtract(data.col_hr_msi,data.mean_hr_msi)
data.img_hr_msi = np.reshape(data.rcol_hr_msi, [data.dimHR[0],data.dimHR[1],data.dimHR[2]])
data.multiLR = scipy.ndimage.zoom(data.multiHR, zoom=[1.0 / data.sr_factor, 1.0 / data.sr_factor, 1],order=0)
data.col_lr_msi = np.reshape(data.multiLR, [data.dimLR[0] * data.dimLR[1], data.dimHR[2]])
data.mean_lr_msi = np.mean(data.col_lr_msi, axis=0, keepdims=True)
data.rcol_lr_msi = np.subtract(data.col_lr_msi,data.mean_lr_msi)
data.col_hr_hsi = np.reshape(data.hyperHR,[data.dimHR[0]*data.dimHR[1],data.dimLR[2]])
return data | 2,846 | 48.086207 | 113 | py |
uSDN | uSDN-master/uSDN.py | from tensorflow.python.client import device_lib
from utils import *
from pbAuto_uSDN import*
import time
import os
import argparse
parser = argparse.ArgumentParser(description='uSDN for HSI-SR')
parser.add_argument('--cuda', default='0', help='Choose GPU.')
parser.add_argument('--filenum', type=str, default='balloons_ms', help='Image Name.')
parser.add_argument('--load_path', default='_sparse', help='Model Path.')
parser.add_argument('--save_path', default='_sparse')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES']= args.cuda
tf.logging.set_verbosity(tf.logging.ERROR)
def main():
# config = tf.ConfigProto(device_count={'GPU':8})
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.8
local_device_protos = device_lib.list_local_devices()
print(local_device_protos)
    loadLRonly = False
    initp = True
lr_rate = 0.001
p_rate = 0.00001
maxiter = 500000
tol = 1.5
vol_r = 0.0001
sp_r_lsi = 0.001
sp_r_msi = 0.0001
num = 12
ly = 3
init_num = 15000
nLRlevel = [ly,ly,ly,ly,ly]
nHRlevel = [ly,ly,ly,ly,ly]
file_content = 'data_cave/'+ str(args.filenum) + '.mat'
load_path = str(args.filenum) + args.load_path + str(num) + '_vol_' + str(vol_r) + '_s'+str(sp_r_lsi)+'/'
save_path = str(args.filenum) + args.load_path + str(num) + '_vol_' + str(vol_r) + '_s'+str(sp_r_lsi)+'/'
print('image pair '+str(args.filenum) + ' is processing')
data = readData(file_content, num)
# betapan(input data,rate for LR HSI, rate for HR MSI,
# lr network level, hr network level
# maximum epoch, is_adam,
# lr volume rate, lr sparse rate,
# hr sparse rate,
# init p or not, tolerence, initnumber)
auto = betapan(data, lr_rate, p_rate,
nLRlevel, nHRlevel,
maxiter, True,
vol_r,sp_r_lsi,sp_r_msi,initp,config)
start_time = time.time()
path = auto.train(load_path, save_path,loadLRonly, tol, init_num)
# print path
# auto.generate_hrhsi(save_path, load_path)
print("--- %s seconds ---" % (time.time() - start_time))
print('image pair '+str(args.filenum) + ' is done')
if __name__ == "__main__":
# define main use two __, if use only one_, it will not debug
main()
| 2,351 | 29.545455 | 109 | py |
uSDN | uSDN-master/pbAuto_uSDN.py | import tensorflow as tf
import numpy as np
import math
# import matplotlib.pyplot as plt
import scipy.io as sio
import random
import scipy.misc
import os
from tensorflow.python.training import saver
import tensorflow.contrib.layers as ly
from os.path import join as pjoin
from numpy import *
import numpy.matlib
import scipy.ndimage
import csv
class betapan(object):
def __init__(self, input, lr_rate, p_rate, nLRlevel, nHRlevel, epoch, is_adam,
vol_r, sp_r_lsi, sp_r_msi, initp, config):
# initialize the input and weights matrices
self.input = input
self.initlrate = lr_rate
self.initprate = p_rate
self.epoch = epoch
self.nLRlevel = nLRlevel
self.nHRlevel = nHRlevel
self.num = input.num
self.is_adam = is_adam
self.vol_r = vol_r
self.sp_r_lsi = sp_r_lsi
self.sp_r_msi = sp_r_msi
self.mean_lrhsi = input.mean_lr_hsi
self.mean_hrmsi = input.mean_hr_msi
self.dimlr = input.dimLR
self.dimhr = input.dimHR
self.input_lr_hsi = input.rcol_lr_hsi
self.input_hr_msi = input.rcol_hr_msi
self.input_lr_msi = input.rcol_lr_msi
self.input_hr_msi_h = np.zeros([input.dimLR[0]*input.dimLR[1],input.num])
self.sess = tf.Session(config=config)
self.initp = initp
with tf.name_scope('inputs'):
self.lr_hsi = tf.placeholder(tf.float32, [None, self.dimlr[2]], name='lrhsi_input')
self.hr_msi = tf.placeholder(tf.float32, [None, self.dimhr[2]], name='hrmsi_input')
self.hr_msi_h = tf.placeholder(tf.float32,[None, input.num], name = 'hrmsi_h')
with tf.variable_scope('lr_decoder') as scope:
self.wdecoder = {
'lr_decoder_w1': tf.Variable(tf.truncated_normal([self.num, self.num],stddev=0.1)),
'lr_decoder_w2': tf.Variable(tf.truncated_normal([self.num, self.dimlr[2]], stddev=0.1)),
}
def compute_latent_vars_break(self, i, remaining_stick, v_samples):
# compute stick segment
stick_segment = v_samples[:, i] * remaining_stick
remaining_stick *= (1 - v_samples[:, i])
return (stick_segment, remaining_stick)
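    # Stick-breaking note (illustrative): with fractions v = (v_1, ..., v_K),
    # the i-th segment is pi_i = v_i * prod_{j<i} (1 - v_j), so all segments
    # are non-negative and sum to at most 1 (a Dirichlet-process-style prior).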
def construct_vsamples(self,uniform,wb,hsize):
concat_wb = wb
for iter in range(hsize - 1):
concat_wb = tf.concat([concat_wb, wb], 1)
v_samples = uniform ** (1.0 / concat_wb)
return v_samples
def encoder_uniform_hsi(self,x,reuse=False):
layer_1 = tf.matmul(x, self.input.srf.T)
with tf.variable_scope('lr_hsi_uniform') as scope:
if reuse:
tf.get_variable_scope().reuse_variables()
layer_11 = tf.contrib.layers.fully_connected(layer_1, self.nLRlevel[0], activation_fn=None)
stack_layer_11 = tf.concat([x,layer_11], 1)
layer_12 = tf.contrib.layers.fully_connected(stack_layer_11, self.nLRlevel[1], activation_fn=None)
stack_layer_12 = tf.concat([stack_layer_11, layer_12], 1)
layer_13 = tf.contrib.layers.fully_connected(stack_layer_12, self.nLRlevel[2], activation_fn=None)
stack_layer_13 = tf.concat([stack_layer_12, layer_13], 1)
layer_14 = tf.contrib.layers.fully_connected(stack_layer_13, self.nLRlevel[3], activation_fn=None)
stack_layer_14 = tf.concat([stack_layer_13, layer_14], 1)
uniform = tf.contrib.layers.fully_connected(stack_layer_14, self.num, activation_fn=None)
return layer_1, uniform
def encoder_uniform_msi(self,x,reuse=False):
with tf.variable_scope('hr_msi_uniform') as scope:
if reuse:
tf.get_variable_scope().reuse_variables()
layer_11 = tf.contrib.layers.fully_connected(x, self.nLRlevel[0], activation_fn=None)
stack_layer_11 = tf.concat([x,layer_11], 1)
layer_12 = tf.contrib.layers.fully_connected(stack_layer_11, self.nLRlevel[1], activation_fn=None)
stack_layer_12 = tf.concat([stack_layer_11, layer_12], 1)
layer_13 = tf.contrib.layers.fully_connected(stack_layer_12, self.nLRlevel[2], activation_fn=None)
stack_layer_13 = tf.concat([stack_layer_12, layer_13], 1)
layer_14 = tf.contrib.layers.fully_connected(stack_layer_13, self.nLRlevel[3], activation_fn=None)
stack_layer_14 = tf.concat([stack_layer_13, layer_14], 1)
# layer_15 = tf.contrib.layers.fully_connected(stack_layer_14, self.nLRlevel[3], activation_fn=None)
# stack_layer_15 = tf.concat([stack_layer_14, layer_15], 1)
uniform = tf.contrib.layers.fully_connected(stack_layer_14, self.num, activation_fn=None)
return layer_11, uniform
def encoder_beta_hsi(self,x,reuse=False):
with tf.variable_scope('lr_hsi_beta') as scope:
if reuse:
tf.get_variable_scope().reuse_variables()
layer_21 = tf.contrib.layers.fully_connected(x, self.nLRlevel[0], activation_fn=None)
stack_layer_21 = tf.concat([x,layer_21], 1)
layer_22 = tf.contrib.layers.fully_connected(stack_layer_21, self.nLRlevel[1], activation_fn=None)
stack_layer_22 = tf.concat([layer_22, stack_layer_21], 1)
layer_32 = tf.contrib.layers.fully_connected(stack_layer_22, 1, activation_fn=None)
return layer_32
def encoder_beta_msi(self,x,reuse=False):
with tf.variable_scope('hr_msi_beta') as scope:
if reuse:
tf.get_variable_scope().reuse_variables()
layer_21 = tf.contrib.layers.fully_connected(x, self.nLRlevel[0], activation_fn=None)
stack_layer_21 = tf.concat([x,layer_21], 1)
layer_22 = tf.contrib.layers.fully_connected(stack_layer_21, self.nLRlevel[1], activation_fn=None)
stack_layer_22 = tf.concat([layer_22, stack_layer_21], 1)
layer_32 = tf.contrib.layers.fully_connected(stack_layer_22, 1, activation_fn=None)
return layer_32
def encoder_vsamples_hsi(self, x, hsize, reuse=False):
layer1, uniform = self.encoder_uniform_hsi(x,reuse)
uniform = tf.nn.sigmoid(uniform)
wb = self.encoder_beta_hsi(layer1,reuse)
wb = tf.nn.softplus(wb)
v_samples = self.construct_vsamples(uniform,wb,hsize)
return v_samples, uniform, wb
def encoder_vsamples_msi(self, x, hsize, reuse=False):
stack_layer_12, uniform = self.encoder_uniform_msi(x,reuse)
uniform = tf.nn.sigmoid(uniform)
wb = self.encoder_beta_msi(x,reuse)
wb = tf.nn.softplus(wb)
v_samples = self.construct_vsamples(uniform,wb,hsize)
return v_samples, uniform, wb
def construct_stick_break(self,vsample, dim, stick_size):
size = dim[0]*dim[1]
size = int(size)
remaining_stick = tf.ones(size, )
for i in range(stick_size):
[stick_segment, remaining_stick] = self.compute_latent_vars_break(i, remaining_stick, vsample)
if i == 0:
stick_segment_sum_lr = tf.expand_dims(stick_segment, 1)
else:
stick_segment_sum_lr = tf.concat([stick_segment_sum_lr, tf.expand_dims(stick_segment, 1)],1)
return stick_segment_sum_lr
def encoder_lr_hsi(self, x, reuse=False):
v_samples,uniform, wb = self.encoder_vsamples_hsi(x, self.num, reuse)
stick_segment_sum_lr = self.construct_stick_break(v_samples, self.dimlr, self.num)
return stick_segment_sum_lr
def encoder_hr_msi(self, x, reuse=False):
v_samples,v_uniform, v_beta = self.encoder_vsamples_msi(x, self.num, reuse)
stick_segment_sum_msi = self.construct_stick_break(v_samples, self.dimhr, self.num)
return stick_segment_sum_msi
def encoder_hr_msi_init(self, x, reuse=False):
v_samples,v_uniform, v_beta = self.encoder_vsamples_msi(x, self.num, reuse)
stick_segment_sum_msi_init = self.construct_stick_break(v_samples, self.dimlr, self.num)
return stick_segment_sum_msi_init
def decoder_hsi(self, x):
layer_1 = tf.matmul(x, self.wdecoder['lr_decoder_w1'])
layer_2 = tf.matmul(layer_1, self.wdecoder['lr_decoder_w2'])
return layer_2
def decoder_msi(self,x):
layer_1 = tf.matmul(x, self.wdecoder['lr_decoder_w1'])
layer_2 = tf.matmul(layer_1, self.wdecoder['lr_decoder_w2'])
layer_3 = tf.add(layer_2,self.input.mean_lr_hsi)
return layer_3
def gen_lrhsi(self, x, reuse=False):
encoder_op = self.encoder_lr_hsi(x, reuse)
decoder_op = self.decoder_hsi(encoder_op)
return decoder_op
def gen_hrmsi(self, x, reuse=False):
encoder_op = self.encoder_hr_msi(x, reuse)
decoder_hr = self.decoder_msi(encoder_op)
decoder_op = tf.matmul(decoder_hr,self.input.srf.T)
decoder_plus_m = tf.add(decoder_op, -self.input.mean_hr_msi)
# decoder_sphere = tf.matmul(decoder_plus_m,self.input.invsig_msi)
return decoder_plus_m
def gen_hrhsi(self, x, reuse=True):
encoder_op = self.encoder_hr_msi(x, reuse)
decoder_hr = self.decoder_msi(encoder_op)
return decoder_hr
def next_feed(self):
feed_dict = {self.hr_msi:self.input_hr_msi, self.lr_hsi:self.input_lr_hsi}
return feed_dict
def gen_msi_h(self, x, reuse = False):
encoder_init = self.encoder_hr_msi_init(x,reuse)
return encoder_init
def build_model(self):
# build model for lr hsi
y_pred_lrhsi = self.gen_lrhsi(self.lr_hsi, False)
y_true_lrhsi = self.lr_hsi
error_lrhsi = y_pred_lrhsi - y_true_lrhsi
lrhsi_loss_euc = tf.reduce_mean(tf.reduce_sum(tf.pow(error_lrhsi, 2),0))
#lrhsi_loss_euc = tf.reduce_mean(tf.pow(error_lrhsi, 2))
# geometric constraints
decoder_op = tf.matmul(self.wdecoder['lr_decoder_w1'], self.wdecoder['lr_decoder_w2'])
decoder = tf.add(decoder_op, self.input.mean_lr_hsi)
lrhsi_volume_loss = tf.reduce_mean(tf.matmul(tf.transpose(decoder),decoder))
# spatial sparse
eps = 0.00000001
lrhsi_top = self.encoder_lr_hsi(self.lr_hsi, reuse=True)
lrhsi_base_norm = tf.reduce_sum(lrhsi_top, 1, keepdims=True)
lrhsi_sparse = tf.div(lrhsi_top, (lrhsi_base_norm + eps))
lrhsi_loss_sparse = tf.reduce_mean(-tf.multiply(lrhsi_sparse, tf.log(lrhsi_sparse + eps)))
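        # This sparsity term is the Shannon entropy of the row-normalized
        # abundances; minimizing it drives each pixel's representation toward
        # a few active basis vectors.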
# lr hsi total loss
lrhsi_loss = lrhsi_loss_euc + self.vol_r * lrhsi_volume_loss + self.sp_r_lsi * lrhsi_loss_sparse
        # trainable variables of the LR-HSI branch
theta_basic_decoder = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='lr_decoder')
theta_uniform_lrhsi = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='lr_hsi_uniform')
theta_beta_lrhsi = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='lr_hsi_beta')
counter_lrhsi = tf.Variable(trainable=False, initial_value=0, dtype=tf.int32)
opt_lrhsi = ly.optimize_loss(loss=lrhsi_loss, learning_rate=self.initlrate,
optimizer=tf.train.AdamOptimizer if self.is_adam is True else tf.train.RMSPropOptimizer,
variables=theta_basic_decoder+theta_uniform_lrhsi+theta_beta_lrhsi,global_step=counter_lrhsi)
# build model for high resolution msi image
y_pred_hrmsi = self.gen_hrmsi(self.hr_msi, False)
y_true_hrmsi = self.hr_msi
error_hrmsi = y_pred_hrmsi - y_true_hrmsi
hrmsi_loss_euc = tf.reduce_mean(tf.reduce_sum(tf.pow(error_hrmsi, 2), 0))
#hrmsi_loss_euc = tf.reduce_mean(tf.pow(error_hrmsi, 2))
# spatial sparse
hrmsi_top = self.encoder_hr_msi(self.hr_msi, reuse=True)
hrmsi_base_norm = tf.reduce_sum(hrmsi_top, 1, keepdims=True)
hrmsi_sparse = tf.div(hrmsi_top, (hrmsi_base_norm + eps))
hrmsi_loss_sparse = tf.reduce_mean(-tf.multiply(hrmsi_sparse, tf.log(hrmsi_sparse + eps)))
theta_uniform_hrmsi = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='hr_msi_uniform')
theta_beta_hrmsi = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='hr_msi_beta')
# abundance init
msi_h = self.gen_msi_h(self.hr_msi,True)
error_init = msi_h - self.hr_msi_h
msih_init_loss = tf.reduce_mean(tf.pow(error_init, 2))
counter_init = tf.Variable(trainable=False, initial_value=0, dtype=tf.int32)
opt_init = ly.optimize_loss(loss=msih_init_loss, learning_rate=self.initlrate,
optimizer=tf.train.AdamOptimizer if self.is_adam is True else tf.train.RMSPropOptimizer,
variables=theta_uniform_hrmsi+theta_beta_hrmsi,
global_step=counter_init)
# spectral loss
nom_pred = tf.reduce_sum(tf.pow(msi_h, 2),0)
nom_true = tf.reduce_sum(tf.pow(self.hr_msi_h, 2),0)
nom_base = tf.sqrt(tf.multiply(nom_pred, nom_true))
nom_top = tf.reduce_sum(tf.multiply(msi_h,self.hr_msi_h),0)
angle = tf.reduce_mean(tf.acos(tf.div(nom_top, (nom_base + eps))))
angle_loss = tf.div(angle,3.1416) # spectral loss
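        # The block above is the Spectral Angle Mapper (SAM) objective:
        # acos(<h_pred, h_true> / (||h_pred|| * ||h_true||)) averaged over
        # columns, then normalized by pi so the loss lies in [0, 1].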
counter_angle = tf.Variable(trainable=False, initial_value=0, dtype=tf.int32)
opt_angle = ly.optimize_loss(loss=angle_loss, learning_rate=self.initlrate,
optimizer=tf.train.AdamOptimizer if self.is_adam is True else tf.train.RMSPropOptimizer,
variables=theta_uniform_hrmsi+theta_beta_hrmsi,
global_step=counter_angle)
hrmsi_loss = hrmsi_loss_euc + self.sp_r_lsi * hrmsi_loss_sparse
# hrmsi_loss = hrmsi_loss_euc
counter_hrmsi = tf.Variable(trainable=False, initial_value=0, dtype=tf.int32)
opt_hrmsi = ly.optimize_loss(loss=hrmsi_loss, learning_rate=self.initlrate,
optimizer=tf.train.AdamOptimizer if self.is_adam is True else tf.train.RMSPropOptimizer,
variables= theta_uniform_hrmsi+theta_beta_hrmsi,
global_step=counter_hrmsi)
return lrhsi_loss, opt_lrhsi, hrmsi_loss, opt_hrmsi, lrhsi_volume_loss, lrhsi_loss_sparse, hrmsi_loss_sparse, msih_init_loss, opt_init, angle_loss, opt_angle
def train(self, load_Path, save_dir, loadLRonly, tol,init_num):
lrhsi_loss, opt_lrhsi, hrmsi_loss, opt_hrmsi, lrhsi_volume_loss, lrhsi_loss_sparse, hrmsi_loss_sparse, msih_init_loss, opt_h_init,angle_loss, opt_angle = self.build_model()
self.sess.run(tf.global_variables_initializer())
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if os.path.exists(load_Path):
if loadLRonly:
# load part of the variables
vars = tf.contrib.slim.get_variables_to_restore()
variables_to_restore = [v for v in vars if v.name.startswith('lr_decoder/')] \
+ [v for v in vars if v.name.startswith('lr_hsi_uniform/')] \
+ [v for v in vars if v.name.startswith('lr_hsi_beta/')] \
+ [v for v in vars if v.name.startswith('hr_msi_uniform/')] \
+ [v for v in vars if v.name.startswith('hr_msi_beta/')]
saver = tf.train.Saver(variables_to_restore)
load_file = tf.train.latest_checkpoint(load_Path)
if load_file==None:
print('No checkpoint was saved.')
else:
saver.restore(self.sess,load_file)
else:
# load all the variables
saver = tf.train.Saver()
load_file = tf.train.latest_checkpoint(load_Path)
if load_file==None:
print('No checkpoint was saved.')
else:
saver.restore(self.sess, load_file)
else:
saver = tf.train.Saver()
results_file_name = pjoin(save_dir,"sb_" + "lrate_" + str(self.initlrate)+ ".txt")
# results_ckpt_name = pjoin(save_dir,"sb_" + "lrate_" + str(self.initlrate)+ ".ckpt")
results_file = open(results_file_name, 'a')
feed_dict = self.next_feed()
sam_hr = 10
sam_lr = 10
rate_decay = 0.99977
count = 0
stop_cont = 0
sam_total = zeros(self.epoch+1)
rmse_total = zeros(self.epoch+1)
sam_total[0] = 50
rmse_total[0] = 50
for epoch in range(self.epoch):
if sam_lr > tol:
_, lr_loss = self.sess.run([opt_lrhsi,lrhsi_loss], feed_dict=feed_dict)
self.initlrate = self.initlrate * rate_decay
self.vol_r = self.vol_r * rate_decay
self.sp_r_lsi = self.vol_r * rate_decay
if (epoch + 1) % 50 == 0:
# Report and save progress.
results = "epoch {}: LR HSI loss {:.12f} learing_rate {:.9f}"
results = results.format(epoch, lr_loss, self.initlrate)
print (results)
print ("\n")
results_file.write(results + "\n")
results_file.flush()
v_loss = self.sess.run(lrhsi_volume_loss)
results = "epoch {}: volumn loss {:.12f} learing_rate {:.9f}"
results = results.format(epoch, v_loss, self.initlrate)
print (results)
print ("\n")
results_file.write(results + "\n")
results_file.flush()
sp_hsi_loss = self.sess.run(lrhsi_loss_sparse, feed_dict=feed_dict)
results = "epoch {}: lr sparse loss {:.12f} learing_rate {:.9f}"
results = results.format(epoch, sp_hsi_loss, self.initlrate)
print (results)
print ("\n")
results_file.write(results + "\n\n")
results_file.flush()
img_lr = self.sess.run(self.gen_lrhsi(self.lr_hsi, reuse=True), feed_dict=feed_dict) + self.mean_lrhsi
rmse_lr, sam_lr = self.evaluation(img_lr,self.input.col_lr_hsi,'LR HSi',epoch,results_file)
if (epoch + 1) % 1000 == 0:
# saver = tf.train.Saver()
results_ckpt_name = pjoin(save_dir,
"epoch_" + str(epoch) + "_sam_" + str(round(sam_hr, 3)) + ".ckpt")
save_path = saver.save(self.sess, results_ckpt_name)
results = "weights saved at epoch {}"
results = results.format(epoch)
print(results)
print('\n')
if sam_lr <= tol:
if count == 0:
self.input_hr_msi_h = self.sess.run(self.encoder_lr_hsi(self.lr_hsi, True), feed_dict=feed_dict)
if self.initp == True:
while self.initp and count < init_num:
_, initloss = self.sess.run([opt_h_init,msih_init_loss],
feed_dict={self.hr_msi:self.input_lr_msi,
self.hr_msi_h:self.input_hr_msi_h})
initpanloss = "epoch {}: initloss of the msi: {:.9f}"
initpanloss = initpanloss.format(count,initloss)
print (initpanloss)
results_file.write(initpanloss + "\n")
results_file.flush()
count = count + 1
if (count) % 1000 == 0:
saver = tf.train.Saver()
if initloss<0.00001:
self.initp = False
_, msi_loss = self.sess.run([opt_hrmsi,hrmsi_loss], feed_dict=feed_dict)
self.initprate = self.initprate * rate_decay
self.sp_r_msi = self.sp_r_msi * rate_decay
if (epoch + 1) % 20 == 0:
# Report and save progress.
results = "epoch {}: HR MSI loss {:.12f} learing_rate {:.9f}"
results = results.format(epoch, msi_loss, self.initprate)
print(results)
print("\n")
results_file.write(results + "\n\n")
results_file.flush()
sp_msi_loss = self.sess.run(hrmsi_loss_sparse, feed_dict=feed_dict)
results = "epoch {}: hr sparse loss {:.12f} learing_rate {:.9f}"
results = results.format(epoch, sp_msi_loss, self.initprate)
print(results)
print("\n")
results_file.write(results + "\n\n")
results_file.flush()
_, angleloss = self.sess.run([opt_angle, angle_loss], feed_dict={self.hr_msi: self.input_lr_msi,
self.hr_msi_h: self.input_hr_msi_h})
angle = "Angle of the pan: {:.12f}"
angle = angle.format(angleloss)
print(angle)
results_file.write(angle + "\n")
results_file.flush()
# img_hr = self.sess.run(self.gen_hrmsi(self.hr_msi, reuse=True), feed_dict=feed_dict) + self.mean_hrmsi
# sam_hr = self.evaluation(img_hr,self.input.col_hr_msi,'HR MSI',epoch,results_file)
img_hr = self.sess.run(self.gen_hrhsi(self.hr_msi, reuse=True), feed_dict=feed_dict)
rmse_hr, sam_hr = self.evaluation(img_hr,self.input.col_hr_hsi,'HR MSI',epoch,results_file)
stop_cont = stop_cont + 1
sam_total[stop_cont] = sam_hr
rmse_total[stop_cont] = rmse_hr
if ((sam_total[stop_cont-1] / sam_total[stop_cont]) < 1 - 0.0001 and (rmse_total[stop_cont-1]/rmse_total[stop_cont]<1 - 0.0001)):
results_ckpt_name = pjoin(save_dir,"epoch_" + str(epoch) + "_sam_" + str(round(sam_hr, 3)) + ".ckpt")
save_path = saver.save(self.sess, results_ckpt_name)
print('training is done')
                    break
return save_path
def evaluation(self,img_hr,img_tar,name,epoch,results_file):
        # evaluate the results
ref = img_tar*255.0
tar = img_hr*255.0
lr_flags = tar<0
tar[lr_flags]=0
hr_flags = tar>255.0
tar[hr_flags] = 255.0
#ref = ref.astype(np.int)
#tar = tar.astype(np.int)
        diff = ref - tar
        size = ref.shape
        rmse = np.sqrt(np.sum(np.sum(np.power(diff, 2))) / (size[0] * size[1]))
# rmse_list.append(rmse)
# print('epoch '+str(epoch)+' '+'The RMSE of the ' + name + ' is: '+ str(rmse))
results = name + " epoch {}: RMSE {:.12f} "
results = results.format(epoch, rmse)
print (results)
results_file.write(results + "\n")
results_file.flush()
# spectral loss
nom_top = np.sum(np.multiply(ref, tar),0)
nom_pred = np.sqrt(np.sum(np.power(ref, 2),0))
nom_true = np.sqrt(np.sum(np.power(tar, 2),0))
nom_base = np.multiply(nom_pred, nom_true)
angle = np.arccos(np.divide(nom_top, (nom_base)))
angle = np.nan_to_num(angle)
sam = np.mean(angle)*180.0/3.14159
# sam_list.append(sam)
# print('epoch '+str(epoch)+' '+'The SAM of the ' + name + ' is: '+ str(sam)+'\n')
results = name + " epoch {}: SAM {:.12f} "
results = results.format(epoch, sam)
print (results)
print ("\n")
results_file.write(results + "\n")
results_file.flush()
return rmse, sam
def generate_hrhsi(self, save_dir, filename):
# self.sess.run(tf.global_variables_initializer())
gen_hrhsi = self.gen_hrhsi(self.hr_msi, reuse=False)
feed_dict = self.next_feed()
saver = tf.train.Saver()
save_path = tf.train.latest_checkpoint(filename)
# save_path = filename
if save_path == None:
print('No checkpoint was saved.')
else:
saver.restore(self.sess, save_path)
print(save_path + ' is loaded.')
        # save the reconstructed HR HSI
hrhsi = self.sess.run(gen_hrhsi, feed_dict=feed_dict)
np.savetxt(save_dir + "/hrhsi.csv", hrhsi, delimiter=",")
| 24,758 | 45.539474 | 180 | py |
deep-viz-keras | deep-viz-keras-master/saliency.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to compute SaliencyMasks."""
import numpy as np
import keras.backend as K
class SaliencyMask(object):
"""Base class for saliency masks. Alone, this class doesn't do anything."""
def __init__(self, model, output_index=0):
"""Constructs a SaliencyMask.
Args:
model: the keras model used to make prediction
output_index: the index of the node in the last layer to take derivative on
"""
pass
def get_mask(self, input_image):
"""Returns an unsmoothed mask.
Args:
input_image: input image with shape (H, W, 3).
"""
pass
def get_smoothed_mask(self, input_image, stdev_spread=.2, nsamples=50):
"""Returns a mask that is smoothed with the SmoothGrad method.
Args:
input_image: input image with shape (H, W, 3).
"""
stdev = stdev_spread * (np.max(input_image) - np.min(input_image))
total_gradients = np.zeros_like(input_image)
for i in range(nsamples):
noise = np.random.normal(0, stdev, input_image.shape)
x_value_plus_noise = input_image + noise
total_gradients += self.get_mask(x_value_plus_noise)
return total_gradients / nsamples
class GradientSaliency(SaliencyMask):
r"""A SaliencyMask class that computes saliency masks with a gradient."""
def __init__(self, model, output_index=0):
# Define the function to compute the gradient
    input_tensors = [model.input,        # placeholder for input image tensor
                     K.learning_phase(), # placeholder for mode (train or test) tensor
                    ]
gradients = model.optimizer.get_gradients(model.output[0][output_index], model.input)
self.compute_gradients = K.function(inputs=input_tensors, outputs=gradients)
def get_mask(self, input_image):
"""Returns a vanilla gradient mask.
Args:
input_image: input image with shape (H, W, 3).
"""
# Execute the function to compute the gradient
x_value = np.expand_dims(input_image, axis=0)
gradients = self.compute_gradients([x_value, 0])[0][0]
return gradients
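# Illustrative usage sketch (assumes a compiled Keras `model` and an input
# image array `img` of shape (H, W, 3) are already in scope):
#
#   saliency = GradientSaliency(model, output_index=0)
#   vanilla_mask = saliency.get_mask(img)
#   smooth_mask = saliency.get_smoothed_mask(img, stdev_spread=0.2, nsamples=50)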
| 2,836 | 35.371795 | 93 | py |
deep-viz-keras | deep-viz-keras-master/integrated_gradients.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to compute an IntegratedGradients SaliencyMask."""
import numpy as np
from saliency import GradientSaliency
class IntegratedGradients(GradientSaliency):
"""A SaliencyMask class that implements the integrated gradients method.
https://arxiv.org/abs/1703.01365
"""
  def get_mask(self, input_image, input_baseline=None, nsamples=100):
    """Returns an integrated gradients mask."""
    if input_baseline is None:
      input_baseline = np.zeros_like(input_image)

    assert input_baseline.shape == input_image.shape

    input_diff = input_image - input_baseline
    total_gradients = np.zeros_like(input_image)

    # Average the gradients along the straight-line path from the baseline to
    # the input (a Riemann approximation of the path integral).
    for alpha in np.linspace(0, 1, nsamples):
      input_step = input_baseline + alpha * input_diff
      total_gradients += super(IntegratedGradients, self).get_mask(input_step)

    return total_gradients * input_diff
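# Illustrative usage sketch (assumes a Keras `model` and image `img` in scope):
#
#   ig = IntegratedGradients(model, output_index=0)
#   mask = ig.get_mask(img)  # baseline defaults to an all-zeros image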
| 1,506 | 34.880952 | 84 | py |
deep-viz-keras | deep-viz-keras-master/guided_backprop.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilites to computed GuidedBackprop SaliencyMasks"""
from saliency import SaliencyMask
import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.models import load_model
class GuidedBackprop(SaliencyMask):
"""A SaliencyMask class that computes saliency masks with GuidedBackProp.
This implementation copies the TensorFlow graph to a new graph with the ReLU
gradient overwritten as in the paper:
https://arxiv.org/abs/1412.6806
"""
GuidedReluRegistered = False
def __init__(self, model, output_index=0, custom_loss=None):
"""Constructs a GuidedBackprop SaliencyMask."""
if GuidedBackprop.GuidedReluRegistered is False:
@tf.RegisterGradient("GuidedRelu")
def _GuidedReluGrad(op, grad):
gate_g = tf.cast(grad > 0, "float32")
gate_y = tf.cast(op.outputs[0] > 0, "float32")
return gate_y * gate_g * grad
GuidedBackprop.GuidedReluRegistered = True
"""
Create a dummy session to set the learning phase to 0 (test mode in keras) without
inteferring with the session in the original keras model. This is a workaround
for the problem that tf.gradients returns error with keras models that contains
Dropout or BatchNormalization.
Basic Idea: save keras model => create new keras model with learning phase set to 0 => save
the tensorflow graph => create new tensorflow graph with ReLU replaced by GuiededReLU.
"""
model.save('/tmp/gb_keras.h5')
with tf.Graph().as_default():
with tf.Session().as_default():
K.set_learning_phase(0)
load_model('/tmp/gb_keras.h5', custom_objects={"custom_loss":custom_loss})
session = K.get_session()
tf.train.export_meta_graph()
saver = tf.train.Saver()
saver.save(session, '/tmp/guided_backprop_ckpt')
self.guided_graph = tf.Graph()
with self.guided_graph.as_default():
self.guided_sess = tf.Session(graph = self.guided_graph)
with self.guided_graph.gradient_override_map({'Relu': 'GuidedRelu'}):
saver = tf.train.import_meta_graph('/tmp/guided_backprop_ckpt.meta')
saver.restore(self.guided_sess, '/tmp/guided_backprop_ckpt')
self.imported_y = self.guided_graph.get_tensor_by_name(model.output.name)[0][output_index]
self.imported_x = self.guided_graph.get_tensor_by_name(model.input.name)
self.guided_grads_node = tf.gradients(self.imported_y, self.imported_x)
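    # Illustrative usage sketch (assumes a Keras `model` and an image array
    # `img` are already in scope):
    #
    #   gb = GuidedBackprop(model, output_index=0)
    #   mask = gb.get_mask(img)  # gradients flow only where grad > 0 and the ReLU output > 0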
def get_mask(self, input_image):
"""Returns a GuidedBackprop mask."""
x_value = np.expand_dims(input_image, axis=0)
guided_feed_dict = {}
guided_feed_dict[self.imported_x] = x_value
gradients = self.guided_sess.run(self.guided_grads_node, feed_dict = guided_feed_dict)[0][0]
return gradients | 3,669 | 42.176471 | 106 | py |
deep-viz-keras | deep-viz-keras-master/utils.py | import numpy as np
import matplotlib.pyplot as plt
import PIL.Image

def show_image(image, grayscale = True, ax=None, title=''):
if ax is None:
plt.figure()
plt.axis('off')
if len(image.shape) == 2 or grayscale == True:
if len(image.shape) == 3:
image = np.sum(np.abs(image), axis=2)
vmax = np.percentile(image, 99)
vmin = np.min(image)
plt.imshow(image, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
plt.title(title)
else:
image = image + 127.5
image = image.astype('uint8')
plt.imshow(image)
plt.title(title)
def load_image(file_path):
im = PIL.Image.open(file_path)
im = np.asarray(im)
return im - 127.5 | 689 | 25.538462 | 65 | py |
deep-viz-keras | deep-viz-keras-master/visual_backprop.py | from saliency import SaliencyMask
import numpy as np
import keras.backend as K
from keras.layers import Input, Conv2DTranspose
from keras.models import Model
from keras.initializers import Ones, Zeros
class VisualBackprop(SaliencyMask):
"""A SaliencyMask class that computes saliency masks with VisualBackprop (https://arxiv.org/abs/1611.05418).
"""
def __init__(self, model, output_index=0):
"""Constructs a VisualProp SaliencyMask."""
inps = [model.input, K.learning_phase()] # input placeholder
outs = [layer.output for layer in model.layers] # all layer outputs
self.forward_pass = K.function(inps, outs) # evaluation function
self.model = model
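    # Illustrative usage sketch (assumes a Keras CNN `model` and image `img`):
    #
    #   vb = VisualBackprop(model)
    #   mask = vb.get_mask(img)  # feature maps averaged, upsampled and multiplied layer by layer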
def get_mask(self, input_image):
"""Returns a VisualBackprop mask."""
x_value = np.expand_dims(input_image, axis=0)
visual_bpr = None
layer_outs = self.forward_pass([x_value, 0])
for i in range(len(self.model.layers)-1, -1, -1):
if 'Conv2D' in str(type(self.model.layers[i])):
layer = np.mean(layer_outs[i], axis=3, keepdims=True)
layer = layer - np.min(layer)
layer = layer/(np.max(layer)-np.min(layer)+1e-6)
if visual_bpr is not None:
if visual_bpr.shape != layer.shape:
visual_bpr = self._deconv(visual_bpr)
visual_bpr = visual_bpr * layer
else:
visual_bpr = layer
return visual_bpr[0]
def _deconv(self, feature_map):
"""The deconvolution operation to upsample the average feature map downstream"""
x = Input(shape=(None, None, 1))
y = Conv2DTranspose(filters=1,
kernel_size=(3,3),
strides=(2,2),
padding='same',
kernel_initializer=Ones(),
bias_initializer=Zeros())(x)
deconv_model = Model(inputs=[x], outputs=[y])
inps = [deconv_model.input, K.learning_phase()] # input placeholder
outs = [deconv_model.layers[-1].output] # output placeholder
deconv_func = K.function(inps, outs) # evaluation function
return deconv_func([feature_map, 0])[0] | 2,406 | 40.5 | 112 | py |
lrec2020-coref | lrec2020-coref-master/scripts/create_crossval_train_predict.py | import sys
def gen(path_to_scorer, train_out_file, pred_out_file):
train_out=open(train_out_file, "w", encoding="utf-8")
pred_out=open(pred_out_file, "w", encoding="utf-8")
for i in range(10):
train_out.write ("python3 scripts/bert_coref.py -m train -w models/crossval/%s.model -t data/litbank_tenfold_splits/%s/train.conll -v data/litbank_tenfold_splits/%s/dev.conll -o preds/crossval/%s.dev.pred -s %s> logs/crossval/%s.log 2>&1\n" % (i, i, i, i, path_to_scorer, i))
pred_out.write("python3 scripts/bert_coref.py -m predict -w models/crossval/%s.model -v data/litbank_tenfold_splits/%s/test.conll -o preds/crossval/%s.goldmentions.test.preds -s %s\n" % (i, i, i, path_to_scorer))
train_out.close()
pred_out.close()
gen(sys.argv[1], sys.argv[2], sys.argv[3]) | 774 | 47.4375 | 277 | py |
lrec2020-coref | lrec2020-coref-master/scripts/bert_coref.py | import re
import os
from collections import Counter
import sys
import argparse
import pytorch_pretrained_bert
from pytorch_pretrained_bert.modeling import BertPreTrainedModel, BertModel, BertConfig
from pytorch_pretrained_bert import BertTokenizer
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
import torch
from torch import nn
import torch.optim as optim
import numpy as np
import random
import calc_coref_metrics
from torch.optim.lr_scheduler import ExponentialLR
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
bert_dim=768
HIDDEN_DIM=200
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LSTMTagger(BertPreTrainedModel):
def __init__(self, config, freeze_bert=False):
super(LSTMTagger, self).__init__(config)
hidden_dim=HIDDEN_DIM
self.hidden_dim=hidden_dim
self.tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False, do_basic_tokenize=False)
self.bert = BertModel.from_pretrained("bert-base-cased")
self.bert.eval()
if freeze_bert:
for param in self.bert.parameters():
param.requires_grad = False
self.distance_embeddings = nn.Embedding(11, 20)
self.sent_distance_embeddings = nn.Embedding(11, 20)
self.nested_embeddings = nn.Embedding(2, 20)
self.gender_embeddings = nn.Embedding(3, 20)
self.width_embeddings = nn.Embedding(12, 20)
self.quote_embeddings = nn.Embedding(3, 20)
self.lstm = nn.LSTM(4*bert_dim, hidden_dim, bidirectional=True, batch_first=True)
self.attention1 = nn.Linear(hidden_dim * 2, hidden_dim * 2)
self.attention2 = nn.Linear(hidden_dim * 2, 1)
self.mention_mention1 = nn.Linear( (3 * 2 * hidden_dim + 20 + 20) * 3 + 20 + 20 + 20 + 20, 150)
self.mention_mention2 = nn.Linear(150, 150)
self.mention_mention3 = nn.Linear(150, 1)
self.unary1 = nn.Linear(3 * 2 * hidden_dim + 20 + 20, 150)
self.unary2 = nn.Linear(150, 150)
self.unary3 = nn.Linear(150, 1)
self.drop_layer_020 = nn.Dropout(p=0.2)
self.tanh = nn.Tanh()
self.apply(self.init_bert_weights)
def get_mention_reps(self, input_ids=None, attention_mask=None, starts=None, ends=None, index=None, widths=None, quotes=None, matrix=None, transforms=None, doTrain=True):
starts=starts.to(device)
ends=ends.to(device)
widths=widths.to(device)
quotes=quotes.to(device)
input_ids = input_ids.to(device)
attention_mask = attention_mask.to(device)
transforms = transforms.to(device)
# matrix specifies which token positions (cols) are associated with which mention spans (row)
matrix=matrix.to(device) # num_sents x max_ents x max_words
# index specifies the location of the mentions in each sentence (which vary due to padding)
index=index.to(device)
sequence_outputs, pooled_outputs = self.bert(input_ids, token_type_ids=None, attention_mask=attention_mask)
all_layers = torch.cat((sequence_outputs[-1], sequence_outputs[-2], sequence_outputs[-3], sequence_outputs[-4]), 2)
embeds=torch.matmul(transforms,all_layers)
lstm_output, _ = self.lstm(embeds) # num_sents x max_words x 2 * hidden_dim
###########
# ATTENTION OVER MENTION
###########
attention_weights=self.attention2(self.tanh(self.attention1(lstm_output))) # num_sents x max_words x 1
attention_weights=torch.exp(attention_weights)
attx=attention_weights.squeeze(-1).unsqueeze(1).expand_as(matrix)
summer=attx*matrix
val=matrix*summer # num_sents x max_ents x max_words
val=val/torch.sum(1e-8+val,dim=2).unsqueeze(-1)
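        # Taken together, the exp / mask / normalize steps above implement a
        # softmax over attention scores restricted to the tokens inside each
        # mention span (positions outside a span are zeroed by the matrix).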
attended=torch.matmul(val, lstm_output) # num_sents x max_ents x 2 * hidden_dim
attended=attended.view(-1,2*self.hidden_dim)
lstm_output=lstm_output.contiguous()
position_output=lstm_output.view(-1, 2*self.hidden_dim)
# starts = token position of beginning of mention in flattened token list
start_output=torch.index_select(position_output, 0, starts)
# ends = token position of end of mention in flattened token list
end_output=torch.index_select(position_output, 0, ends)
# index = index of entity in flattened list of attended mention representations
mentions=torch.index_select(attended, 0, index)
width_embeds=self.width_embeddings(widths)
quote_embeds=self.quote_embeddings(quotes)
span_representation=torch.cat((start_output, end_output, mentions, width_embeds, quote_embeds), 1)
if doTrain:
return span_representation
else:
# detach tensor from computation graph at test time or memory will blow up
return span_representation.detach()
def forward(self, matrix, index, truth=None, names=None, token_positions=None, starts=None, ends=None, widths=None, input_ids=None, attention_mask=None, transforms=None, quotes=None):
doTrain=False
if truth is not None:
doTrain=True
zeroTensor=torch.FloatTensor([0]).to(device)
all_starts=None
all_ends=None
span_representation=None
all_all=[]
for b in range(len(matrix)):
span_reps=self.get_mention_reps(input_ids=input_ids[b], attention_mask=attention_mask[b], starts=starts[b], ends=ends[b], index=index[b], widths=widths[b], quotes=quotes[b], transforms=transforms[b], matrix=matrix[b], doTrain=doTrain)
if b == 0:
span_representation=span_reps
all_starts=starts[b]
all_ends=ends[b]
else:
span_representation=torch.cat((span_representation, span_reps), 0)
all_starts=torch.cat((all_starts, starts[b]), 0)
all_ends=torch.cat((all_ends, ends[b]), 0)
all_starts=all_starts.to(device)
all_ends=all_ends.to(device)
num_mentions,=all_starts.shape
running_loss=0
curid=-1
curid+=1
assignments=[]
seen={}
ch=0
token_positions=np.array(token_positions)
mention_index=np.arange(num_mentions)
unary_scores=self.unary3(self.tanh(self.drop_layer_020(self.unary2(self.tanh(self.drop_layer_020(self.unary1(span_representation)))))))
for i in range(num_mentions):
if i == 0:
# the first mention must start a new entity; this doesn't affect training (since the loss must be 0) so we can skip it.
if truth is None:
assignment=curid
curid+=1
assignments.append(assignment)
continue
MAX_PREVIOUS_MENTIONS=300
first=0
if truth is None:
if len(names[i]) == 1 and names[i][0].lower() in {"he", "his", "her", "she", "him", "they", "their", "them", "it", "himself", "its", "herself", "themselves"}:
MAX_PREVIOUS_MENTIONS=20
first=i-MAX_PREVIOUS_MENTIONS
if first < 0:
first=0
targets=span_representation[first:i]
cp=span_representation[i].expand_as(targets)
dists=[]
nesteds=[]
# get distance in mentions
distances=i-mention_index[first:i]
dists=vec_get_distance_bucket(distances)
dists=torch.LongTensor(dists).to(device)
distance_embeds=self.distance_embeddings(dists)
# get distance in sentences
sent_distances=token_positions[i]-token_positions[first:i]
sent_dists=vec_get_distance_bucket(sent_distances)
sent_dists=torch.LongTensor(sent_dists).to(device)
sent_distance_embeds=self.sent_distance_embeddings(sent_dists)
# is the current mention nested within a previous one?
res1=(all_starts[first:i] <= all_starts[i])
res2=(all_ends[i] <= all_ends[first:i])
nesteds=(res1*res2).long()
nesteds_embeds=self.nested_embeddings(nesteds)
res1=(all_starts[i] <= all_starts[first:i])
res2=(all_ends[first:i] <= all_ends[i])
nesteds=(res1*res2).long()
nesteds_embeds2=self.nested_embeddings(nesteds)
elementwise=cp*targets
concat=torch.cat((cp, targets, elementwise, distance_embeds, sent_distance_embeds, nesteds_embeds, nesteds_embeds2), 1)
preds=self.mention_mention3(self.tanh(self.drop_layer_020(self.mention_mention2(self.tanh(self.drop_layer_020(self.mention_mention1(concat)))))))
preds=preds + unary_scores[i] + unary_scores[first:i]
preds=preds.squeeze(-1)
if truth is not None:
# zero is the score for the dummy antecedent/new entity
preds=torch.cat((preds, zeroTensor))
golds_sum=0.
preds_sum=torch.logsumexp(preds, 0)
if len(truth[i]) == 1 and truth[i][-1] not in seen:
golds_sum=0.
seen[truth[i][-1]]=1
else:
golds=torch.index_select(preds, 0, torch.LongTensor(truth[i]).to(device))
golds_sum=torch.logsumexp(golds, 0)
# want to maximize (golds_sum-preds_sum), so minimize (preds_sum-golds_sum)
diff=preds_sum-golds_sum
running_loss+=diff
else:
assignment=None
if i == 0:
assignment=curid
curid+=1
else:
arg_sorts=torch.argsort(preds, descending=True)
k=0
while k < len(arg_sorts):
cand_idx=arg_sorts[k]
if preds[cand_idx] > 0:
cand_assignment=assignments[cand_idx+first]
assignment=cand_assignment
ch+=1
break
else:
assignment=curid
curid+=1
break
k+=1
assignments.append(assignment)
if truth is not None:
return running_loss
else:
return assignments
def get_mention_width_bucket(dist):
if dist < 10:
return dist + 1
return 11
def get_distance_bucket(dist):
if dist < 5:
return dist+1
elif dist >= 5 and dist <= 7:
return 6
elif dist >= 8 and dist <= 15:
return 7
elif dist >= 16 and dist <= 31:
return 8
elif dist >= 32 and dist <= 63:
return 9
return 10
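# Example (illustrative): distances 0-4 map to buckets 1-5; 5-7 -> 6;
# 8-15 -> 7; 16-31 -> 8; 32-63 -> 9; 64 and above -> 10.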
vec_get_distance_bucket=np.vectorize(get_distance_bucket)
def get_inquote(start, end, sent):
inQuote=False
quotes=[]
for token in sent:
if token == "“" or token == "\"":
if inQuote == True:
inQuote=False
else:
inQuote=True
quotes.append(inQuote)
for i in range(start, end+1):
if quotes[i] == True:
return 1
return 0
def print_conll(name, sents, all_ents, assignments, out):
doc_id, part_id=name
mapper=[]
idd=0
for ent in all_ents:
mapper_e=[]
for e in ent:
mapper_e.append(idd)
idd+=1
mapper.append(mapper_e)
out.write("#begin document (%s); part %s\n" % (doc_id, part_id))
for s_idx, sent in enumerate(sents):
ents=all_ents[s_idx]
for w_idx, word in enumerate(sent):
if w_idx == 0 or w_idx == len(sent)-1:
continue
label=[]
for idx, (start, end) in enumerate(ents):
if start == w_idx and end == w_idx:
eid=assignments[mapper[s_idx][idx]]
label.append("(%s)" % eid)
elif start == w_idx:
eid=assignments[mapper[s_idx][idx]]
label.append("(%s" % eid)
elif end == w_idx:
eid=assignments[mapper[s_idx][idx]]
label.append("%s)" % eid)
out.write("%s\t%s\t%s\t%s\t_\t_\t_\t_\t_\t_\t_\t_\t%s\n" % (doc_id, part_id, w_idx-1, word, '|'.join(label)))
if len(sent) > 2:
out.write("\n")
out.write("#end document\n")
def test(model, test_all_docs, test_all_ents, test_all_named_ents, test_all_max_words, test_all_max_ents, test_doc_names, outfile, iterr, goldFile, path_to_scorer, doTest=False):
out=open(outfile, "w", encoding="utf-8")
# for each document
for idx in range(len(test_all_docs)):
d,p=test_doc_names[idx]
d=re.sub("/", "_", d)
test_doc=test_all_docs[idx]
test_ents=test_all_ents[idx]
max_words=test_all_max_words[idx]
max_ents=test_all_max_ents[idx]
names=[]
for n_idx, sent in enumerate(test_ents):
for ent in sent:
name=test_doc[n_idx][ent[0]:ent[1]+1]
names.append(name)
named_index={}
for sidx, sentence in enumerate(test_all_named_ents[idx]):
for start, end, _ in sentence:
named_index[(sidx, start, end)]=1
is_named=[]
for sidx, sentence in enumerate(test_all_ents[idx]):
for (start, end) in sentence:
if (sidx, start, end) in named_index:
is_named.append(1)
else:
is_named.append(0)
test_matrix, test_index, test_token_positions, test_ent_spans, test_starts, test_ends, test_widths, test_data, test_masks, test_transforms, test_quotes=get_data(model, test_doc, test_ents, max_ents, max_words)
assignments=model.forward(test_matrix, test_index, names=names, token_positions=test_token_positions, starts=test_starts, ends=test_ends, widths=test_widths, input_ids=test_data, attention_mask=test_masks, transforms=test_transforms, quotes=test_quotes)
print_conll(test_doc_names[idx], test_doc, test_ents, assignments, out)
out.close()
if doTest:
print("Goldfile: %s" % goldFile)
print("Predfile: %s" % outfile)
bcub_f, avg=calc_coref_metrics.get_conll(path_to_scorer, gold=goldFile, preds=outfile)
print("Iter %s, Average F1: %.3f, bcub F1: %s" % (iterr, avg, bcub_f))
sys.stdout.flush()
return avg
def get_matrix(list_of_entities, max_words, max_ents):
matrix=np.zeros((max_ents, max_words))
for idx, (start, end) in enumerate(list_of_entities):
for i in range(start, end+1):
matrix[idx,i]=1
return matrix
def get_data(model, doc, ents, max_ents, max_words):
batchsize=128
token_positions=[]
ent_spans=[]
persons=[]
inquotes=[]
batch_matrix=[]
matrix=[]
max_words_batch=[]
max_ents_batch=[]
max_w=1
max_e=1
sent_count=0
for idx, sent in enumerate(doc):
if len(sent) > max_w:
max_w=len(sent)
if len(ents[idx]) > max_e:
max_e = len(ents[idx])
sent_count+=1
if sent_count == batchsize:
max_words_batch.append(max_w)
max_ents_batch.append(max_e)
sent_count=0
max_w=0
max_e=0
if sent_count > 0:
max_words_batch.append(max_w)
max_ents_batch.append(max_e)
batch_count=0
for idx, sent in enumerate(doc):
matrix.append(get_matrix(ents[idx], max_words_batch[batch_count], max_ents_batch[batch_count]))
if len(matrix) == batchsize:
batch_matrix.append(torch.FloatTensor(matrix))
matrix=[]
batch_count+=1
if len(matrix) > 0:
batch_matrix.append(torch.FloatTensor(matrix))
batch_index=[]
batch_quotes=[]
batch_ent_spans=[]
index=[]
abs_pos=0
sent_count=0
b=0
for idx, sent in enumerate(ents):
for i in range(len(sent)):
index.append(sent_count*max_ents_batch[b] + i)
s,e=sent[i]
token_positions.append(idx)
ent_spans.append(e-s)
phrase=' '.join(doc[idx][s:e+1])
inquotes.append(get_inquote(s, e, doc[idx]))
abs_pos+=len(doc[idx])
sent_count+=1
if sent_count == batchsize:
batch_index.append(torch.LongTensor(index))
batch_quotes.append(torch.LongTensor(inquotes))
batch_ent_spans.append(ent_spans)
index=[]
inquotes=[]
ent_spans=[]
sent_count=0
b+=1
if sent_count > 0:
batch_index.append(torch.LongTensor(index))
batch_quotes.append(torch.LongTensor(inquotes))
batch_ent_spans.append(ent_spans)
all_masks=[]
all_transforms=[]
all_data=[]
batch_masks=[]
batch_transforms=[]
batch_data=[]
# get ids and pad sentence
for sent in doc:
tok_ids=[]
input_mask=[]
transform=[]
all_toks=[]
n=0
for idx, word in enumerate(sent):
toks=model.tokenizer.tokenize(word)
all_toks.append(toks)
n+=len(toks)
cur=0
for idx, word in enumerate(sent):
toks=all_toks[idx]
ind=list(np.zeros(n))
for j in range(cur,cur+len(toks)):
ind[j]=1./len(toks)
cur+=len(toks)
transform.append(ind)
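            # Each appended row averages the BERT wordpiece vectors belonging
            # to one token (weight 1/len(toks)), so multiplying `transforms`
            # by the wordpiece outputs yields token-level embeddings.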
tok_id=model.tokenizer.convert_tokens_to_ids(toks)
assert len(tok_id) == len(toks)
tok_ids.extend(tok_id)
input_mask.extend(np.ones(len(toks)))
token=word.lower()
all_masks.append(input_mask)
all_data.append(tok_ids)
all_transforms.append(transform)
if len(all_masks) == batchsize:
batch_masks.append(all_masks)
batch_data.append(all_data)
batch_transforms.append(all_transforms)
all_masks=[]
all_data=[]
all_transforms=[]
if len(all_masks) > 0:
batch_masks.append(all_masks)
batch_data.append(all_data)
batch_transforms.append(all_transforms)
for b in range(len(batch_data)):
max_len = max([len(sent) for sent in batch_data[b]])
for j in range(len(batch_data[b])):
blen=len(batch_data[b][j])
for k in range(blen, max_len):
batch_data[b][j].append(0)
batch_masks[b][j].append(0)
for z in range(len(batch_transforms[b][j])):
batch_transforms[b][j][z].append(0)
for k in range(len(batch_transforms[b][j]), max_words_batch[b]):
batch_transforms[b][j].append(np.zeros(max_len))
batch_data[b]=torch.LongTensor(batch_data[b])
batch_transforms[b]=torch.FloatTensor(batch_transforms[b])
batch_masks[b]=torch.FloatTensor(batch_masks[b])
tok_pos=0
starts=[]
ends=[]
widths=[]
batch_starts=[]
batch_ends=[]
batch_widths=[]
sent_count=0
b=0
for idx, sent in enumerate(ents):
for i in range(len(sent)):
s,e=sent[i]
starts.append(tok_pos+s)
ends.append(tok_pos+e)
widths.append(get_mention_width_bucket(e-s))
sent_count+=1
tok_pos+=max_words_batch[b]
if sent_count == batchsize:
batch_starts.append(torch.LongTensor(starts))
batch_ends.append(torch.LongTensor(ends))
batch_widths.append(torch.LongTensor(widths))
starts=[]
ends=[]
widths=[]
tok_pos=0
sent_count=0
b+=1
if sent_count > 0:
batch_starts.append(torch.LongTensor(starts))
batch_ends.append(torch.LongTensor(ends))
batch_widths.append(torch.LongTensor(widths))
return batch_matrix, batch_index, token_positions, ent_spans, batch_starts, batch_ends, batch_widths, batch_data, batch_masks, batch_transforms, batch_quotes
def get_ant_labels(all_doc_sents, all_doc_ents):
max_words=0
max_ents=0
mention_id=0
big_ents={}
doc_antecedent_labels=[]
big_doc_ents=[]
for idx, sent in enumerate(all_doc_sents):
if len(sent) > max_words:
max_words=len(sent)
this_sent_ents=[]
all_sent_ents=sorted(all_doc_ents[idx], key=lambda x: (x[0], x[1]))
if len(all_sent_ents) > max_ents:
max_ents=len(all_sent_ents)
for (w_idx_start, w_idx_end, eid) in all_sent_ents:
this_sent_ents.append((w_idx_start, w_idx_end))
coref={}
if eid in big_ents:
coref=big_ents[eid]
else:
coref={mention_id:1}
vals=sorted(coref.keys())
if eid not in big_ents:
big_ents[eid]={}
big_ents[eid][mention_id]=1
mention_id+=1
doc_antecedent_labels.append(vals)
big_doc_ents.append(this_sent_ents)
return doc_antecedent_labels, big_doc_ents, max_words, max_ents
def read_conll(filename, model=None):
docid=None
partID=None
# collection
all_sents=[]
all_ents=[]
all_antecedent_labels=[]
all_max_words=[]
all_max_ents=[]
all_doc_names=[]
all_named_ents=[]
# for one doc
all_doc_sents=[]
all_doc_ents=[]
all_doc_named_ents=[]
# for one sentence
sent=[]
ents=[]
sent.append("[SEP]")
sid=0
named_ents=[]
cur_tokens=0
max_allowable_tokens=400
cur_tid=0
open_count=0
with open(filename, encoding="utf-8") as file:
for line in file:
if line.startswith("#begin document"):
all_doc_ents=[]
all_doc_sents=[]
all_doc_named_ents=[]
open_ents={}
open_named_ents={}
sid=0
docid=None
matcher=re.match("#begin document \((.*)\); part (.*)$", line.rstrip())
if matcher != None:
docid=matcher.group(1)
partID=matcher.group(2)
print(docid)
elif line.startswith("#end document"):
all_sents.append(all_doc_sents)
doc_antecedent_labels, big_ents, max_words, max_ents=get_ant_labels(all_doc_sents, all_doc_ents)
all_ents.append(big_ents)
all_named_ents.append(all_doc_named_ents)
all_antecedent_labels.append(doc_antecedent_labels)
all_max_words.append(max_words+1)
all_max_ents.append(max_ents+1)
all_doc_names.append((docid,partID))
else:
parts=re.split("\s+", line.rstrip())
if len(parts) < 2 or (cur_tokens >= max_allowable_tokens and open_count == 0):
sent.append("[CLS]")
all_doc_sents.append(sent)
ents=sorted(ents, key=lambda x: (x[0], x[1]))
all_doc_ents.append(ents)
all_doc_named_ents.append(named_ents)
ents=[]
named_ents=[]
sent=[]
sent.append("[SEP]")
sid+=1
cur_tokens=0
cur_tid=0
if len(parts) < 2:
continue
# +1 to account for initial [SEP]
tid=cur_tid+1
token=parts[3]
coref=parts[-1].split("|")
b_toks=model.tokenizer.tokenize(token)
cur_tokens+=len(b_toks)
cur_tid+=1
for c in coref:
if c.startswith("(") and c.endswith(")"):
c=re.sub("\(", "", c)
c=int(re.sub("\)", "", c))
ents.append((tid, tid, c))
elif c.startswith("("):
c=int(re.sub("\(", "", c))
if c not in open_ents:
open_ents[c]=[]
open_ents[c].append(tid)
open_count+=1
elif c.endswith(")"):
c=int(re.sub("\)", "", c))
assert c in open_ents
start_tid=open_ents[c].pop()
open_count-=1
ents.append((start_tid, tid, c))
ner=parts[10].split("|")
for c in ner:
try:
if c.startswith("(") and c.endswith(")"):
c=re.sub("\(", "", c)
c=int(re.sub("\)", "", c))
named_ents.append((tid, tid, c))
elif c.startswith("("):
c=int(re.sub("\(", "", c))
if c not in open_named_ents:
open_named_ents[c]=[]
open_named_ents[c].append(tid)
elif c.endswith(")"):
c=int(re.sub("\)", "", c))
assert c in open_named_ents
start_tid=open_named_ents[c].pop()
named_ents.append((start_tid, tid, c))
except:
pass
sent.append(token)
return all_sents, all_ents, all_named_ents, all_antecedent_labels, all_max_words, all_max_ents, all_doc_names
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-t','--trainData', help='Folder containing train data', required=False)
parser.add_argument('-v','--valData', help='Folder containing test data', required=False)
parser.add_argument('-m','--mode', help='mode {train, predict}', required=False)
parser.add_argument('-w','--model', help='modelFile (to write to or read from)', required=False)
parser.add_argument('-o','--outFile', help='outFile', required=False)
parser.add_argument('-s','--path_to_scorer', help='Path to coreference scorer', required=False)
args = vars(parser.parse_args())
mode=args["mode"]
modelFile=args["model"]
valData=args["valData"]
outfile=args["outFile"]
path_to_scorer=args["path_to_scorer"]
cache_dir = os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(0))
model = LSTMTagger.from_pretrained('bert-base-cased',
cache_dir=cache_dir,
freeze_bert=True)
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model.to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
lr_scheduler=ExponentialLR(optimizer, gamma=0.999)
if mode == "train":
trainData=args["trainData"]
all_docs, all_ents, all_named_ents, all_truth, all_max_words, all_max_ents, doc_ids=read_conll(trainData, model)
test_all_docs, test_all_ents, test_all_named_ents, test_all_truth, test_all_max_words, test_all_max_ents, test_doc_ids=read_conll(valData, model)
best_f1=0.
cur_steps=0
best_idx=0
patience=10
for i in range(100):
model.train()
bigloss=0.
for idx in range(len(all_docs)):
if idx % 10 == 0:
print(idx, "/", len(all_docs))
sys.stdout.flush()
max_words=all_max_words[idx]
max_ents=all_max_ents[idx]
matrix, index, token_positions, ent_spans, starts, ends, widths, input_ids, masks, transforms, quotes=get_data(model, all_docs[idx], all_ents[idx], max_ents, max_words)
if max_ents > 1:
model.zero_grad()
loss=model.forward(matrix, index, truth=all_truth[idx], names=None, token_positions=token_positions, starts=starts, ends=ends, widths=widths, input_ids=input_ids, attention_mask=masks, transforms=transforms, quotes=quotes)
loss.backward()
optimizer.step()
cur_steps+=1
if cur_steps % 100 == 0:
lr_scheduler.step()
bigloss+=loss.item()
print(bigloss)
model.eval()
doTest=False
if i >= 2:
doTest=True
avg_f1=test(model, test_all_docs, test_all_ents, test_all_named_ents, test_all_max_words, test_all_max_ents, test_doc_ids, outfile, i, valData, path_to_scorer, doTest=doTest)
if doTest:
if avg_f1 > best_f1:
torch.save(model.state_dict(), modelFile)
print("Saving model ... %.3f is better than %.3f" % (avg_f1, best_f1))
best_f1=avg_f1
best_idx=i
if i-best_idx > patience:
print ("Stopping training at epoch %s" % i)
break
elif mode == "predict":
model.load_state_dict(torch.load(modelFile, map_location=device))
model.eval()
test_all_docs, test_all_ents, test_all_named_ents, test_all_truth, test_all_max_words, test_all_max_ents, test_doc_ids=read_conll(valData, model=model)
test(model, test_all_docs, test_all_ents, test_all_named_ents, test_all_max_words, test_all_max_ents, test_doc_ids, outfile, 0, valData, path_to_scorer, doTest=True)
| 24,602 | 24.233846 | 255 | py |
lrec2020-coref | lrec2020-coref-master/scripts/calc_coref_metrics.py | import subprocess, re, sys
def get_coref_score(metric, path_to_scorer, gold=None, preds=None):
output=subprocess.check_output(["perl", path_to_scorer, metric, preds, gold]).decode("utf-8")
output=output.split("\n")[-3]
matcher=re.search("Coreference: Recall: \(.*?\) (.*?)% Precision: \(.*?\) (.*?)% F1: (.*?)%", output)
if matcher is not None:
recall=float(matcher.group(1))
precision=float(matcher.group(2))
f1=float(matcher.group(3))
return recall, precision, f1
def get_conll(path_to_scorer, gold=None, preds=None):
bcub_r, bcub_p, bcub_f=get_coref_score("bcub", path_to_scorer, gold, preds)
muc_r, muc_p, muc_f=get_coref_score("muc", path_to_scorer, gold, preds)
ceaf_r, ceaf_p, ceaf_f=get_coref_score("ceafe", path_to_scorer, gold, preds)
print("bcub:\t%.1f" % bcub_f)
print("muc:\t%.1f" % muc_f)
print("ceaf:\t%.1f" % ceaf_f)
avg=(bcub_f + muc_f + ceaf_f)/3.
print("Average F1: %.1f" % (avg))
# Generate Latex table
# print("%.1f&%.1f&%.1f&%.1f" % (bcub_f, muc_f, ceaf_f, avg))
return bcub_f, avg
if __name__ == "__main__":
goldFile=sys.argv[1]
predFile=sys.argv[2]
scorer=sys.argv[3]
bcub_f, avg=get_conll(scorer, gold=goldFile, preds=predFile)
| 1,191 | 30.368421 | 102 | py |
lrec2020-coref | lrec2020-coref-master/scripts/create_crossval_train.py | import sys
def gen(path_to_scorer, train_out_file, pred_out_file):
train_out=open(train_out_file, "w", encoding="utf-8")
pred_out=open(pred_out_file, "w", encoding="utf-8")
for i in range(10):
train_out.write ("python3 scripts/bert_coref.py -m train -w models/crossval/%s.model -t data/litbank_tenfold_splits/%s/train.conll -v data/litbank_tenfold_splits/%s/dev.conll -o preds/crossval/%s.dev.pred -s %s> logs/crossval/%s.log 2>&1\n" % (i, i, i, i, path_to_scorer, i))
pred_out.write("python3 scripts/bert_coref.py -m predict -w models/crossval/%s.model -v data/litbank_tenfold_splits/%s/test.conll -o preds/crossval/%s.goldmentions.test.preds -s %s\n" % (i, i, i, path_to_scorer))
train_out.close()
pred_out.close()
gen(sys.argv[1], sys.argv[2], sys.argv[3]) | 774 | 47.4375 | 277 | py |
lrec2020-coref | lrec2020-coref-master/scripts/create_crossval.py | """
Create 10-fold cross-validation data from LitBank data.
"""
import sys, os, re
def create_data(ids, infolder, outfile):
out=open(outfile, "w", encoding="utf-8")
for idd in ids:
infile="%s/%s.conll" % (infolder, idd)
with open(infile) as file:
for line in file:
out.write("%s\n" % line.rstrip())
out.close()
def get_ids_from_filename(filename):
ids=[]
with open(filename) as file:
for line in file:
idd=line.rstrip()
idd=re.sub(".tsv$", "", idd)
ids.append(idd)
return ids
def proc(split_folder, infolder, out_top_folder):
for split in range(10):
train_ids=get_ids_from_filename("%s/%s/train.ids" % (split_folder, split))
dev_ids=get_ids_from_filename("%s/%s/dev.ids" % (split_folder, split))
test_ids=get_ids_from_filename("%s/%s/test.ids" % (split_folder, split))
outfolder="%s/%s" % (out_top_folder, split)
try:
os.makedirs(outfolder)
except:
pass
outTrainFile="%s/%s" % (outfolder, "train.conll")
create_data(train_ids, infolder, outTrainFile)
outTestFile="%s/%s" % (outfolder, "test.conll")
create_data(test_ids, infolder, outTestFile)
outDevFile="%s/%s" % (outfolder, "dev.conll")
create_data(dev_ids, infolder, outDevFile)
# python scripts/create_crossval.py data/litbank_tenfold_splits data/original/conll/ data/litbank_tenfold_splits
filename=sys.argv[1]
infolder=sys.argv[2]
out_top_folder=sys.argv[3]
proc(filename, infolder, out_top_folder) | 1,431 | 24.122807 | 113 | py |
NGS | NGS-master/main_SNPDistancesWholeGenomes.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 25 17:46:00 2018
Changchuan Yin
Dept. of Mathematics, Statistics and Computer Science.
University of Illinois at Chicago
Email: [email protected]
Citation:
Yin., C., & Yau., S.S.-T (2018). Whole genome single nucleotide polymorphism genotyping of Staphylococcus aureus.
Communications in Information and Systems, 2018
"""
#-----------------------------------------------------------------------------------------------
# Program to compute the pair-wise distances between SNP variants from .vcf files of whole genome,
# the vcf files were created by mapping NGS reads of a genome onto a reference genome.
# The distance is for constructing a phylogenetic tree.
#-----------------------------------------------------------------------------------------------
import gene_snp as gs
#------------------------------------------------------------------------------
# Compute the pair-wise Jaccard distances between two vcf calls
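# The distance is assumed (from the name getJaccabDistSNP) to be the Jaccard
# distance between the two SNP sets: d(A, B) = 1 - |A ∩ B| / |A ∪ B|.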
file = open('distSNPs.txt', 'w')
'''
genes={'SRR7295360.vcf':'S.aureus HPV107','SRR7295358.vcf':'S.aureus NRS2','ERR2541869.vcf':'S.aureus ST88','ERR2275539.vcf':'S.aureus MSSA-HWC2014',\
'SRR5714648.vcf':'S.aureus MSSA-H41','SRR6675978.vcf':'S.aureus MSSA-ST97','ERR2541870.vcf':'S.aureus Nigeria','SRR1014703.vcf':'S.aureus USA400-BAA1752 CC1-ST1',\
'SRR1014705.vcf':'S.aureus CC80-24329-ST153','SRR1014706.vcf':'S.aureus USA700-NRS386 CC72-ST72','SRR1014707.vcf':'S.aureus USA700-GA-442 CC72-ST72','SRR1014709.vcf':'S.aureus USA800-NY-313 CC5-ST83',\
'SRR1014711.vcf':'S.aureus USA100-OR-293 CC5-ST5','SRR1014712.vcf':'S.aureus USA100-NY-76 CC5-ST5','SRR1014713.vcf':'S.aureus USA100-NRS382 CC5-ST5','SRR1014714.vcf':'S.aureus USA100-NY-54 CC5-ST105',\
'SRR1014715.vcf':'S.aureus USA100-CA-126 CC5-ST5','SRR1014716.vcf':'S.aureus USA100-CA-248 CC5-ST5','SRR1014717.vcf':'S.aureus USA1000-CA-629 CC59-ST87','ERR377327.vcf':'S.aureus USA300-RU-CAMP-29c',\
'ERR2276455.vcf':'S.aureus USA300-RU-CAMP-P29a','ERR2276459.vcf':'S.aureus USA300-RU-CAMP-P29b','ERR2541868.vcf':'S.aureus ST88-a','ERR2541871.vcf':'S.aureus ST88-b',\
'SRR6304957.vcf':'S.aureus PtA04-T3','SRR6304955.vcf':'S.aureus PtA02-T1','SRR5617496.vcf':'S.aureus USA300-22862_R1','SRR1014694.vcf':'S.aureus USA300-R-54',\
'SRR1014708.vcf':'S.aureus USA800-NRS387 CC5-ST5','SRR4019421.vcf':'S.aureus TX-AML1601921','SRR1014724.vcf':'S.aureus USA600-CA-347 CC45-ST45','SRR1014722.vcf':'S.aureus USA600-BAA1754 CC45-ST45',\
'SRR1014700.vcf':'S.aureus USA500-NRS385E','SRR1014720.vcf':'S.aureus USA200-NRS383 CC30-ST346','ERR2562460.vcf':'S.aureus CC398-ST899','SRR6475664.vcf':'S.aureus ST398','ERR1795563.vcf':'S.aureus MSSA-SHAIPI'}
'''
genes={'SRR7295360.vcf':'S.aureus HPV107','SRR7295358.vcf':'S.aureus NRS2','ERR2541869.vcf':'S.aureus ST88','ERR2275539.vcf':'S.aureus MSSA-HWC2014'}
n=len(genes)
keys=genes.keys()
keys=list(keys)
names=genes.values()
names=list(names)
if len(names) > len(set(names)):
print('Not unique strain names!')
else:
for i in range(0,n):
keyA=keys[i]
nameA=names[i]
callsetA,variantsA,variantSNPsA = gs.getVariants(keyA)
for j in range(i+1,n):
keyB=keys[j]
nameB=names[j]
callsetB,variantsB,variantSNPsB = gs.getVariants(keyB)
dist=gs.getJaccabDistSNP(variantSNPsA,variantSNPsB)
print(keyA,keyB,dist)
strDist=nameA+','+nameB+','+str(dist)+'\n'
file.write(strDist)
file.close()
| 3,494 | 50.397059 | 216 | py |
NGS | NGS-master/gene_snp.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 25 17:46:00 2018
Changchuan Yin
Dept. of Mathematics, Statistics and Computer Science.
University of Illinois at Chicago
Email: [email protected]
Citation:
Yin., C., & Yau., S.S.-T (2018). Whole genome single nucleotide polymorphism genotyping of Staphylococcus aureus.
Communications in Information and Systems, 2018
"""
#------------------------------------------------------------------------------------------------
# Library to compute SNP variants from a .vcf file, the vcf file was created by mapping NGS reads
# of a genome onto a reference genome.
# Required module library: scikit-allel/https://scikit-allel.readthedocs.io/en/latest/
# pip install scikit-allel
# pip install h5py
#-----------------------------------------------------------------------------------------------
import numpy as np
import h5py
from pathlib import Path
import allel
#----------------------------------------------------------------------------
# Helper functions
def getEndItems(data):
x=getSubList(data)
se=[]
seData=[]
for y in x:
if len(y)>1:
se.append(y[0])
se.append(y[-1])
m=y[-1]-y[0]+1
se.append(m)
seData.append(se)
se=[]
return seData
def getZeros(aList):
idxs = []
for i in range(len(aList)):
if aList[i] == 0:
idxs.append(i)
return idxs
# Find the longest zero run among runs encoded as [start, end, length] triples
def getLongestZeros(x):
listM=[]
for i in x:
listM.append(i[2])
maxV=max(listM)
idx=listM.index(maxV)
zeroV=x[idx]
return [idx,zeroV]
# Test: each inner array is the [start, end, length] of one zero run
'''
x=[[12, 14, 3], [19, 22, 4], [24, 45, 22], [32, 49, 18]]
[idx,zeros]=getLongestZeros(x)
print(idx,zeros)
'''
def splitList(n):
  # Return the indices at which runs of consecutive values break
return [(x+1) for x,y in zip(n, n[1:]) if y-x != 1]
def getSubList(myList):
  # Split the list into runs of consecutive values, based on the break indices
myIndex = splitList(myList)
output = list()
prev = 0
for index in myIndex:
newList = [ x for x in myList[prev:] if x < index]
output.append(newList)
prev += len(newList)
output.append([ x for x in myList[prev:]])
return output
#data =[ 1, 4,5,6,7, 8,10, 15,16,17,18, 22, 25,26,27,28]
#print(getSubList(data))
#--------------------------------------------------------------------------
# create variants
def createVariants(poss,refs,alts):
variants={}
i=0
for snp in zip(refs,alts):
snpx=snp[0]+'->'+snp[1]
pos=poss[i]
i=i+1
variants[pos]=snpx
return variants
#--------------------------------------------------------------------------
# Get the Jaccard distance between two SNP variant sets
# Inputs: variantsA, variantsB (dicts mapping positions to the SNPs at those positions)
# Output: distance
def getJaccabDistSNP(variantsA,variantsB):
key1=variantsA.keys()
key2=variantsB.keys()
key12=list(set().union(key1,key2))
#count how many SNPs in the same corresponding positions
commonSNP=0
for key, value in variantsA.items():
if key in key2:
if value==variantsB[key]:
commonSNP=commonSNP+1
distance=1-commonSNP/len(key12)
return distance
#Test
'''
poss=[100,102,250,300,302,400,450,1000]
refs=['A','C','G','A','A','C','G','A']
alts=['T','G','C','T','T','G','C','T']
variants=createVariants(poss,refs,alts)
print('Test',variants)
poss2=[100,102,250,300,302,400,450,1000,1001]
refs2=['A','C','G','A','A','C','G','A','G']
alts2=['G','G','C','T','T','G','C','C','A']
variants2=createVariants(poss2,refs2,alts2)
dist=getJaccabDistSNP(variants,variants2)
print('Distance:',dist)
#Split a variant into two variants (high SNPs and low SNPs)
poss=[100,102,250,300,302,400,450,1000]
refs=['A','C','G','A','A','C','G','A']
alts=['T','G','C','T','T','G','C','T']
variants=createVariants(poss,refs,alts)
print('Test',variants)
'''
# Filter all variants into two variants (high variant SNPs and low variant SNPs)
# Inputs: all variants: variantSNPs, and the list of the positions of high SNPs
# Output: variants of high and low SNPs
def getHighLosVariantSNPs(variantSNPs,posHighSNPs):
#posHighSNPs=[102,400]
highSNPs=[]
lowSNPs=[]
variantSNPsH={}
variantSNPsL={}
poss=variantSNPs.keys()
for i in poss:
if i in posHighSNPs:
value=variantSNPs[i]
highSNPs.append(value)
variantSNPsH[i]=value
else:
value=variantSNPs[i]
lowSNPs.append(value)
variantSNPsL[i]=value
#print(variantSNPsH)
#print(variantSNPsL)
return [variantSNPsH,variantSNPsL]
# TEST:getHighLosVariantSNPs(variantSNPs,posHighSNPs):
'''
poss=[100,102,250,300,302,400,450,1000]
refs=['A','C','G','A','A','C','G','A']
alts=['T','G','C','T','T','G','C','T']
posHighSNPs=[102,400] # positions of high SNPs
variantSNPs=createVariants(poss,refs,alts) #input
[variantSNPsH,variantSNPsL]=getHighLosVariantSNPs(variantSNPs,posHighSNPs)
'''
#d = {0:1, 1:2, 2:3, 10:4, 11:5, 12:6, 100:7, 101:8, 102:9, 200:10, 201:11, 202:12}
#d1 = {k: d[k] for k in filter(lambda x: 1 < x <= 11, d.keys())}
# Note: the high-SNP regions are defined in main_SNPHistograms.py
def getVariantSNPsH(variantSNPs):
variantSNPsH = {k: variantSNPs[k] for k in filter(lambda x: 1462625 <= x <= 1506875 or 1832875 <= x <= 1859875 or 1924875 <= x <= 1962375 or 2034875 <= x <= 2085375 , variantSNPs.keys())}
return variantSNPsH
# Note: the low-SNP regions are defined in main_SNPHistograms.py
def getVariantSNPsL(variantSNPs):
variantSNPsL = {k: variantSNPs[k] for k in filter(lambda x: x<1462625 or 1506875<x<1832875 or 1859875<x<1924875 or 1962375< x <2034875 or x> 2085375, variantSNPs.keys())}
return variantSNPsL
'''
poss=[100,102,250,300,302,400,450,1000]
refs=['A','C','G','A','A','C','G','A']
alts=['T','G','C','T','T','G','C','T']
# high pos: 102-300,400-450
variantSNPs=createVariants(poss,refs,alts) #input
[variantSNPsH]=getVariantSNPsH(variantSNPs)
print(variantSNPsH)
'''
#--------------------------------------------------------------------------
# Get the cosine distance of two variants (development stub)
# NOTE: this currently duplicates the Jaccard computation of getJaccabDistSNP
# Inputs: variantsA, variantsB (positions and SNPs at the positions)
# Output: distance
def getCosineDistSNP_dev(variantsA,variantsB):
key1=variantsA.keys()
key2=variantsB.keys()
key12=list(set().union(key1,key2))
#count how many SNPs in the same corresponding positions
commonSNP=0
for key, value in variantsA.items():
if key in key2:
if value==variantsB[key]:
commonSNP=commonSNP+1
distance=1-commonSNP/len(key12)
#print('SNP distance=',distance)
return distance
#Test
'''
poss=[100,102,250,300,302,400,450,1000]
refs=['A','C','G','A','A','C','G','A']
alts=['T','G','C','T','T','G','C','T']
variants=createVariants(poss,refs,alts)
print('Test',variants)
poss2=[100,102,250,300,302,400,450,1000,1001]
refs2=['A','C','G','A','A','C','G','A','G']
alts2=['G','G','C','T','T','G','C','C','A']
variants2=createVariants(poss2,refs2,alts2)
dist=getJaccabDistSNP(variants,variants2)
print('Distance:',dist)
'''
#--------------------------------------------------------------------------
# Convert vcf file to H5
# Input: vcf file name
# output: H5 file with the same name generated in the same folder
def convertVCFToH5(vcfFileName):
names=vcfFileName.split('.')
h5FileName=names[0]+'.h5'
h5File = Path(h5FileName)
if h5File.is_file():
print(' ')
else:
    print('Converting VCF to HDF5')
    allel.vcf_to_hdf5(vcfFileName, h5FileName, fields='*', overwrite=True) # The saved data can then be accessed via the h5py library
#--------------------------------------------------------------------------
# get variants from vcf file
# Input: vcf file
# Outputs:callset,variants,variantSNPs
def getVariants(vcfFileName):
  convertVCFToH5(vcfFileName) # Need to convert to H5 format to use VariantChunkedTable
names=vcfFileName.split('.')
h5FileName=names[0]+'.h5'
callset = h5py.File(h5FileName, mode='r')
chrom = 'variants'
variants = allel.VariantChunkedTable(callset[chrom],index='POS')#['variants'], names=['POS', 'REF', 'ALT'],index='POS')
poss=variants['POS']
refs=variants['REF']
alts=variants['ALT'][:, 0]
variantSNPs={} #make a new format of variants: pos:A->T etc.
i=0
for snp in zip(refs,alts):
snpx=snp[0]+'->'+snp[1]
pos=poss[i]
i=i+1
variantSNPs[pos]=snpx
return callset,variants,variantSNPs
#--------------------------------------------------------------------------
# Get the distribution density (histogram) of SNP variants
# Input: callset, window size
# Output: SNP variant counts (y) per window, with window midpoints (x)
def getSNPHistogram(callset, winSize):
pos = allel.SortedIndex(callset['variants/POS'])
bins = np.arange(0, pos.max(), winSize)
# use window midpoints as x coordinate
x = (bins[1:] + bins[:-1])/2
# compute variant density in each window
y, _ = np.histogram(pos, bins=bins)
#y = y / windowSize
return [x,y]
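# TEST: a minimal sketch combining getVariants and getSNPHistogram
# ('sample.vcf' is a hypothetical file name)
'''
callset,variants,variantSNPs = getVariants('sample.vcf')
[x,y] = getSNPHistogram(callset, winSize=250)
print(x[:5],y[:5]) # window midpoints and variant counts per window
'''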
| 8,858 | 26.512422 | 189 | py |
NGS | NGS-master/main_SNPDistancesHighLowSNPs.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 25 17:46:00 2018
Changchuan Yin
Dept. of Mathematics, Statistics and Computer Science.
University of Illinois at Chicago
Email: [email protected]
Citation:
Yin., C., & Yau., S.S.-T (2018). Whole genome single nucleotide polymorphism genotyping of Staphylococcus aureus.
Communications in Information and Systems, 2018
"""
#-----------------------------------------------------------------------------------------------
# Program to compute the pair-wise distances between SNP variants from .vcf files, the vcf files
# were created by mapping NGS reads of a genome onto a reference genome.
# The distance is for constructing a phylogenetic tree.
#-----------------------------------------------------------------------------------------------
import gene_snp as gs
#----------------------------------------------------------------------------------------------
# 1. The VCF files for computing the distances. VCF file name: genome name
genes = {'SRR7295360.vcf':'S.aureus HPV107','SRR7295358.vcf':'S.aureus NRS2','ERR2541869.vcf':'S.aureus ST88','ERR2275539.vcf':'S.aureus MSSA-HWC2014'}
n=len(genes)
keys = genes.keys()
keys = list(keys)
names = genes.values()
names = list(names)
print(names)
# 2. Compute the pair-wise Jaccard distances of variants; the outputs are text files (distance lists),
# distancesHighSNPs.txt and distancesLowSNPs.txt.
# Note: these distance files are the input to the MATLAB program, phylogeneticTreeByDistances.m, for building the phylogenetic tree.
# https://github.com/cyinbox/NGS/blob/master/phylogeneticTreeByDistances.m
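# A minimal sketch (assuming each row is "nameA,nameB,distance", as written below) of
# reading a distance file back in Python for a quick sanity check:
'''
with open('distancesHighSNPs.txt') as f:
  for line in f:
    nameA,nameB,dist = line.rstrip().split(',')
    print(nameA,nameB,float(dist))
'''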
#------------------------------------------------------------------------------
# 2.1 High SNP distances
file = open('distancesHighSNPs.txt', 'w')
if len(names) > len(set(names)):
print('Not unique strain names!') # Only unique names are allowed in phylogenetic tree
else:
for i in range(0,n):
keyA=keys[i]
nameA=names[i]
print(nameA)
callsetA,variantsA,variantSNPsA = gs.getVariants(keyA)
variantSNPsH_a = gs.getVariantSNPsH(variantSNPsA)
for j in range(i+1,n):
keyB = keys[j]
nameB = names[j]
print(nameB)
callsetB,variantsB,variantSNPsB = gs.getVariants(keyB)
variantSNPsH_b = gs.getVariantSNPsH(variantSNPsB)
dist=gs.getJaccabDistSNP(variantSNPsH_a,variantSNPsH_b)
print(keyA,keyB,dist)
strDist = nameA+','+nameB+','+str(dist)+'\n'
file.write(strDist)
file.close()
print('High SNP distances computed successfully.')
#------------------------------------------------------------------------------
# 2.2. Low SNP distances
file = open('distancesLowSNPs.txt', 'w')
if len(names) > len(set(names)):
print('Not unique strain names!') # Only unique names are allowed in phylogenetic tree
else:
for i in range(0,n):
keyA = keys[i]
nameA = names[i]
print(nameA)
callsetA,variantsA,variantSNPsA = gs.getVariants(keyA)
variantSNPsL_a = gs.getVariantSNPsL(variantSNPsA)
for j in range(i+1,n):
keyB = keys[j]
nameB = names[j]
print(nameB)
callsetB,variantsB,variantSNPsB = gs.getVariants(keyB)
variantSNPsL_b = gs.getVariantSNPsL(variantSNPsB)
dist = gs.getJaccabDistSNP(variantSNPsL_a,variantSNPsL_b)
print(keyA,keyB,dist)
strDist = nameA+','+nameB+','+str(dist)+'\n'
file.write(strDist)
file.close()
print('Low SNP distances computed successfully.')
| 3,458 | 33.939394 | 151 | py |
NGS | NGS-master/main_SNPHistograms.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 25 17:46:00 2018
Changchuan Yin
Dept. of Mathematics, Statistics and Computer Science.
University of Illinois at Chicago
Email: [email protected]
Citation:
Yin., C., & Yau., S.S.-T (2018). Whole genome single nucleotide polymorphism genotyping of Staphylococcus aureus.
Communications in Information and Systems, 2018
"""
#------------------------------------------------------------------------------
# Program to compute SNP variants from a .vcf file
# - Histogram of the SNPs
# - High and low SNP regions
#-----------------------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import gene_snp as gs
#------------------------------------------------------------------------------
# 0. Input vcf File
#
vcfFileName='SRR5617496.vcf'#:'S.aureus USA300-22862_R1'
names=vcfFileName.split('.')
h5FileName=names[0]+'.h5'
varants_png=names[0]+'.png'
varants_eps=names[0]+'.eps'
varants_pdf=names[0]+'.pdf'
#-----------------------------------------------------------------------------
# 1.Get variants from the vcf file
callset,variants,variantSNPs = gs.getVariants(vcfFileName)
print(callset)
x=sorted(callset.keys())
print('Sorted keys:',x)
chrom = 'variants/CHROM'
print('\n')
print('Chromosomes',callset[chrom])
sample=callset['samples']
print('Sample',sample)
#Reference
ref = callset['variants/REF']
print('REF',ref)
print('Length:',len(ref))
qual=callset['variants/QUAL']
print('QUAL',qual)
#------------------------------------------------------------------------------
# 2. Get and plot the hisgrogram of the variants
winSize=250
[pos,SNPs]=gs.getSNPHistogram(callset, winSize=winSize)
title=names[0] + ' variant histogram, window size 250 bp'
fig, ax = plt.subplots(figsize=(24, 6))
ax.stem(pos,SNPs,'-.')
ax.set_xlabel('chromosome position (bp)',fontsize=20)
ax.set_ylabel('variant count',fontsize=20)
ax.set_title(title)
plt.savefig(varants_png, dpi = 300)
plt.savefig(varants_eps, dpi = 300)
plt.show()
#----------------------------------------------------------------------------------
# 3. Get the high SNPs regions
posHighSNPs = []
posLowSNPs = []
threshold = 10 # When the SNP count is larger than threshold 10, the SNP position is recorded
for pos_, SNPs_ in zip(pos, SNPs):
if SNPs_>= threshold:
print(int(pos_))
posHighSNPs.append(pos_)
else:
posLowSNPs.append(pos_)
print('Positions of high SNPs are',posHighSNPs)
#---------------------------------------------------------------------------------
# 4. Results of high and low SNPs
'''
# Using [pos,SNPs]=gs.getSNPHistogram(callset, winSize=250) for typical MRSA genomes,
# the following regions that have high SNPs after checking the positions of high SNPs
#289625-294125 size: 4500
#405625-407375 size: 1750
#550375-554875 size: 4500
#The following regions are selected for phylogenetic tree (high SNP) since each region is long
1462625-1506875 size: 44250
1832875-1859875 size: 27000
1924875-1962375 size: 37500
2034875-2085375 size: 50500
'''
'''
# In gene_snp.py file, add these two functions.
def getVariantSNPsH(variantSNPs):
variantSNPsH = {k: variantSNPs[k] for k in filter(lambda x: 1462625 <= x <= 1506875 or 1832875 <= x <= 1859875 or 1924875 <= x <= 1962375 or 2034875 <= x <= 2085375 , variantSNPs.keys())}
return variantSNPsH
# Note: The SNPs Lowh regions are defined from file: main_SNPHistograms.py
def getVariantSNPsL(variantSNPs):
variantSNPsL = {k: variantSNPs[k] for k in filter(lambda x: x<1462625 or 1506875<x<1832875 or 1859875<x<1924875 or 1962375< x <2034875 or x> 2085375, variantSNPs.keys())}
return variantSNPsL
''' | 3,677 | 31.548673 | 189 | py |
CGMM | CGMM-master/score.py | from typing import List
import torch
from pydgn.training.callback.metric import Metric
class CGMMCompleteLikelihoodScore(Metric):
@property
def name(self) -> str:
return 'Complete Log Likelihood'
def __init__(self, use_as_loss=False, reduction='mean', use_nodes_batch_size=True):
super().__init__(use_as_loss=use_as_loss, reduction=reduction, use_nodes_batch_size=use_nodes_batch_size)
def on_training_batch_end(self, state):
self.batch_metrics.append(state.batch_score[self.name].item())
if state.model.is_graph_classification:
self.num_samples += state.batch_num_targets
else:
# This works for unsupervised CGMM
self.num_samples += state.batch_num_nodes
def on_eval_epoch_end(self, state):
state.update(epoch_score={self.name: torch.tensor(self.batch_metrics).sum() / self.num_samples})
self.batch_metrics = None
self.num_samples = None
def on_eval_batch_end(self, state):
self.batch_metrics.append(state.batch_score[self.name].item())
if state.model.is_graph_classification:
self.num_samples += state.batch_num_targets
else:
# This works for unsupervised CGMM
self.num_samples += state.batch_num_nodes
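    # outputs is the tuple returned by CGMM.forward:
    # (likely_labels, embeddings, complete_log_likelihood, true_log_likelihood, num_nodes);
    # outputs[2] below is therefore the complete log likelihood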
def _score_fun(self, targets, *outputs, batch_loss_extra):
return outputs[2]
def forward(self, targets: torch.Tensor, *outputs: List[torch.Tensor], batch_loss_extra: dict = None) -> dict:
return outputs[2]
class CGMMTrueLikelihoodScore(Metric):
@property
def name(self) -> str:
return 'True Log Likelihood'
def __init__(self, use_as_loss=False, reduction='mean', use_nodes_batch_size=True):
super().__init__(use_as_loss=use_as_loss, reduction=reduction, use_nodes_batch_size=use_nodes_batch_size)
def on_training_batch_end(self, state):
self.batch_metrics.append(state.batch_score[self.name].item())
if state.model.is_graph_classification:
self.num_samples += state.batch_num_targets
else:
# This works for unsupervised CGMM
self.num_samples += state.batch_num_nodes
def on_eval_batch_end(self, state):
self.batch_metrics.append(state.batch_score[self.name].item())
if state.model.is_graph_classification:
self.num_samples += state.batch_num_targets
else:
# This works for unsupervised CGMM
self.num_samples += state.batch_num_nodes
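    # outputs[3] is the true log likelihood term of the tuple returned by CGMM.forward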
def _score_fun(self, targets, *outputs, batch_loss_extra):
return outputs[3]
def forward(self, targets: torch.Tensor, *outputs: List[torch.Tensor], batch_loss_extra: dict = None) -> dict:
return outputs[3]
| 2,750 | 36.175676 | 114 | py |
CGMM | CGMM-master/cgmm_embedding_task.py | import os
import shutil
import torch
from cgmm_incremental_task import CGMMTask
# This works with graph classification only
from pydgn.static import LOSS, SCORE
class EmbeddingCGMMTask(CGMMTask):
def run_valid(self, dataset_getter, logger):
"""
This function returns the training and validation or test accuracy
:return: (training accuracy, validation/test accuracy)
"""
batch_size = self.model_config.layer_config['batch_size']
shuffle = self.model_config.layer_config['shuffle'] \
if 'shuffle' in self.model_config.layer_config else True
# Instantiate the Dataset
dim_node_features = dataset_getter.get_dim_node_features()
dim_edge_features = dataset_getter.get_dim_edge_features()
dim_target = dataset_getter.get_dim_target()
layers = []
l_prec = self.model_config.layer_config['previous_layers_to_use'].split(',')
concatenate_axis = self.model_config.layer_config['concatenate_on_axis']
max_layers = self.model_config.layer_config['max_layers']
assert concatenate_axis > 0, 'You cannot concat on the first axis for design reasons.'
dict_per_layer = []
stop = False
depth = 1
while not stop and depth <= max_layers:
# Change exp path to allow Stop & Resume
self.exp_path = os.path.join(self.root_exp_path, f'layer_{depth}')
if os.path.exists(os.path.join(self.root_exp_path, f'layer_{depth + 1}')):
# print("skip layer", depth)
depth += 1
continue
# load output will concatenate in reverse order
prev_outputs_to_consider = [(depth - int(x)) for x in l_prec if (depth - int(x)) > 0]
train_out = self._create_extra_dataset(prev_outputs_to_consider, mode='train', depth=depth)
val_out = self._create_extra_dataset(prev_outputs_to_consider, mode='validation', depth=depth)
train_loader = dataset_getter.get_inner_train(batch_size=batch_size, shuffle=False, extra=train_out)
val_loader = dataset_getter.get_inner_val(batch_size=batch_size, shuffle=False, extra=val_out)
            # ==== # WARNING: WE ARE JUST PRECOMPUTING OUTER_TEST EMBEDDINGS FOR SUBSEQUENT CLASSIFIERS
# WE ARE NOT TRAINING ON TEST (EVEN THOUGH UNSUPERVISED)
# ==== #
test_out = self._create_extra_dataset(prev_outputs_to_consider, mode='test', depth=depth)
test_loader = dataset_getter.get_outer_test(batch_size=batch_size, shuffle=False, extra=test_out)
# ==== #
# Instantiate the Model
new_layer = self.create_incremental_model(dim_node_features, dim_edge_features, dim_target, depth,
prev_outputs_to_consider)
# Instantiate the engine (it handles the training loop and the inference phase by abstracting the specifics)
incremental_training_engine = self.create_incremental_engine(new_layer)
train_loss, train_score, train_out, \
val_loss, val_score, val_out, \
_, _, test_out = incremental_training_engine.train(train_loader=train_loader,
validation_loader=val_loader,
test_loader=test_loader,
max_epochs=self.model_config.layer_config['epochs'],
logger=logger)
for loader, out, mode in [(train_loader, train_out, 'train'), (val_loader, val_out, 'validation'),
(test_loader, test_out, 'test')]:
v_out, e_out, g_out, vo_out, eo_out, go_out = out
# Reorder outputs, which are produced in shuffled order, to the original arrangement of the dataset.
v_out, e_out, g_out, vo_out, eo_out, go_out = self._reorder_shuffled_objects(v_out, e_out, g_out,
vo_out, eo_out, go_out,
loader)
# Store outputs
self._store_outputs(mode, depth, v_out, e_out, g_out, vo_out, eo_out, go_out)
depth += 1
        # NOW LOAD ALL EMBEDDINGS AND STORE THE EMBEDDINGS DATASET IN A TORCH FILE.
# Consider all previous layers now, i.e. gather all the embeddings
prev_outputs_to_consider = [l for l in range(1, depth + 1)]
prev_outputs_to_consider.reverse() # load output will concatenate in reverse order
# Retrieve only the graph embeddings to save memory.
        # In the CGMM classification task (see other experiment file), I will ignore the outer val and reuse the inner val as validation, as I cannot use the splitter.
train_out = self._create_extra_dataset(prev_outputs_to_consider, mode='train', depth=depth, only_g=True)
val_out = self._create_extra_dataset(prev_outputs_to_consider, mode='validation', depth=depth, only_g=True)
test_out = self._create_extra_dataset(prev_outputs_to_consider, mode='test', depth=depth, only_g=True)
# Necessary info to give a unique name to the dataset (some hyper-params like epochs are assumed to be fixed)
embeddings_folder = self.model_config.layer_config['embeddings_folder']
max_layers = self.model_config.layer_config['max_layers']
unibigram = self.model_config.layer_config['unibigram']
C = self.model_config.layer_config['C']
CA = self.model_config.layer_config['CA'] if 'CA' in self.model_config.layer_config else None
aggregation = self.model_config.layer_config['aggregation']
infer_with_posterior = self.model_config.layer_config['infer_with_posterior']
outer_k = dataset_getter.outer_k
inner_k = dataset_getter.inner_k
# ====
if not os.path.exists(os.path.join(embeddings_folder, dataset_getter.dataset_name)):
os.makedirs(os.path.join(embeddings_folder, dataset_getter.dataset_name))
unigram_dim = C + CA if CA is not None else C
        assert unibigram
# Retrieve unigram if necessary
for unib in [False, True]:
base_path = os.path.join(embeddings_folder, dataset_getter.dataset_name,
f'{max_layers}_{unib}_{C}_{CA}_{aggregation}_{infer_with_posterior}_{outer_k + 1}_{inner_k + 1}')
train_out_emb = torch.cat([d.g_outs if unib else d.g_outs[:, :, :unigram_dim] for d in train_out], dim=0)
torch.save(train_out_emb, base_path + '_train.torch')
val_out_emb = torch.cat([d.g_outs if unib else d.g_outs[:, :, :unigram_dim] for d in val_out], dim=0)
torch.save(val_out_emb, base_path + '_val.torch')
test_out_emb = torch.cat([d.g_outs if unib else d.g_outs[:, :, :unigram_dim] for d in test_out], dim=0)
torch.save(test_out_emb, base_path + '_test.torch')
# CLEAR OUTPUTS
for mode in ['train', 'validation', 'test']:
shutil.rmtree(os.path.join(self.output_folder, mode), ignore_errors=True)
tr_res = {LOSS: {'main_loss': torch.zeros(1)}, SCORE: {'main_score': torch.zeros(1)}}
vl_res = {LOSS: {'main_loss': torch.zeros(1)}, SCORE: {'main_score': torch.zeros(1)}}
return tr_res, vl_res
def run_test(self, dataset_getter, logger):
tr_res = {LOSS: {'main_loss': torch.zeros(1)}, SCORE: {'main_score': torch.zeros(1)}}
vl_res = {LOSS: {'main_loss': torch.zeros(1)}, SCORE: {'main_score': torch.zeros(1)}}
te_res = {LOSS: {'main_loss': torch.zeros(1)}, SCORE: {'main_score': torch.zeros(1)}}
return tr_res, vl_res, te_res
| 7,955 | 53.122449 | 162 | py |
CGMM | CGMM-master/emission.py | import math
import scipy
import scipy.cluster
import scipy.cluster.vq
import torch
# Interface for all emission distributions
from torch.nn import ModuleList
class EmissionDistribution(torch.nn.Module):
def __init__(self):
super().__init__()
def init_accumulators(self):
raise NotImplementedError()
def e_step(self, x_labels, y_labels):
raise NotImplementedError()
def infer(self, p_Q, x_labels):
raise NotImplementedError()
def _m_step(self, x_labels, y_labels, posterior_estimate):
raise NotImplementedError()
def m_step(self):
raise NotImplementedError()
def __str__(self):
raise NotImplementedError()
# do not replace with torch.distributions yet; this implementation allows GPU computation
class Categorical(EmissionDistribution):
def __init__(self, dim_target, dim_hidden_states):
"""
:param dim_target: dimension of output alphabet
:param dim_hidden_states: hidden states associated with each label
"""
super().__init__()
self.eps = 1e-8 # Laplace smoothing
self.K = dim_target # discrete output labels
self.C = dim_hidden_states # clusters
self.emission_distr = torch.nn.Parameter(torch.empty(self.K, self.C,
dtype=torch.float32),
requires_grad=False)
self.emission_numerator = torch.nn.Parameter(torch.empty_like(self.emission_distr),
requires_grad=False)
for i in range(0, self.C):
em = torch.nn.init.uniform_(torch.empty(self.K, dtype=torch.float32))
self.emission_distr[:, i] = em / em.sum()
self.init_accumulators()
def _flatten_labels(self, labels):
labels = torch.squeeze(labels)
if len(labels.shape) > 1:
# Compute discrete categories from one_hot_input
labels_squeezed = labels.argmax(dim=1)
return labels_squeezed
return labels.long()
def init_accumulators(self):
torch.nn.init.constant_(self.emission_numerator, self.eps)
def e_step(self, x_labels, y_labels):
"""
For each cluster i, returns the probability associated with a specific label.
:param x_labels: unused
:param y_labels: output observables
:return: a tensor of size ?xC representing the estimated posterior distribution for the E-step
"""
y_labels_squeezed = self._flatten_labels(y_labels)
# Returns the emission probability associated to each observable
emission_obs = self.emission_distr[y_labels_squeezed] # ?xC
return emission_obs
def infer(self, p_Q, x_labels):
"""
Compute probability of a label given the probability P(Q) as argmax_y \sum_i P(y|Q=i)P(Q=i)
:param p_Q: tensor of size ?xC
:param x_labels: unused
:return:
"""
'''
# OLD CODE
# We simply compute P(y|x) = \sum_i P(y|Q=i)P(Q=i|x) for each node
inferred_y = torch.mm(p_Q, self.emission_distr.transpose(0, 1)) # ?xK
return inferred_y
'''
p_K_CS = p_Q.unsqueeze(1) * self.emission_distr.unsqueeze(0) # ?xKxC
p_KCS = p_K_CS.reshape((-1, self.K * self.C)) # ?xKC
best_KCS = torch.argmax(p_KCS, dim=1)
best_K = best_KCS // self.C
best_CS = torch.remainder(best_KCS, self.C)
return best_K.unsqueeze(1)
def _m_step(self, x_labels, y_labels, posterior_estimate):
"""
Updates the minibatch accumulators
:param x_labels: unused
:param y_labels: output observable
:param posterior_estimate: a ?xC posterior estimate obtained using the output observables
"""
y_labels_squeezed = self._flatten_labels(y_labels)
self.emission_numerator.index_add_(dim=0, source=posterior_estimate,
index=y_labels_squeezed) # KxC
def m_step(self):
"""
Updates the emission parameters and re-initializes the accumulators.
:return:
"""
self.emission_distr.data = torch.div(self.emission_numerator,
self.emission_numerator.sum(0))
curr_device = self.emission_distr.get_device()
curr_device = curr_device if curr_device != -1 else 'cpu'
# assert torch.allclose(self.emission_distr.sum(0), torch.tensor([1.]).to(curr_device))
self.init_accumulators()
def __str__(self):
return str(self.emission_distr)
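# TEST: a minimal sketch (hypothetical shapes and values) of one EM cycle for Categorical
'''
em = Categorical(dim_target=4, dim_hidden_states=3)
y = torch.randint(0, 4, (10,))                          # 10 observed discrete labels
posterior = em.e_step(None, y)                          # ?xC emission terms
posterior = posterior / posterior.sum(1, keepdim=True)  # normalize into responsibilities
em._m_step(None, y, posterior)                          # accumulate sufficient statistics
em.m_step()                                             # closed-form update + accumulator reset
'''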
# do not replace with torch.distributions yet; this implementation allows GPU computation
class ConditionedCategorical(EmissionDistribution):
def __init__(self, dim_features, dim_target, dim_hidden_states):
"""
:param dim_node_features: dimension of input alphabet
:param dim_target: dimension of output alphabet
:param dim_hidden_states: hidden states associated with each label
"""
super().__init__()
self.eps = 1e-8 # Laplace smoothing
self.K = dim_features # discrete input labels
self.Y = dim_target # discrete output labels
self.C = dim_hidden_states # clusters
self.emission_distr = torch.nn.Parameter(torch.empty(self.K,
self.Y,
self.C,
dtype=torch.float32),
requires_grad=False)
for i in range(self.C):
for k in range(self.K):
em = torch.nn.init.uniform_(torch.empty(self.Y,
dtype=torch.float32))
self.emission_distr[k, :, i] = em / em.sum()
self.emission_numerator = torch.nn.Parameter(torch.empty_like(self.emission_distr),
requires_grad=False)
self.init_accumulators()
    def _flatten_labels(self, labels):
        # Mirrors Categorical._flatten_labels: collapse one-hot encodings into class indices
        labels = torch.squeeze(labels)
        if len(labels.shape) > 1:
            return labels.argmax(dim=1)
        return labels.long()
    def init_accumulators(self):
        torch.nn.init.constant_(self.emission_numerator, self.eps)
def e_step(self, x_labels, y_labels):
"""
For each cluster i, returns the probability associated with a specific input and output label.
:param x_labels: input observables
:param y_labels: output observables
:return: a tensor of size ?xC representing the estimated posterior distribution for the E-step
"""
x_labels_squeezed = self._flatten_labels(x_labels)
y_labels_squeezed = self._flatten_labels(y_labels)
        emission_of_labels = self.emission_distr[x_labels_squeezed, y_labels_squeezed, :]
return emission_of_labels # ?xC
def infer(self, p_Q, x_labels):
"""
Compute probability of a label given the probability P(Q|x) as argmax_y \sum_i P(y|Q=i,x)P(Q=i|x)
:param p_Q: tensor of size ?xC
:return:
"""
# We simply compute P(y|x) = \sum_i P(y|Q=i,x)P(Q=i|x) for each node
x_labels_squeezed = self._flatten_labels(x_labels)
# First, condition the emission on the input labels
emission_distr_given_x = self.emission_distr[x_labels_squeezed, :, :]
# Then, perform inference
inferred_y = p_Q.unsqueeze(1) * emission_distr_given_x # ?xYxC
inferred_y = torch.sum(inferred_y, dim=2) # ?xY
return inferred_y
def _m_step(self, x_labels, y_labels, posterior_estimate):
"""
Updates the minibatch accumulators
:param x_labels: unused
:param y_labels: output observable
:param posterior_estimate: a ?xC posterior estimate obtained using the output observables
"""
x_labels_squeezed = self._flatten_labels(x_labels)
y_labels_squeezed = self._flatten_labels(y_labels)
for k in range(self.K):
# filter nodes based on their input value
mask = x_labels_squeezed == k
y_labels_masked = y_labels_squeezed[mask]
posterior_estimate_masked = posterior_estimate[mask, :]
# posterior_estimate has shape ?xC
            delta_numerator = torch.zeros(self.Y, self.C, device=posterior_estimate.device)
delta_numerator.index_add_(dim=0, source=posterior_estimate_masked,
index=y_labels_masked) # --> Y x C
self.emission_numerator[k, :, :] += delta_numerator
def m_step(self):
"""
Updates the emission parameters and re-initializes the accumulators.
:return:
"""
self.emission_distr.data = torch.div(self.emission_numerator,
torch.sum(self.emission_numerator,
dim=1,
keepdim=True))
        curr_device = self.emission_distr.get_device()
        curr_device = curr_device if curr_device != -1 else 'cpu'
        assert torch.allclose(self.emission_distr.sum(1), torch.tensor([1.]).to(curr_device))
self.init_accumulators()
def __str__(self):
return str(self.emission_distr)
# do not replace with torch.distributions yet; this implementation allows GPU computation
class BernoulliEmission(EmissionDistribution):
def __init__(self, dim_target, dim_hidden_states):
super().__init__()
self.eps = 1e-8 # Laplace smoothing
self.C = dim_hidden_states # clusters
self.bernoulli_params = torch.nn.Parameter(torch.nn.init.uniform_(torch.empty(self.C,
dtype=torch.float32)),
requires_grad=False)
self.emission_numerator = torch.nn.Parameter(torch.empty_like(self.bernoulli_params),
requires_grad=False)
self.emission_denominator = torch.nn.Parameter(torch.empty_like(self.bernoulli_params),
requires_grad=False)
self.init_accumulators()
def init_accumulators(self):
torch.nn.init.constant_(self.emission_numerator, self.eps)
torch.nn.init.constant_(self.emission_denominator, self.eps * 2)
def bernoulli_density(self, labels, param):
return torch.mul(torch.pow(param, labels),
torch.pow(1 - param, 1 - labels))
def e_step(self, x_labels, y_labels):
"""
For each cluster i, returns the probability associated with a specific input and output label.
:param x_labels: unused
:param y_labels: output observables
:return: a tensor of size ?xC representing the estimated posterior distribution for the E-step
"""
emission_of_labels = None
for i in range(0, self.C):
if emission_of_labels is None:
emission_of_labels = torch.reshape(self.bernoulli_density(y_labels,
self.bernoulli_params[i]), (-1, 1))
else:
emission_of_labels = torch.cat((emission_of_labels,
torch.reshape(self.bernoulli_density(y_labels,
self.bernoulli_params[i]),
(-1, 1))),
dim=1)
return emission_of_labels
def infer(self, p_Q, x_labels):
"""
Compute probability of a label given the probability P(Q) as argmax_y \sum_i P(y|Q=i)P(Q=i)
:param p_Q: tensor of size ?xC
:param x_labels: unused
:return:
"""
# We simply compute P(y|x) = \sum_i P(y|Q=i)P(Q=i|x) for each node
inferred_y = torch.mm(p_Q, self.bernoulli_params.unsqueeze(1)) # ?x1
return inferred_y
def _m_step(self, x_labels, y_labels, posterior_estimate):
"""
Updates the minibatch accumulators
:param x_labels: unused
:param y_labels: output observable
:param posterior_estimate: a ?xC posterior estimate obtained using the output observables
"""
if len(y_labels.shape) == 1:
y_labels = y_labels.unsqueeze(1)
self.emission_numerator += torch.sum(torch.mul(posterior_estimate,
y_labels), dim=0) # --> 1 x C
self.emission_denominator += torch.sum(posterior_estimate, dim=0) # --> C
def m_step(self):
        self.bernoulli_params.data = self.emission_numerator / self.emission_denominator
self.init_accumulators()
def __str__(self):
return str(self.bernoulli_params)
class IndependentMultivariateBernoulliEmission(EmissionDistribution):
def init_accumulators(self):
for b in self.indep_bernoulli:
b.init_accumulators()
def __init__(self, dim_target, dim_hidden_states):
super().__init__()
self.eps = 1e-8 # Laplace smoothing
self.indep_bernoulli = ModuleList([BernoulliEmission(dim_target, dim_hidden_states) for _ in range(dim_target)])
self.init_accumulators()
def e_step(self, x_labels, y_labels):
"""
For each cluster i, returns the probability associated with a specific input and output label.
:param x_labels: unused
:param y_labels: output observables
:return: a tensor of size ?xC representing the estimated posterior distribution for the E-step
"""
emission_of_labels = None
# Assume independence
for i, b in enumerate(self.indep_bernoulli):
est_post_dist = b.e_step(x_labels, y_labels[:, i].unsqueeze(1))
if emission_of_labels is None:
emission_of_labels = est_post_dist
else:
emission_of_labels *= est_post_dist
return emission_of_labels
def infer(self, p_Q, x_labels):
"""
Compute probability of a label given the probability P(Q) as argmax_y \sum_i P(y|Q=i)P(Q=i)
:param p_Q: tensor of size ?xC
:param x_labels: unused
:return:
"""
inferred_y = None
# Assume independence
for i, b in enumerate(self.indep_bernoulli):
inferred_yi = b.infer(p_Q, x_labels)
if inferred_y is None:
inferred_y = inferred_yi
else:
inferred_y = torch.cat((inferred_y, inferred_yi), dim=1)
return inferred_y
def _m_step(self, x_labels, y_labels, posterior_estimate):
"""
Updates the minibatch accumulators
:param x_labels: unused
:param y_labels: output observable
:param posterior_estimate: a ?xC posterior estimate obtained using the output observables
"""
# Assume independence
for i, b in enumerate(self.indep_bernoulli):
b._m_step(x_labels, y_labels[:, i].unsqueeze(1), posterior_estimate)
def m_step(self):
# Assume independence
for i, b in enumerate(self.indep_bernoulli):
b.m_step()
self.init_accumulators()
def __str__(self):
return '-'.join([str(b) for b in self.indep_bernoulli])
# do not replace with torch.distributions yet; this implementation allows GPU computation
class IsotropicGaussian(EmissionDistribution):
def __init__(self, dim_features, dim_hidden_states, var_threshold=1e-16):
super().__init__()
self.eps = 1e-8 # Laplace smoothing
self.var_threshold = var_threshold # do not go below this value
self.F = dim_features
self.C = dim_hidden_states # clusters
self.mu = torch.nn.Parameter(torch.rand((self.C, self.F),
dtype=torch.float32),
requires_grad=False)
self.var = torch.nn.Parameter(torch.rand((self.C, self.F),
dtype=torch.float32),
requires_grad=False)
self.pi = torch.nn.Parameter(torch.FloatTensor([math.pi]),
requires_grad=False)
self.mu_numerator = torch.nn.Parameter(torch.empty([self.C, self.F],
dtype=torch.float32),
requires_grad=False)
self.mu_denominator = torch.nn.Parameter(torch.empty([self.C, 1],
dtype=torch.float32),
requires_grad=False)
self.var_numerator = torch.nn.Parameter(torch.empty([self.C, self.F],
dtype=torch.float32),
requires_grad=False)
self.var_denominator = torch.nn.Parameter(torch.empty([self.C, 1],
dtype=torch.float32),
requires_grad=False)
# To launch k-means the first time
self.initialized = False
self.init_accumulators()
def to(self, device):
super().to(device)
self.device = device
def initialize(self, labels):
codes, distortion = scipy.cluster.vq.kmeans(labels.cpu().detach().numpy()[:],
self.C, iter=20,
thresh=1e-05)
        # The number of prototypes returned by k-means can be smaller than self.C
self.mu[:codes.shape[0], :] = torch.from_numpy(codes)
self.var[:, :] = torch.std(labels, dim=0)
self.mu = self.mu # .to(self.device)
self.var = self.var # .to(self.device)
def univariate_pdf(self, labels, mean, var):
"""
Univariate case, computes probability distribution for each data point
:param labels:
:param mean:
:param var:
:return:
"""
return torch.exp(-((labels.float() - mean) ** 2) / (2 * var)) / (torch.sqrt(2 * self.pi * var))
def multivariate_diagonal_pdf(self, labels, mean, var):
"""
Multivariate case, DIAGONAL cov. matrix. Computes probability distribution for each data point
:param labels:
:param mean:
:param var:
:return:
"""
diff = (labels.float() - mean)
log_normaliser = -0.5 * (torch.log(2 * self.pi) + torch.log(var))
log_num = - (diff * diff) / (2 * var)
log_probs = torch.sum(log_num + log_normaliser, dim=1)
probs = torch.exp(log_probs)
# Trick to avoid instability, in case variance collapses to 0
probs[probs != probs] = self.eps
probs[probs < self.eps] = self.eps
return probs
def init_accumulators(self):
"""
This method initializes the accumulators for the EM algorithm.
EM updates the parameters in batch, but needs to accumulate statistics in mini-batch style.
:return:
"""
torch.nn.init.constant_(self.mu_numerator, self.eps)
torch.nn.init.constant_(self.mu_denominator, self.eps * self.C)
torch.nn.init.constant_(self.var_numerator, self.eps)
torch.nn.init.constant_(self.var_denominator, self.eps * self.C)
def e_step(self, x_labels, y_labels):
"""
For each cluster i, returns the probability associated to a specific label.
:param x_labels: unused
:param y_labels: output observables
:return: a distribution associated to each layer
"""
if not self.initialized:
self.initialized = True
self.initialize(y_labels)
emission_of_labels = None
for i in range(0, self.C):
if emission_of_labels is None:
emission_of_labels = torch.reshape(self.multivariate_diagonal_pdf(y_labels, self.mu[i], self.var[i]),
(-1, 1))
else:
emission_of_labels = torch.cat((emission_of_labels,
torch.reshape(
self.multivariate_diagonal_pdf(y_labels, self.mu[i], self.var[i]),
(-1, 1))), dim=1)
emission_of_labels += self.eps
assert not torch.isnan(emission_of_labels).any(), (torch.sum(torch.isnan(emission_of_labels)))
return emission_of_labels.detach()
def infer(self, p_Q, x_labels):
"""
Compute probability of a label given the probability P(Q) as argmax_y \sum_i P(y|Q=i)P(Q=i)
:param p_Q: tensor of size ?xC
:param x_labels: unused
:return:
"""
# We simply compute P(y|x) = \sum_i P(y|Q=i)P(Q=i|x) for each node
inferred_y = torch.mm(p_Q, self.mu) # ?xF
return inferred_y
def _m_step(self, x_labels, y_labels, posterior_estimate):
"""
Updates the minibatch accumulators
:param x_labels: unused
:param y_labels: output observable
:param posterior_estimate: a ?xC posterior estimate obtained using the output observables
"""
y_labels = y_labels.float()
for i in range(0, self.C):
reshaped_posterior = torch.reshape(posterior_estimate[:, i], (-1, 1)) # for broadcasting with F > 1
den = torch.unsqueeze(torch.sum(posterior_estimate[:, i], dim=0), dim=-1) # size C
y_weighted = torch.mul(y_labels, reshaped_posterior) # ?xF x ?x1 --> ?xF
y_minus_mu_squared_tmp = y_labels - self.mu[i, :]
# DIAGONAL COV MATRIX
y_minus_mu_squared = torch.mul(y_minus_mu_squared_tmp, y_minus_mu_squared_tmp)
self.mu_numerator[i, :] += torch.sum(y_weighted, dim=0)
self.var_numerator[i] += torch.sum(torch.mul(y_minus_mu_squared, reshaped_posterior), dim=0)
self.mu_denominator[i, :] += den
self.var_denominator[i, :] += den
def m_step(self):
"""
Updates the emission parameters and re-initializes the accumulators.
:return:
"""
self.mu.data = self.mu_numerator / self.mu_denominator
# Laplace smoothing
self.var.data = (self.var_numerator + self.var_threshold) / (self.var_denominator + self.C * self.var_threshold)
self.init_accumulators()
def __str__(self):
        return f"{str(self.mu)}, {str(self.var)}"
| 22,661 | 40.734807 | 120 | py |
CGMM | CGMM-master/readout.py | import torch
from pydgn.model.interface import ReadoutInterface
class CGMMGraphReadout(ReadoutInterface):
def __init__(self, dim_node_features, dim_edge_features, dim_target, config):
super().__init__(dim_node_features, dim_edge_features, dim_target, config)
embeddings_node_features = dim_node_features
hidden_units = config['hidden_units']
self.fc_global = torch.nn.Linear(embeddings_node_features, hidden_units)
self.out = torch.nn.Linear(hidden_units, dim_target)
def forward(self, data):
out = self.out(torch.relu(self.fc_global(data.x.float())))
return out, data.x
| 641 | 32.789474 | 82 | py |
CGMM | CGMM-master/cgmm.py | import torch
from pydgn.model.interface import ModelInterface
from util import compute_bigram, compute_unigram
from torch_geometric.nn import global_mean_pool, global_add_pool
from torch_scatter import scatter_add, scatter_max
class CGMM(ModelInterface):
def __init__(self, dim_node_features, dim_edge_features, dim_target, readout_class, config):
super().__init__(dim_node_features, dim_edge_features, dim_target, readout_class, config)
self.device = None
self.readout_class = readout_class
self.is_first_layer = config['depth'] == 1
self.depth = config['depth']
self.training = False
self.return_node_embeddings = False
self.K = dim_node_features
self.Y = dim_target
self.L = len(config['prev_outputs_to_consider'])
self.A = config['A']
self.C = config['C']
self.C2 = config['C'] + 1
self.CS = config.get('CS', None)
self.is_graph_classification = self.CS is not None
# self.add_self_arc = config['self_arc'] if 'self_arc' in config else False
self.use_continuous_states = config['infer_with_posterior']
self.unibigram = config['unibigram']
self.aggregation = config['aggregation']
self.readout = readout_class(dim_node_features, dim_edge_features,
dim_target, config)
if self.is_first_layer:
self.transition = BaseTransition(self.C)
else:
self.transition = CGMMTransition(self.C, self.A,
self.C2, self.L)
self.init_accumulators()
def init_accumulators(self):
self.readout.init_accumulators()
self.transition.init_accumulators()
# Do not delete this!
if self.device: # set by to() method
self.to(self.device)
def to(self, device):
super().to(device)
self.device = device
def train(self):
self.readout.train()
self.transition.train()
self.training = True
def eval(self):
self.readout.eval()
self.transition.eval()
self.training = False
def forward(self, data):
extra = None
if not self.is_first_layer:
data, extra = data[0], data[1]
return self.e_step(data, extra)
def e_step(self, data, extra=None):
x, y, batch = data.x, data.y, data.batch
prev_stats = None if self.is_first_layer else extra.vo_outs
if prev_stats is not None:
prev_stats.to(self.device)
# --------------------------- FORWARD PASS --------------------------- #
# t = time.time()
# --- TRANSITION contribution
if self.is_first_layer:
# p_Q_given_obs --> ?xC
p_Q_given_obs = self.transition.e_step(x)
transition_posterior = p_Q_given_obs
rightmost_term = p_Q_given_obs
else:
# p_Q_given_obs --> ?xC / transition_posterior --> ?xLxAxCxC2
p_Q_given_obs, transition_posterior, rightmost_term = self.transition.e_step(x, prev_stats)
# assert torch.allclose(p_Q_given_obs.sum(1), torch.tensor([1.]).to(self.device)), p_Q_given_obs.sum(1)
# print(f"Transition E-step time: {time.time()-t}"); t = time.time()
# --- READOUT contribution
# true_log_likelihood --> ?x1 / readout_posterior --> ?xCSxCN or ?xCN
true_log_likelihood, readout_posterior, emission_target = self.readout.e_step(p_Q_given_obs, x, y, batch)
# print(f"Readout E-step time: {time.time()-t}"); t = time.time()
# likely_labels --> ? x Y
likely_labels = self.readout.infer(p_Q_given_obs, x, batch)
# print(f"Readout inference time: {time.time()-t}"); t = time.time()
# -------------------------------------------------------------------- #
if not self.is_graph_classification:
complete_log_likelihood, eui = self._e_step_node(x, y, p_Q_given_obs,
transition_posterior, rightmost_term,
readout_posterior, emission_target,
batch)
else:
complete_log_likelihood, eui = self._e_step_graph(x, y, p_Q_given_obs,
transition_posterior, rightmost_term,
readout_posterior, emission_target,
batch)
# print(f"Posterior E-step time: {time.time()-t}"); t = time.time()
num_nodes = x.shape[0]
# CGMM uses the true posterior (over node attributes) as it is unsupervised!
# Different from IO version
if self.return_node_embeddings:
# print("Computing intermediate outputs")
assert not self.training
statistics_batch = self._compute_statistics(eui, data, self.device)
node_unigram = compute_unigram(eui, self.use_continuous_states)
graph_unigram = self._get_aggregation_fun()(node_unigram, batch)
if self.unibigram:
node_bigram = compute_bigram(eui.float(), data.edge_index, batch,
self.use_continuous_states)
graph_bigram = self._get_aggregation_fun()(node_bigram, batch)
node_embeddings_batch = torch.cat((node_unigram, node_bigram), dim=1)
graph_embeddings_batch = torch.cat((graph_unigram, graph_bigram), dim=1)
else:
node_embeddings_batch = node_unigram
graph_embeddings_batch = graph_unigram
# to save time during debug
embeddings = (None, None, graph_embeddings_batch, statistics_batch, None, None)
else:
embeddings = None
return likely_labels, embeddings, complete_log_likelihood, \
true_log_likelihood, num_nodes
def _e_step_graph(self, x, y, p_Q_given_obs, transition_posterior,
rightmost_term, readout_posterior, emission_target, batch):
# batch (i.e., replicate) graph readout posterior for all nodes
b_readout_posterior = readout_posterior[batch] # ?nxCSxCN
if self.is_first_layer:
# ----------------------------- Posterior ---------------------------- #
# expand
exp_readout_posterior = b_readout_posterior.reshape((-1, self.CS,
self.C))
# expand
exp_transition_posterior = transition_posterior.unsqueeze(1)
# batch graph sizes + expand
b_graph_sizes = scatter_add(torch.ones_like(batch).to(self.device),
batch)[batch].reshape([-1, 1, 1])
unnorm_posterior_estimate = torch.div(torch.mul(exp_readout_posterior,
exp_transition_posterior),
b_graph_sizes)
Z = global_add_pool(unnorm_posterior_estimate.sum((1, 2), keepdim=True), batch)
Z[Z == 0.] = 1.
esui = unnorm_posterior_estimate / (Z[batch]) # --> ?n x CS x CN
eui = esui.sum(1) # ?n x CN
if self.training:
# Update the accumulators (also useful for minibatch training)
self.readout._m_step(x, y, esui, batch)
self.transition._m_step(x, y, eui)
# -------------------------------------------------------------------- #
# ---------------------- Complete Log Likelihood --------------------- #
complete_log_likelihood_readout = self.readout.complete_log_likelihood(esui, emission_target, batch)
complete_log_likelihood_transition = self.transition.complete_log_likelihood(eui, p_Q_given_obs)
complete_log_likelihood = complete_log_likelihood_readout + complete_log_likelihood_transition
# -------------------------------------------------------------------- #
else:
# ----------------------------- Posterior ---------------------------- #
# expand
exp_readout_posterior = b_readout_posterior.reshape((-1, self.CS,
1, 1,
self.C, 1))
# expand
exp_transition_posterior = transition_posterior.unsqueeze(1)
# batch graph sizes + expand
b_graph_sizes = scatter_add(torch.ones_like(batch).to(self.device),
batch)[batch].reshape([-1, 1, 1, 1, 1, 1])
unnorm_posterior_estimate = torch.div(torch.mul(exp_readout_posterior,
exp_transition_posterior),
b_graph_sizes)
Z = global_add_pool(unnorm_posterior_estimate.sum((1, 2, 3, 4, 5), keepdim=True), batch)
Z[Z == 0.] = 1.
esuilaj = unnorm_posterior_estimate / (Z[batch]) # --> ?n x CS x L x A x C x C2
euilaj = esuilaj.sum(1) # Marginalize over CS --> transition M-step
euila = euilaj.sum(4) # ?n x L x A x C
euil = euila.sum(2) # ?n x L x C
esui = esuilaj.sum((2, 3, 5)) # Marginalize over L,A,C2 --> readout M-step
eui = euil.sum(1) # ?n x C
if self.training:
# Update the accumulators (also useful for minibatch training)
self.readout._m_step(x, y, esui, batch)
self.transition._m_step(x, y, euilaj, euila, euil)
# -------------------------------------------------------------------- #
# ---------------------- Complete Log Likelihood --------------------- #
complete_log_likelihood_readout = self.readout.complete_log_likelihood(esui, emission_target, batch)
complete_log_likelihood_transition = self.transition.complete_log_likelihood(euilaj, euila, euil,
rightmost_term)
complete_log_likelihood = complete_log_likelihood_readout + complete_log_likelihood_transition
# -------------------------------------------------------------------- #
return complete_log_likelihood, eui
def _e_step_node(self, x, y, p_Q_given_obs, transition_posterior,
rightmost_term, readout_posterior, emission_target, batch):
if self.is_first_layer:
# ----------------------------- Posterior ---------------------------- #
unnorm_posterior_estimate = readout_posterior * transition_posterior
Z = unnorm_posterior_estimate.sum(1, keepdim=True)
Z[Z == 0.] = 1.
eui = unnorm_posterior_estimate / Z # --> ? x CN
if self.training:
# Update the accumulators (also useful for minibatch training)
self.readout._m_step(x, y, eui, batch)
self.transition._m_step(x, y, eui)
# -------------------------------------------------------------------- #
# ---------------------- Complete Log Likelihood --------------------- #
complete_log_likelihood_readout = self.readout.complete_log_likelihood(eui, emission_target, batch)
complete_log_likelihood_transition = self.transition.complete_log_likelihood(eui, p_Q_given_obs)
complete_log_likelihood = complete_log_likelihood_readout + complete_log_likelihood_transition
# -------------------------------------------------------------------- #
else:
# ----------------------------- Posterior ---------------------------- #
# expand
exp_readout_posterior = readout_posterior.reshape((-1, 1, 1, self.C, 1))
unnorm_posterior_estimate = torch.mul(exp_readout_posterior,
transition_posterior)
Z = unnorm_posterior_estimate.sum((1, 2, 3, 4), keepdim=True)
Z[Z == 0.] = 1.
euilaj = unnorm_posterior_estimate / Z # --> ?n x L x A x C x C2
euila = euilaj.sum(4) # ?n x L x A x C
euil = euila.sum(2) # ?n x L x C
eui = euil.sum(1) # ?n x C
if self.training:
# Update the accumulators (also useful for minibatch training)
self.readout._m_step(x, y, eui, batch)
self.transition._m_step(x, y, euilaj, euila, euil)
# -------------------------------------------------------------------- #
# ---------------------- Complete Log Likelihood --------------------- #
complete_log_likelihood_readout = self.readout.complete_log_likelihood(eui, emission_target, batch)
complete_log_likelihood_transition = self.transition.complete_log_likelihood(euilaj, euila, euil,
rightmost_term)
complete_log_likelihood = complete_log_likelihood_readout + complete_log_likelihood_transition
# -------------------------------------------------------------------- #
# assert torch.allclose(eui.sum(1), torch.tensor([1.]).to(self.device)), eui.sum(1)[eui.sum(1) != 1.]
return complete_log_likelihood, eui
def m_step(self):
self.readout.m_step()
self.transition.m_step()
self.init_accumulators()
def stopping_criterion(self, depth, max_layers, train_loss, train_score, val_loss, val_score,
dict_per_layer, layer_config, logger=None):
return depth == max_layers
def _compute_statistics(self, posteriors, data, device):
statistics = torch.full((posteriors.shape[0], self.A + 1, posteriors.shape[1] + 1), 0., dtype=torch.float32).to(
device)
srcs, dsts = data.edge_index
if self.A == 1:
sparse_adj_matr = torch.sparse_coo_tensor(data.edge_index, \
torch.ones(data.edge_index.shape[1], dtype=posteriors.dtype).to(
device), \
torch.Size([posteriors.shape[0],
posteriors.shape[0]])).to(device).transpose(0, 1)
statistics[:, 0, :-1] = torch.sparse.mm(sparse_adj_matr, posteriors)
else:
for arc_label in range(self.A):
sparse_label_adj_matr = torch.sparse_coo_tensor(data.edge_index, \
(data.edge_attr == arc_label).to(device).float(), \
torch.Size([posteriors.shape[0],
posteriors.shape[0]])).to(device).transpose(
0, 1)
statistics[:, arc_label, :-1] = torch.sparse.mm(sparse_label_adj_matr, posteriors)
# Deal with nodes with degree 0: add a single fake neighbor with uniform posterior
degrees = statistics[:, :, :-1].sum(dim=[1, 2]).floor()
statistics[degrees == 0., :, :] = 1. / self.C2
'''
if self.add_self_arc:
statistics[:, self.A, :-1] += posteriors
'''
# use self.A+1 as special edge for bottom states (all in self.C2-1)
max_arieties, _ = self._compute_max_ariety(degrees.int().to(self.device), data.batch)
max_arieties[max_arieties == 0] = 1
statistics[:, self.A, self.C] += degrees / max_arieties[data.batch].float()
return statistics
def _compute_sizes(self, batch, device):
return scatter_add(torch.ones(len(batch), dtype=torch.int).to(device), batch)
def _compute_max_ariety(self, degrees, batch):
return scatter_max(degrees, batch)
def _get_aggregation_fun(self):
if self.aggregation == 'mean':
aggregate = global_mean_pool
elif self.aggregation == 'sum':
aggregate = global_add_pool
return aggregate
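# A minimal sketch (hypothetical training engine loop, in comments) of how the EM cycle
# above is driven: each epoch runs forward() over all batches (the E-step; while
# self.training is True the transition/readout accumulators are filled via _m_step),
# after which a single call to m_step() applies the closed-form M-step updates and
# resets the accumulators.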
class CGMMTransition(torch.nn.Module):
def __init__(self, c, a, c2, l):
super().__init__()
self.device = None
self.eps = 1e-8 # Laplace smoothing
self.C = c
self.orig_A = a
self.A = a + 1 # bottom state connected with a special arc
self.C2 = c2
self.L = l
self.layerS = torch.nn.Parameter(torch.nn.init.uniform_(torch.empty(self.L, dtype=torch.float32)),
requires_grad=False)
self.arcS = torch.nn.Parameter(torch.zeros((self.L, self.A), dtype=torch.float32), requires_grad=False)
self.transition = torch.nn.Parameter(torch.empty([self.L, self.A, self.C, self.C2], dtype=torch.float32),
requires_grad=False)
self.layerS /= self.layerS.sum() # inplace
for layer in range(self.L):
self.arcS[layer, :] = torch.nn.init.uniform_(self.arcS[layer, :])
self.arcS[layer, :] /= self.arcS[layer, :].sum()
for arc in range(self.A):
for j in range(self.C2):
tr = torch.nn.init.uniform_(torch.empty(self.C))
self.transition[layer, arc, :, j] = tr / tr.sum()
# These are variables where I accumulate intermediate minibatches' results
# These are needed by the M-step update equations at the end of an epoch
self.layerS_numerator = torch.nn.Parameter(torch.empty_like(self.layerS),
requires_grad=False)
self.arcS_numerator = torch.nn.Parameter(torch.empty_like(self.arcS),
requires_grad=False)
self.transition_numerator = torch.nn.Parameter(torch.empty_like(self.transition),
requires_grad=False)
self.init_accumulators()
def to(self, device):
super().to(device)
self.device = device
def init_accumulators(self):
torch.nn.init.constant_(self.layerS_numerator, self.eps)
torch.nn.init.constant_(self.arcS_numerator, self.eps)
torch.nn.init.constant_(self.transition_numerator, self.eps)
def e_step(self, x_labels, stats=None, batch=None):
# ---------------------------- Forward Pass -------------------------- #
stats = stats.float()
# Compute the neighbourhood dimension for each vertex
        neighbDim = stats.sum(dim=3, keepdim=True).unsqueeze(4)  # --> ?n x L x A x 1 x 1
# Replace zeros with ones to avoid divisions by zero
# This does not alter learning: the numerator can still be zero
neighbDim[neighbDim == 0] = 1.
transition = torch.unsqueeze(self.transition, 0) # --> 1 x L x A x C x C2
stats = stats.unsqueeze(3) # --> ?n x L x A x 1 x C2
rightmost_term = (transition * stats) / neighbDim # --> ?n x L x A x C x C2
        layerS = torch.reshape(self.layerS, [1, self.L, 1])  # --> 1 x L x 1
arcS = torch.reshape(self.arcS, [1, self.L, self.A, 1]) # --> 1 x L x A x 1
tmp = (arcS * rightmost_term.sum(4)).sum(dim=2) # --> ?n x L x C
p_Q_given_obs = (layerS * tmp).sum(dim=1) # --> ?n x C
# -------------------------------------------------------------------- #
# ----------------------------- Posterior ---------------------------- #
layerS_expanded = torch.reshape(self.layerS, [1, self.L, 1, 1, 1])
arcS_expanded = torch.reshape(self.arcS, [1, self.L, self.A, 1, 1])
transition_posterior = layerS_expanded * arcS_expanded * rightmost_term
# -------------------------------------------------------------------- #
return p_Q_given_obs, transition_posterior, rightmost_term
def complete_log_likelihood(self, euilaj, euila, euil, rightmost_term):
layerS = torch.reshape(self.layerS, [1, self.L, 1])
term_1 = (euil * (layerS.log())).sum((1, 2)).sum()
arcS = torch.reshape(self.arcS, [1, self.L, self.A, 1])
term_2 = (euila * (arcS.log())).sum((1, 2, 3)).sum()
rightmost_term[rightmost_term == 0.] = 1
term_3 = (euilaj * (rightmost_term.log())).sum((1, 2, 3, 4)).sum()
return term_1 + term_2 + term_3
def _m_step(self, x_labels, y_labels, euilaj, euila, euil):
self.layerS_numerator += euil.sum(dim=(0, 2))
self.arcS_numerator += euila.sum(dim=(0, 3))
self.transition_numerator += euilaj.sum(0) # --> L x A x C x C2
def m_step(self):
self.layerS.data = self.layerS_numerator / self.layerS_numerator.sum(dim=0, keepdim=True)
self.arcS.data = self.arcS_numerator / self.arcS_numerator.sum(dim=1, keepdim=True)
self.transition.data = self.transition_numerator / self.transition_numerator.sum(dim=2, keepdim=True)
self.init_accumulators()
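# (Added note, not part of the original repo.) The M-step above turns the
# accumulated sufficient statistics into valid distributions by normalizing
# along the axis that must sum to one: layerS over layers (dim=0), arcS over
# arcs (dim=1), and transition over the C destination states (dim=2); the eps
# used to initialize the accumulators acts as Laplace smoothing.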
class BaseTransition(torch.nn.Module):
def __init__(self, c):
super().__init__()
self.device = None
self.eps = 1e-8 # Laplace smoothing
self.C = c
self.transition = torch.nn.Parameter(torch.empty([self.C], dtype=torch.float32), requires_grad=False)
tr = torch.nn.init.uniform_(torch.empty(self.C))
self.transition.data = tr / tr.sum()
self.transition_numerator = torch.nn.Parameter(torch.empty_like(self.transition), requires_grad=False)
self.init_accumulators()
def to(self, device):
super().to(device)
self.device = device
def init_accumulators(self):
torch.nn.init.constant_(self.transition_numerator, self.eps)
def e_step(self, x_labels, stats=None, batch=None):
# ---------------------------- Forward Pass -------------------------- #
p_Q_given_obs = self.transition.unsqueeze(0) # --> 1 x C
return p_Q_given_obs
def infer(self, x_labels, stats=None, batch=None):
        p_Q_given_obs = self.e_step(x_labels, stats, batch)
return p_Q_given_obs
def complete_log_likelihood(self, eui, p_Q_given_obs):
complete_log_likelihood = (eui * (p_Q_given_obs.log())).sum(1).sum()
return complete_log_likelihood
def _m_step(self, x_labels, y_labels, eui):
self.transition_numerator += eui.sum(0)
def m_step(self):
self.transition.data = self.transition_numerator / self.transition_numerator.sum()
self.init_accumulators()
| 22,943 | 43.638132 | 120 | py |
CGMM | CGMM-master/loss.py | from pydgn.training.callback.metric import Metric
class CGMMLoss(Metric):
@property
def name(self) -> str:
return 'CGMM Loss'
def __init__(self, use_as_loss=True, reduction='mean', use_nodes_batch_size=True):
super().__init__(use_as_loss=use_as_loss, reduction=reduction, use_nodes_batch_size=use_nodes_batch_size)
self.old_likelihood = -float('inf')
self.new_likelihood = None
def on_training_batch_end(self, state):
self.batch_metrics.append(state.batch_loss[self.name].item())
if state.model.is_graph_classification:
self.num_samples += state.batch_num_targets
else:
# This works for unsupervised CGMM
self.num_samples += state.batch_num_nodes
def on_training_epoch_end(self, state):
super().on_training_epoch_end(state)
if (state.epoch_loss[self.name].item() - self.old_likelihood) < 0:
pass
            # state.stop_training = True
self.old_likelihood = state.epoch_loss[self.name].item()
def on_eval_batch_end(self, state):
self.batch_metrics.append(state.batch_loss[self.name].item())
if state.model.is_graph_classification:
self.num_samples += state.batch_num_targets
else:
# This works for unsupervised CGMM
self.num_samples += state.batch_num_nodes
# Simply ignore targets
def forward(self, targets, *outputs):
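        # (Added comment.) By convention the model's forward pass returns the
        # complete log-likelihood as its third output; training is EM-based,
        # so this "loss" is only monitored and `on_backward` below is a no-op.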
likelihood = outputs[2]
return likelihood
def on_backward(self, state):
pass
| 1,555 | 32.826087 | 113 | py |
CGMM | CGMM-master/probabilistic_readout.py | from typing import Tuple, Optional, List
import torch
from pydgn.experiment.util import s2c
class ProbabilisticReadout(torch.nn.Module):
def __init__(self, dim_node_features, dim_edge_features, dim_target, config):
super().__init__()
self.K = dim_node_features
self.Y = dim_target
self.E = dim_edge_features
self.eps = 1e-8
def init_accumulators(self):
raise NotImplementedError()
def e_step(self, p_Q, x_labels, y_labels, batch):
raise NotImplementedError()
def infer(self, p_Q, x_labels, batch):
raise NotImplementedError()
def complete_log_likelihood(self, posterior, emission_target, batch):
raise NotImplementedError()
def _m_step(self, x_labels, y_labels, posterior, batch):
raise NotImplementedError()
def m_step(self):
raise NotImplementedError()
class ProbabilisticNodeReadout(ProbabilisticReadout):
def __init__(self, dim_node_features, dim_edge_features, dim_target, config):
super().__init__(dim_node_features, dim_edge_features, dim_target, config)
self.emission_class = s2c(config['emission'])
self.CN = config['C'] # number of states of a generic node
self.emission = self.emission_class(self.Y, self.CN)
def init_accumulators(self):
self.emission.init_accumulators()
def e_step(self, p_Q, x_labels, y_labels, batch):
emission_target = self.emission.e_step(x_labels, y_labels) # ?n x CN
readout_posterior = emission_target
# true log P(y) using the observables
# Mean of individual node terms
p_x = (p_Q * readout_posterior).sum(dim=1)
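        # (Added comment.) Guard against zero marginals: log(0) would diverge,
        # whereas log(1) = 0 leaves the summed log-likelihood unchanged.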
p_x[p_x == 0.] = 1.
true_log_likelihood = p_x.log().sum(dim=0)
return true_log_likelihood, readout_posterior, emission_target
def infer(self, p_Q, x_labels, batch):
return self.emission.infer(p_Q, x_labels)
def complete_log_likelihood(self, eui, emission_target, batch):
complete_log_likelihood = (eui * (emission_target.log())).sum(1).sum()
return complete_log_likelihood
def _m_step(self, x_labels, y_labels, eui, batch):
self.emission._m_step(x_labels, y_labels, eui)
def m_step(self):
self.emission.m_step()
self.init_accumulators()
class UnsupervisedProbabilisticNodeReadout(ProbabilisticReadout):
def __init__(self, dim_node_features, dim_edge_features, dim_target, config):
super().__init__(dim_node_features, dim_edge_features, dim_target, config)
self.emission_class = s2c(config['emission'])
self.CN = config['C'] # number of states of a generic node
self.emission = self.emission_class(self.K, self.CN)
def init_accumulators(self):
self.emission.init_accumulators()
def e_step(self, p_Q, x_labels, y_labels, batch):
# Pass x_labels as y_labels
emission_target = self.emission.e_step(x_labels, x_labels) # ?n x CN
readout_posterior = emission_target
# true log P(y) using the observables
# Mean of individual node terms
p_x = (p_Q * readout_posterior).sum(dim=1)
p_x[p_x == 0.] = 1.
true_log_likelihood = p_x.log().sum(dim=0)
return true_log_likelihood, readout_posterior, emission_target
def infer(self, p_Q, x_labels, batch):
return self.emission.infer(p_Q, x_labels)
def complete_log_likelihood(self, eui, emission_target, batch):
complete_log_likelihood = (eui * (emission_target.log())).sum(1).sum()
return complete_log_likelihood
def _m_step(self, x_labels, y_labels, eui, batch):
# Pass x_labels as y_labels
self.emission._m_step(x_labels, x_labels, eui)
def m_step(self):
self.emission.m_step()
self.init_accumulators()
| 3,817 | 33.396396 | 82 | py |
CGMM | CGMM-master/cgmm_incremental_task.py | import os
import shutil
import torch
from pydgn.experiment.experiment import Experiment
from pydgn.experiment.util import s2c
from pydgn.static import LOSS, SCORE
from torch.utils.data.sampler import SequentialSampler
from torch_geometric.data import Data
class CGMMTask(Experiment):
def __init__(self, model_configuration, exp_path, exp_seed):
super(CGMMTask, self).__init__(model_configuration, exp_path, exp_seed)
self.root_exp_path = exp_path # to distinguish from layers' exp_path
self.output_folder = os.path.join(exp_path, 'outputs')
self._concat_axis = self.model_config.layer_config['concatenate_on_axis']
def _create_extra_dataset(self, prev_outputs_to_consider, mode, depth, only_g=False):
# Load previous outputs if any according to prev. layers to consider (ALL TENSORS)
v_outs, e_outs, g_outs, vo_outs, eo_outs, go_outs = self._load_outputs(mode, prev_outputs_to_consider)
data_list = []
no_graphs = max(len(v_outs) if v_outs is not None else 0,
len(e_outs) if e_outs is not None else 0,
len(g_outs) if g_outs is not None else 0,
len(vo_outs) if vo_outs is not None else 0,
len(eo_outs) if eo_outs is not None else 0,
len(go_outs) if go_outs is not None else 0)
for index in range(no_graphs):
g = g_outs[index] if g_outs is not None else None
go = go_outs[index] if go_outs is not None else None
if not only_g:
v = v_outs[index] if v_outs is not None else None
e = e_outs[index] if e_outs is not None else None
vo = vo_outs[index] if vo_outs is not None else None
eo = eo_outs[index] if eo_outs is not None else None
data_list.append(Data(v_outs=v, e_outs=e, g_outs=g,
vo_outs=vo, eo_outs=eo, go_outs=go))
else:
data_list.append(Data(g_outs=g, go_outs=go))
return data_list
@staticmethod
def _reorder_shuffled_objects(v_out, e_out, g_out, vo_out, eo_out, go_out, data_loader):
if type(data_loader.sampler) == SequentialSampler: # No permutation
return v_out, e_out, g_out, vo_out, eo_out, go_out
idxs = data_loader.sampler.permutation # permutation of the last data_loader iteration
def reorder(obj, perm):
assert len(obj) == len(perm) and len(obj) > 0
return [y for (x, y) in sorted(zip(perm, obj))]
if v_out is not None:
# print(len(v_out))
v_out = reorder(v_out, idxs)
if e_out is not None:
raise NotImplementedError('This feature has not been implemented yet!')
# e_out = reorder(e_out, idxs)
if g_out is not None:
g_out = reorder(g_out, idxs)
if vo_out is not None:
# print(len(o_out))
vo_out = reorder(vo_out, idxs)
if eo_out is not None:
# print(len(o_out))
eo_out = reorder(eo_out, idxs)
if go_out is not None:
# print(len(o_out))
go_out = reorder(go_out, idxs)
return v_out, e_out, g_out, vo_out, eo_out, go_out
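    # (Added sketch, not part of the original repo.) The `reorder` helper
    # above inverts a sampler permutation, e.g.:
    #   perm = [2, 0, 1]        # original indices, in the order they were drawn
    #   obj  = ['c', 'a', 'b']  # outputs in drawn order
    #   [y for (x, y) in sorted(zip(perm, obj))]  ->  ['a', 'b', 'c']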
def _load_outputs(self, mode, prev_outputs_to_consider):
outs_dict = {
'vertex_outputs': None,
'edge_outputs': None,
'graph_outputs': None,
'vertex_other_outputs': None,
'edge_other_outputs': None,
'graph_other_outputs': None,
}
# The elements of prev_outputs_to_consider will be concatenated in
# reverse, i.e., if prev_outputs_to_consider = 1,2,3...L
# the contribution of layer L will appear in position 0 across
# self._concat_axis, then L-1 in position 1 and so on
# this is because a hyper-parameter l_prec=1 means "previous layer"
# and prev_outputs_to_consider will be = L,L-1,...1
# so we want to reorder layers from 1 to L
for prev in prev_outputs_to_consider:
for path, o_key in [(os.path.join(self.output_folder, mode, f'vertex_output_{prev}.pt'), 'vertex_outputs'),
(os.path.join(self.output_folder, mode, f'edge_output_{prev}.pt'), 'edge_outputs'),
(os.path.join(self.output_folder, mode, f'graph_output_{prev}.pt'), 'graph_outputs'),
(os.path.join(self.output_folder, mode, f'vertex_other_output_{prev}.pt'),
'vertex_other_outputs'),
(os.path.join(self.output_folder, mode, f'edge_other_output_{prev}.pt'),
'edge_other_outputs'),
(os.path.join(self.output_folder, mode, f'graph_other_output_{prev}.pt'),
'graph_other_outputs'), ]:
if os.path.exists(path):
out = torch.load(path)
outs = outs_dict[o_key]
if outs is None:
# print('None!')
outs = [None] * len(out)
# print(path, o_key, len(out))
# print(out[0].shape)
for graph_id in range(len(out)): # iterate over graphs
outs[graph_id] = out[graph_id] if outs[graph_id] is None \
else torch.cat((out[graph_id], outs[graph_id]), self._concat_axis)
outs_dict[o_key] = outs
return outs_dict['vertex_outputs'], outs_dict['edge_outputs'], \
outs_dict['graph_outputs'], outs_dict['vertex_other_outputs'], \
outs_dict['edge_other_outputs'], outs_dict['graph_other_outputs']
def _store_outputs(self, mode, depth, v_tensor_list, e_tensor_list=None, g_tensor_list=None,
vo_tensor_list=None, eo_tensor_list=None, go_tensor_list=None):
if not os.path.exists(os.path.join(self.output_folder, mode)):
os.makedirs(os.path.join(self.output_folder, mode))
if v_tensor_list is not None:
vertex_filepath = os.path.join(self.output_folder, mode, f'vertex_output_{depth}.pt')
torch.save([torch.unsqueeze(v_tensor, self._concat_axis) for v_tensor in v_tensor_list], vertex_filepath)
if e_tensor_list is not None:
edge_filepath = os.path.join(self.output_folder, mode, f'edge_output_{depth}.pt')
torch.save([torch.unsqueeze(e_tensor, self._concat_axis) for e_tensor in e_tensor_list], edge_filepath)
if g_tensor_list is not None:
graph_filepath = os.path.join(self.output_folder, mode, f'graph_output_{depth}.pt')
torch.save([torch.unsqueeze(g_tensor, self._concat_axis) for g_tensor in g_tensor_list], graph_filepath)
if vo_tensor_list is not None:
vertex_other_filepath = os.path.join(self.output_folder, mode, f'vertex_other_output_{depth}.pt')
torch.save([torch.unsqueeze(vo_tensor, self._concat_axis) for vo_tensor in vo_tensor_list],
vertex_other_filepath)
if eo_tensor_list is not None:
edge_other_filepath = os.path.join(self.output_folder, mode, f'edge_other_output_{depth}.pt')
torch.save([torch.unsqueeze(eo_tensor, self._concat_axis) for eo_tensor in eo_tensor_list],
edge_other_filepath)
if go_tensor_list is not None:
graph_other_filepath = os.path.join(self.output_folder, mode, f'graph_other_output_{depth}.pt')
torch.save([torch.unsqueeze(go_tensor, self._concat_axis) for go_tensor in go_tensor_list],
graph_other_filepath)
def run_valid(self, dataset_getter, logger):
"""
This function returns the training and validation or test accuracy
:return: (training accuracy, validation/test accuracy)
"""
batch_size = self.model_config.layer_config['batch_size']
arbitrary_logic_batch_size = self.model_config.layer_config['arbitrary_function_config']['batch_size']
shuffle = self.model_config.layer_config['shuffle'] \
if 'shuffle' in self.model_config.layer_config else True
arbitrary_logic_shuffle = self.model_config.layer_config['arbitrary_function_config']['shuffle'] \
if 'shuffle' in self.model_config.layer_config['arbitrary_function_config'] else True
# Instantiate the Dataset
dim_node_features = dataset_getter.get_dim_node_features()
dim_edge_features = dataset_getter.get_dim_edge_features()
dim_target = dataset_getter.get_dim_target()
layers = []
l_prec = self.model_config.layer_config['previous_layers_to_use'].split(',')
concatenate_axis = self.model_config.layer_config['concatenate_on_axis']
max_layers = self.model_config.layer_config['max_layers']
assert concatenate_axis > 0, 'You cannot concat on the first axis for design reasons.'
dict_per_layer = []
stop = False
depth = 1
while not stop and depth <= max_layers:
# Change exp path to allow Stop & Resume
self.exp_path = os.path.join(self.root_exp_path, f'layer_{depth}')
# load output will concatenate in reverse order
prev_outputs_to_consider = [(depth - int(x)) for x in l_prec if (depth - int(x)) > 0]
train_out = self._create_extra_dataset(prev_outputs_to_consider, mode='train', depth=depth)
val_out = self._create_extra_dataset(prev_outputs_to_consider, mode='validation', depth=depth)
train_loader = dataset_getter.get_inner_train(batch_size=batch_size, shuffle=shuffle, extra=train_out)
val_loader = dataset_getter.get_inner_val(batch_size=batch_size, shuffle=shuffle, extra=val_out)
# Instantiate the Model
new_layer = self.create_incremental_model(dim_node_features, dim_edge_features, dim_target, depth,
prev_outputs_to_consider)
# Instantiate the engine (it handles the training loop and the inference phase by abstracting the specifics)
incremental_training_engine = self.create_incremental_engine(new_layer)
train_loss, train_score, train_out, \
val_loss, val_score, val_out, \
_, _, _ = incremental_training_engine.train(train_loader=train_loader,
validation_loader=val_loader,
test_loader=None,
max_epochs=self.model_config.layer_config['epochs'],
logger=logger)
for loader, out, mode in [(train_loader, train_out, 'train'), (val_loader, val_out, 'validation')]:
v_out, e_out, g_out, vo_out, eo_out, go_out = out
# Reorder outputs, which are produced in shuffled order, to the original arrangement of the dataset.
v_out, e_out, g_out, vo_out, eo_out, go_out = self._reorder_shuffled_objects(v_out, e_out, g_out,
vo_out, eo_out, go_out,
loader)
# Store outputs
self._store_outputs(mode, depth, v_out, e_out, g_out, vo_out, eo_out, go_out)
# Consider all previous layers now, i.e. gather all the embeddings
prev_outputs_to_consider = [l for l in range(1, depth + 1)]
prev_outputs_to_consider.reverse() # load output will concatenate in reverse order
train_out = self._create_extra_dataset(prev_outputs_to_consider, mode='train', depth=depth)
val_out = self._create_extra_dataset(prev_outputs_to_consider, mode='validation', depth=depth)
train_loader = dataset_getter.get_inner_train(batch_size=arbitrary_logic_batch_size,
shuffle=arbitrary_logic_shuffle, extra=train_out)
val_loader = dataset_getter.get_inner_val(batch_size=arbitrary_logic_batch_size,
shuffle=arbitrary_logic_shuffle, extra=val_out)
# Change exp path to allow Stop & Resume
self.exp_path = os.path.join(self.root_exp_path, f'layer_{depth}_stopping_criterion')
# Stopping criterion based on training of the model
stop = new_layer.stopping_criterion(depth, max_layers, train_loss, train_score, val_loss, val_score,
dict_per_layer, self.model_config.layer_config, logger=logger)
# Change exp path to allow Stop & Resume
self.exp_path = os.path.join(self.root_exp_path, f'layer_{depth}_arbitrary_config')
if stop:
if 'CA' in self.model_config.layer_config:
# ECGMM
dim_features = new_layer.dim_node_features, new_layer.C * new_layer.depth + new_layer.CA * new_layer.depth if not new_layer.unibigram else (
new_layer.C + new_layer.CA + new_layer.C * new_layer.C) * new_layer.depth
else:
# CGMM
dim_features = new_layer.dim_node_features, new_layer.C * new_layer.depth if not new_layer.unibigram else (
new_layer.C + new_layer.C * new_layer.C) * new_layer.depth
config = self.model_config.layer_config['arbitrary_function_config']
device = config['device']
predictor_class = s2c(config['predictor'])
model = predictor_class(dim_node_features=dim_features,
dim_edge_features=0,
dim_target=dim_target,
config=config)
predictor_engine = self._create_engine(config, model, device, log_every=self.model_config.log_every)
train_loss, train_score, _, \
val_loss, val_score, _, \
_, _, _ = predictor_engine.train(train_loader=train_loader,
validation_loader=val_loader,
test_loader=None,
max_epochs=config['epochs'],
logger=logger)
d = {'train_loss': train_loss, 'train_score': train_score,
'validation_loss': val_loss, 'validation_score': val_score}
else:
d = {}
# Append layer
layers.append(new_layer)
dict_per_layer.append(d)
# Give priority to arbitrary function
stop = d['stop'] if 'stop' in d else stop
depth += 1
# CLEAR OUTPUTS TO SAVE SPACE
for mode in ['train', 'validation']:
shutil.rmtree(os.path.join(self.output_folder, mode), ignore_errors=True)
train_res = {LOSS: dict_per_layer[-1]['train_loss'], SCORE: dict_per_layer[-1]['train_score']}
val_res = {LOSS: dict_per_layer[-1]['validation_loss'], SCORE: dict_per_layer[-1]['validation_score']}
return train_res, val_res
def run_test(self, dataset_getter, logger):
"""
This function returns the training and test accuracy. DO NOT USE THE TEST FOR ANY REASON
:return: (training accuracy, test accuracy)
"""
batch_size = self.model_config.layer_config['batch_size']
arbitrary_logic_batch_size = self.model_config.layer_config['arbitrary_function_config']['batch_size']
shuffle = self.model_config.layer_config['shuffle'] \
if 'shuffle' in self.model_config.layer_config else True
arbitrary_logic_shuffle = self.model_config.layer_config['arbitrary_function_config']['shuffle'] \
if 'shuffle' in self.model_config.layer_config['arbitrary_function_config'] else True
# Instantiate the Dataset
dim_node_features = dataset_getter.get_dim_node_features()
dim_edge_features = dataset_getter.get_dim_edge_features()
dim_target = dataset_getter.get_dim_target()
layers = []
l_prec = self.model_config.layer_config['previous_layers_to_use'].split(',')
concatenate_axis = self.model_config.layer_config['concatenate_on_axis']
max_layers = self.model_config.layer_config['max_layers']
assert concatenate_axis > 0, 'You cannot concat on the first axis for design reasons.'
dict_per_layer = []
stop = False
depth = 1
while not stop and depth <= max_layers:
# Change exp path to allow Stop & Resume
self.exp_path = os.path.join(self.root_exp_path, f'layer_{depth}')
prev_outputs_to_consider = [(depth - int(x)) for x in l_prec if (depth - int(x)) > 0]
train_out = self._create_extra_dataset(prev_outputs_to_consider, mode='train', depth=depth)
val_out = self._create_extra_dataset(prev_outputs_to_consider, mode='validation', depth=depth)
test_out = self._create_extra_dataset(prev_outputs_to_consider, mode='test', depth=depth)
train_loader = dataset_getter.get_outer_train(batch_size=batch_size, shuffle=shuffle, extra=train_out)
val_loader = dataset_getter.get_outer_val(batch_size=batch_size, shuffle=shuffle, extra=val_out)
test_loader = dataset_getter.get_outer_test(batch_size=batch_size, shuffle=shuffle, extra=test_out)
# Instantiate the Model
new_layer = self.create_incremental_model(dim_node_features, dim_edge_features, dim_target,
depth, prev_outputs_to_consider)
# Instantiate the engine (it handles the training loop and inference phase by abstracting the specifics)
incremental_training_engine = self.create_incremental_engine(new_layer)
train_loss, train_score, train_out, \
val_loss, val_score, val_out, \
test_loss, test_score, test_out = incremental_training_engine.train(train_loader=train_loader,
validation_loader=val_loader,
test_loader=test_loader,
                                                                                 max_epochs=self.model_config.layer_config['epochs'],
logger=logger)
for loader, out, mode in [(train_loader, train_out, 'train'),
(val_loader, val_out, 'validation'),
(test_loader, test_out, 'test')]:
v_out, e_out, g_out, vo_out, eo_out, go_out = out
# Reorder outputs, which are produced in shuffled order, to the original arrangement of the dataset.
v_out, e_out, g_out, vo_out, eo_out, go_out = self._reorder_shuffled_objects(v_out, e_out, g_out,
vo_out, eo_out, go_out,
loader)
# Store outputs
self._store_outputs(mode, depth, v_out, e_out, g_out, vo_out, eo_out, go_out)
# Consider all previous layers now, i.e. gather all the embeddings
prev_outputs_to_consider = [l for l in range(1, depth + 1)]
            train_out = self._create_extra_dataset(prev_outputs_to_consider, mode='train', depth=depth,
                                                   only_g=True)
            val_out = self._create_extra_dataset(prev_outputs_to_consider, mode='validation', depth=depth,
                                                 only_g=True)
            test_out = self._create_extra_dataset(prev_outputs_to_consider, mode='test', depth=depth, only_g=True)
train_loader = dataset_getter.get_outer_train(batch_size=arbitrary_logic_batch_size,
shuffle=arbitrary_logic_shuffle, extra=train_out)
val_loader = dataset_getter.get_outer_val(batch_size=arbitrary_logic_batch_size,
shuffle=arbitrary_logic_shuffle, extra=val_out)
test_loader = dataset_getter.get_outer_test(batch_size=arbitrary_logic_batch_size,
shuffle=arbitrary_logic_shuffle, extra=test_out)
# Change exp path to allow Stop & Resume
self.exp_path = os.path.join(self.root_exp_path, f'layer_{depth}_stopping_criterion')
# Stopping criterion based on training of the model
stop = new_layer.stopping_criterion(depth, max_layers, train_loss, train_score, val_loss, val_score,
dict_per_layer, self.model_config.layer_config,
logger=logger)
# Change exp path to allow Stop & Resume
self.exp_path = os.path.join(self.root_exp_path, f'layer_{depth}_arbitrary_config')
if stop:
if 'CA' in self.model_config.layer_config:
# ECGMM
dim_features = new_layer.dim_node_features, new_layer.C * new_layer.depth + new_layer.CA * new_layer.depth if not new_layer.unibigram else (
new_layer.C + new_layer.CA + new_layer.C * new_layer.C) * new_layer.depth
else:
# CGMM
dim_features = new_layer.dim_node_features, new_layer.C * new_layer.depth if not new_layer.unibigram else (
new_layer.C + new_layer.C * new_layer.C) * new_layer.depth
config = self.model_config.layer_config['arbitrary_function_config']
device = config['device']
predictor_class = s2c(config['predictor'])
model = predictor_class(dim_node_features=dim_features,
dim_edge_features=0,
dim_target=dim_target,
config=config)
predictor_engine = self._create_engine(config, model, device, log_every=self.model_config.log_every)
train_loss, train_score, _, \
val_loss, val_score, _, \
test_loss, test_score, _ = predictor_engine.train(train_loader=train_loader,
validation_loader=val_loader,
test_loader=test_loader,
max_epochs=config['epochs'],
logger=logger)
d = {'train_loss': train_loss, 'train_score': train_score,
'validation_loss': val_loss, 'validation_score': val_score,
'test_loss': test_loss, 'test_score': test_score}
else:
d = {}
# Append layer
layers.append(new_layer)
dict_per_layer.append(d)
# Give priority to arbitrary function
stop = d['stop'] if 'stop' in d else stop
depth += 1
# CLEAR OUTPUTS TO SAVE SPACE
for mode in ['train', 'validation', 'test']:
shutil.rmtree(os.path.join(self.output_folder, mode), ignore_errors=True)
# Use last training and test scores
train_res = {LOSS: dict_per_layer[-1]['train_loss'], SCORE: dict_per_layer[-1]['train_score']}
val_res = {LOSS: dict_per_layer[-1]['validation_loss'], SCORE: dict_per_layer[-1]['validation_score']}
test_res = {LOSS: dict_per_layer[-1]['test_loss'], SCORE: dict_per_layer[-1]['test_score']}
return train_res, val_res, test_res
| 25,222 | 54.557269 | 240 | py |
CGMM | CGMM-master/util.py | from typing import Optional, Tuple, List
import torch
import torch_geometric
def extend_lists(data_list: Optional[Tuple[Optional[List[torch.Tensor]]]],
new_data_list: Tuple[Optional[List[torch.Tensor]]]) -> Tuple[Optional[List[torch.Tensor]]]:
r"""
Extends the semantic of Python :func:`extend()` over lists to tuples
Used e.g., to concatenate results of mini-batches in incremental architectures such as :obj:`CGMM`
Args:
data_list: tuple of lists, or ``None`` if there is no list to extend.
new_data_list: object of the same form of :obj:`data_list` that has to be concatenated
Returns:
the tuple of extended lists
"""
if data_list is None:
return new_data_list
assert len(data_list) == len(new_data_list)
for i in range(len(data_list)):
if new_data_list[i] is not None:
data_list[i].extend(new_data_list[i])
return data_list
def to_tensor_lists(embeddings: Tuple[Optional[torch.Tensor]],
batch: torch_geometric.data.batch.Batch,
edge_index: torch.Tensor) -> Tuple[Optional[List[torch.Tensor]]]:
r"""
Reverts batched outputs back to a list of Tensors elements.
Can be useful to build incremental architectures such as :obj:`CGMM` that store intermediate results
before training the next layer.
Args:
embeddings (tuple): a tuple of embeddings :obj:`(vertex_output, edge_output, graph_output, vertex_extra_output, edge_extra_output, graph_extra_output)`.
Each embedding can be a :class:`torch.Tensor` or ``None``.
batch (:class:`torch_geometric.data.batch.Batch`): Batch information used to split the tensors.
edge_index (:class:`torch.Tensor`): a :obj:`2 x num_edges` tensor as defined in Pytorch Geometric.
Used to split edge Tensors graph-wise.
Returns:
a tuple with the same semantics as the argument ``embeddings``, but this time each element holds a list of
Tensors, one for each graph in the dataset.
"""
# Crucial: Detach the embeddings to free the computation graph!!
# TODO this code can surely be made more compact, but leave it as is until future refactoring or removal from PyDGN.
v_out, e_out, g_out, vo_out, eo_out, go_out = embeddings
v_out = v_out.detach() if v_out is not None else None
v_out_list = [] if v_out is not None else None
e_out = e_out.detach() if e_out is not None else None
e_out_list = [] if e_out is not None else None
g_out = g_out.detach() if g_out is not None else None
g_out_list = [] if g_out is not None else None
vo_out = vo_out.detach() if vo_out is not None else None
vo_out_list = [] if vo_out is not None else None
eo_out = eo_out.detach() if eo_out is not None else None
eo_out_list = [] if eo_out is not None else None
go_out = go_out.detach() if go_out is not None else None
go_out_list = [] if go_out is not None else None
_, node_counts = torch.unique_consecutive(batch, return_counts=True)
node_cumulative = torch.cumsum(node_counts, dim=0)
if e_out is not None or eo_out is not None:
edge_batch = batch[edge_index[0]]
_, edge_counts = torch.unique_consecutive(edge_batch, return_counts=True)
edge_cumulative = torch.cumsum(edge_counts, dim=0)
if v_out_list is not None:
v_out_list.append(v_out[:node_cumulative[0]])
if e_out_list is not None:
e_out_list.append(e_out[:edge_cumulative[0]])
if g_out_list is not None:
g_out_list.append(g_out[0].unsqueeze(0)) # recreate batch dimension by unsqueezing
if vo_out_list is not None:
vo_out_list.append(vo_out[:node_cumulative[0]])
if eo_out_list is not None:
eo_out_list.append(eo_out[:edge_cumulative[0]])
if go_out_list is not None:
go_out_list.append(go_out[0].unsqueeze(0)) # recreate batch dimension by unsqueezing
for i in range(1, len(node_cumulative)):
if v_out_list is not None:
v_out_list.append(v_out[node_cumulative[i - 1]:node_cumulative[i]])
if e_out_list is not None:
e_out_list.append(e_out[edge_cumulative[i - 1]:edge_cumulative[i]])
if g_out_list is not None:
g_out_list.append(g_out[i].unsqueeze(0)) # recreate batch dimension by unsqueezing
if vo_out_list is not None:
vo_out_list.append(vo_out[node_cumulative[i - 1]:node_cumulative[i]])
if eo_out_list is not None:
eo_out_list.append(eo_out[edge_cumulative[i - 1]:edge_cumulative[i]])
if go_out_list is not None:
go_out_list.append(go_out[i].unsqueeze(0)) # recreate batch dimension by unsqueezing
return v_out_list, e_out_list, g_out_list, vo_out_list, eo_out_list, go_out_list
def compute_unigram(posteriors: torch.Tensor, use_continuous_states: bool) -> torch.Tensor:
r"""
Computes the unigram representation of nodes as defined in https://www.jmlr.org/papers/volume21/19-470/19-470.pdf
Args:
posteriors (torch.Tensor): tensor of posterior distributions of nodes with shape `(#nodes,num_latent_states)`
use_continuous_states (bool): whether or not to use the most probable state (one-hot vector) or a "soft" version
Returns:
a tensor of unigrams with shape `(#nodes,num_latent_states)`
"""
num_latent_states = posteriors.shape[1]
if use_continuous_states:
node_embeddings_batch = posteriors
else:
node_embeddings_batch = make_one_hot(posteriors.argmax(dim=1), num_latent_states)
return node_embeddings_batch.double()
def compute_bigram(posteriors: torch.Tensor, edge_index: torch.Tensor, batch: torch.Tensor,
use_continuous_states: bool) -> torch.Tensor:
r"""
Computes the bigram representation of nodes as defined in https://www.jmlr.org/papers/volume21/19-470/19-470.pdf
Args:
posteriors (torch.Tensor): tensor of posterior distributions of nodes with shape `(#nodes,num_latent_states)`
edge_index (torch.Tensor): tensor of edge indices with shape `(2,#edges)` that adheres to PyG specifications
batch (torch.Tensor): vector that assigns each node to a graph id in the batch
use_continuous_states (bool): whether or not to use the most probable state (one-hot vector) or a "soft" version
Returns:
a tensor of bigrams with shape `(#nodes,num_latent_states*num_latent_states)`
"""
C = posteriors.shape[1]
device = posteriors.get_device()
device = 'cpu' if device == -1 else device
if use_continuous_states:
# Code provided by Daniele Atzeni to speed up the computation!
nodes_in_batch = len(batch)
sparse_adj_matrix = torch.sparse.FloatTensor(edge_index,
torch.ones(edge_index.shape[1]).to(device),
torch.Size([nodes_in_batch, nodes_in_batch]))
tmp1 = torch.sparse.mm(sparse_adj_matrix, posteriors.float()).repeat(1, C)
tmp2 = posteriors.reshape(-1, 1).repeat(1, C).reshape(-1, C * C)
node_bigram_batch = torch.mul(tmp1, tmp2)
else:
# Convert into one hot
posteriors_one_hot = make_one_hot(posteriors.argmax(dim=1), C).float()
# Code provided by Daniele Atzeni to speed up the computation!
nodes_in_batch = len(batch)
sparse_adj_matrix = torch.sparse.FloatTensor(edge_index,
torch.ones(edge_index.shape[1]).to(device),
torch.Size([nodes_in_batch, nodes_in_batch]))
tmp1 = torch.sparse.mm(sparse_adj_matrix, posteriors_one_hot).repeat(1, C)
tmp2 = posteriors_one_hot.reshape(-1, 1).repeat(1, C).reshape(-1, C * C)
node_bigram_batch = torch.mul(tmp1, tmp2)
return node_bigram_batch.double()
def make_one_hot(labels: torch.Tensor, num_unique_ids: torch.Tensor) -> torch.Tensor:
r"""
Converts a vector of ids into a one-hot matrix
Args:
labels (torch.Tensor): the vector of ids
num_unique_ids (torch.Tensor): number of unique ids
Returns:
a one-hot tensor with shape `(samples,num_unique_ids)`
"""
device = labels.get_device()
device = 'cpu' if device == -1 else device
one_hot = torch.zeros(labels.size(0), num_unique_ids).to(device)
one_hot[torch.arange(labels.size(0)).to(device), labels] = 1
return one_hot
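# (Added sketch, not part of the original repo.) Minimal usage of the helpers
# above on a 3-node toy example with 2 latent states:
def _unigram_sketch():
    posteriors = torch.tensor([[0.9, 0.1],
                               [0.2, 0.8],
                               [0.5, 0.5]])
    soft = compute_unigram(posteriors, use_continuous_states=True)   # posteriors as-is
    hard = compute_unigram(posteriors, use_continuous_states=False)  # one-hot argmax rows
    assert soft.shape == hard.shape == (3, 2)
    return soft, hard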
| 8,585 | 41.50495 | 160 | py |
CGMM | CGMM-master/__init__.py | 0 | 0 | 0 | py |
|
CGMM | CGMM-master/incremental_engine.py | import torch
from pydgn.training.engine import TrainingEngine
from util import extend_lists, to_tensor_lists
class IncrementalTrainingEngine(TrainingEngine):
def __init__(self, engine_callback, model, loss, **kwargs):
super().__init__(engine_callback, model, loss, **kwargs)
def _to_list(self, data_list, embeddings, batch, edge_index, y):
if isinstance(embeddings, tuple):
embeddings = tuple([e.detach().cpu() if e is not None else None for e in embeddings])
elif isinstance(embeddings, torch.Tensor):
embeddings = embeddings.detach().cpu()
else:
raise NotImplementedError('Embeddings not understood, should be Tensor or Tuple of Tensors')
data_list = extend_lists(data_list, to_tensor_lists(embeddings, batch, edge_index))
return data_list | 838 | 40.95 | 104 | py |
CGMM | CGMM-master/cgmm_classifier_task.py | import os
import torch
from cgmm_incremental_task import CGMMTask
from pydgn.experiment.util import s2c
from pydgn.static import LOSS, SCORE
from torch_geometric.data import Data
from torch_geometric.loader import DataLoader
# This works with graph classification only
class ClassifierCGMMTask(CGMMTask):
def run_valid(self, dataset_getter, logger):
"""
This function returns the training and validation or test accuracy
:return: (training accuracy, validation/test accuracy)
"""
# Necessary info to give a unique name to the dataset (some hyper-params like epochs are assumed to be fixed)
embeddings_folder = self.model_config.layer_config['embeddings_folder']
max_layers = self.model_config.layer_config['max_layers']
layers = self.model_config.layer_config['layers']
unibigram = self.model_config.layer_config['unibigram']
C = self.model_config.layer_config['C']
CA = self.model_config.layer_config['CA'] if 'CA' in self.model_config.layer_config else None
aggregation = self.model_config.layer_config['aggregation']
infer_with_posterior = self.model_config.layer_config['infer_with_posterior']
outer_k = dataset_getter.outer_k
inner_k = dataset_getter.inner_k
# ====
base_path = os.path.join(embeddings_folder, dataset_getter.dataset_name,
f'{max_layers}_{unibigram}_{C}_{CA}_{aggregation}_{infer_with_posterior}_{outer_k + 1}_{inner_k + 1}')
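        # (Added comment.) The file name encodes every hyper-parameter that
        # determines the embeddings, so precomputed tensors of shape
        # (num_graphs, max_layers, feature_dim) can be shared across configs;
        # they are sliced to the first `layers` layers and flattened below.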
train_out_emb = torch.load(base_path + '_train.torch')[:, :layers, :]
val_out_emb = torch.load(base_path + '_val.torch')[:, :layers, :]
train_out_emb = torch.reshape(train_out_emb, (train_out_emb.shape[0], -1))
val_out_emb = torch.reshape(val_out_emb, (val_out_emb.shape[0], -1))
# Recover the targets
fake_train_loader = dataset_getter.get_inner_train(batch_size=1, shuffle=False)
fake_val_loader = dataset_getter.get_inner_val(batch_size=1, shuffle=False)
train_y = [el.y for el in fake_train_loader.dataset]
val_y = [el.y for el in fake_val_loader.dataset]
arbitrary_logic_batch_size = self.model_config.layer_config['arbitrary_function_config']['batch_size']
arbitrary_logic_shuffle = self.model_config.layer_config['arbitrary_function_config']['shuffle'] \
if 'shuffle' in self.model_config.layer_config['arbitrary_function_config'] else True
# build data lists
train_list = [Data(x=train_out_emb[i].unsqueeze(0), y=train_y[i]) for i in range(train_out_emb.shape[0])]
val_list = [Data(x=val_out_emb[i].unsqueeze(0), y=val_y[i]) for i in range(val_out_emb.shape[0])]
train_loader = DataLoader(train_list, batch_size=arbitrary_logic_batch_size, shuffle=arbitrary_logic_shuffle)
val_loader = DataLoader(val_list, batch_size=arbitrary_logic_batch_size, shuffle=arbitrary_logic_shuffle)
# Instantiate the Dataset
dim_features = train_out_emb.shape[1]
dim_target = dataset_getter.get_dim_target()
config = self.model_config.layer_config['arbitrary_function_config']
device = config['device']
predictor_class = s2c(config['readout'])
model = predictor_class(dim_node_features=dim_features,
dim_edge_features=0,
dim_target=dim_target,
config=config)
predictor_engine = self._create_engine(config, model, device, evaluate_every=self.model_config.evaluate_every)
train_loss, train_score, _, \
val_loss, val_score, _, \
_, _, _ = predictor_engine.train(train_loader=train_loader,
validation_loader=val_loader,
test_loader=None,
max_epochs=config['epochs'],
logger=logger)
train_res = {LOSS: train_loss, SCORE: train_score}
val_res = {LOSS: val_loss, SCORE: val_score}
return train_res, val_res
def run_test(self, dataset_getter, logger):
"""
This function returns the training and test accuracy. DO NOT USE THE TEST FOR ANY REASON
:return: (training accuracy, test accuracy)
"""
# Necessary info to give a unique name to the dataset (some hyper-params like epochs are assumed to be fixed)
embeddings_folder = self.model_config.layer_config['embeddings_folder']
max_layers = self.model_config.layer_config['max_layers']
layers = self.model_config.layer_config['layers']
unibigram = self.model_config.layer_config['unibigram']
C = self.model_config.layer_config['C']
CA = self.model_config.layer_config['CA'] if 'CA' in self.model_config.layer_config else None
aggregation = self.model_config.layer_config['aggregation']
infer_with_posterior = self.model_config.layer_config['infer_with_posterior']
outer_k = dataset_getter.outer_k
inner_k = dataset_getter.inner_k
if inner_k is None: # workaround the "safety" procedure of evaluation protocol, but we will not do anything wrong.
dataset_getter.set_inner_k(0)
inner_k = 0 # pick the split of the first inner fold
# ====
# NOTE: We reload the associated inner train and val splits, using the outer_test for assessment.
# This is slightly different from standard exps, where we compute a different outer train-val split, but it should not change things much.
base_path = os.path.join(embeddings_folder, dataset_getter.dataset_name,
f'{max_layers}_{unibigram}_{C}_{CA}_{aggregation}_{infer_with_posterior}_{outer_k + 1}_{inner_k + 1}')
train_out_emb = torch.load(base_path + '_train.torch')[:, :layers, :]
val_out_emb = torch.load(base_path + '_val.torch')[:, :layers, :]
test_out_emb = torch.load(base_path + '_test.torch')[:, :layers, :]
train_out_emb = torch.reshape(train_out_emb, (train_out_emb.shape[0], -1))
val_out_emb = torch.reshape(val_out_emb, (val_out_emb.shape[0], -1))
test_out_emb = torch.reshape(test_out_emb, (test_out_emb.shape[0], -1))
# Recover the targets
fake_train_loader = dataset_getter.get_inner_train(batch_size=1, shuffle=False)
fake_val_loader = dataset_getter.get_inner_val(batch_size=1, shuffle=False)
fake_test_loader = dataset_getter.get_outer_test(batch_size=1, shuffle=False)
train_y = [el.y for el in fake_train_loader.dataset]
val_y = [el.y for el in fake_val_loader.dataset]
test_y = [el.y for el in fake_test_loader.dataset]
arbitrary_logic_batch_size = self.model_config.layer_config['arbitrary_function_config']['batch_size']
arbitrary_logic_shuffle = self.model_config.layer_config['arbitrary_function_config']['shuffle'] \
if 'shuffle' in self.model_config.layer_config['arbitrary_function_config'] else True
# build data lists
train_list = [Data(x=train_out_emb[i].unsqueeze(0), y=train_y[i]) for i in range(train_out_emb.shape[0])]
val_list = [Data(x=val_out_emb[i].unsqueeze(0), y=val_y[i]) for i in range(val_out_emb.shape[0])]
test_list = [Data(x=test_out_emb[i].unsqueeze(0), y=test_y[i]) for i in range(test_out_emb.shape[0])]
train_loader = DataLoader(train_list, batch_size=arbitrary_logic_batch_size, shuffle=arbitrary_logic_shuffle)
val_loader = DataLoader(val_list, batch_size=arbitrary_logic_batch_size, shuffle=arbitrary_logic_shuffle)
test_loader = DataLoader(test_list, batch_size=arbitrary_logic_batch_size, shuffle=arbitrary_logic_shuffle)
# Instantiate the Dataset
dim_features = train_out_emb.shape[1]
dim_target = dataset_getter.get_dim_target()
config = self.model_config.layer_config['arbitrary_function_config']
device = config['device']
predictor_class = s2c(config['readout'])
model = predictor_class(dim_node_features=dim_features,
dim_edge_features=0,
dim_target=dim_target,
config=config)
predictor_engine = self._create_engine(config, model, device, evaluate_every=self.model_config.evaluate_every)
train_loss, train_score, _, \
val_loss, val_score, _, \
test_loss, test_score, _ = predictor_engine.train(train_loader=train_loader,
validation_loader=val_loader,
test_loader=test_loader,
max_epochs=config['epochs'],
logger=logger)
train_res = {LOSS: train_loss, SCORE: train_score}
val_res = {LOSS: val_loss, SCORE: val_score}
test_res = {LOSS: test_loss, SCORE: test_score}
return train_res, val_res, test_res
| 9,149 | 55.481481 | 146 | py |
CGMM | CGMM-master/provider.py | import random
import numpy as np
from pydgn.data.dataset import ZipDataset
from pydgn.data.provider import DataProvider
from pydgn.data.sampler import RandomSampler
from torch.utils.data import Subset
def seed_worker(exp_seed, worker_id):
np.random.seed(exp_seed + worker_id)
random.seed(exp_seed + worker_id)
class IncrementalDataProvider(DataProvider):
"""
An extension of the DataProvider class to deal with the intermediate outputs produced by incremental architectures
Used by CGMM to deal with node/graph classification/regression.
"""
def _get_loader(self, indices, **kwargs):
"""
Takes the "extra" argument from kwargs and zips it together with the original data into a ZipDataset
:param indices: indices of the subset of the data to be extracted
:param kwargs: an arbitrary dictionary
:return: a DataLoader
"""
dataset = self._get_dataset()
dataset = Subset(dataset, indices)
dataset_extra = kwargs.pop("extra", None)
if dataset_extra is not None and isinstance(dataset_extra, list) and len(dataset_extra) > 0:
assert len(dataset) == len(dataset_extra), (dataset, dataset_extra)
datasets = [dataset, dataset_extra]
dataset = ZipDataset(datasets)
elif dataset_extra is None or (isinstance(dataset_extra, list) and len(dataset_extra) == 0):
pass
else:
raise NotImplementedError("Check that extra is None, an empty list or a non-empty list")
shuffle = kwargs.pop("shuffle", False)
assert self.exp_seed is not None, 'DataLoader seed has not been specified! Is this a bug?'
        kwargs['worker_init_fn'] = lambda worker_id: seed_worker(self.exp_seed, worker_id)
kwargs.update(self.data_loader_args)
if shuffle is True:
sampler = RandomSampler(dataset)
dataloader = self.data_loader_class(dataset, sampler=sampler,
**kwargs)
else:
dataloader = self.data_loader_class(dataset, shuffle=False,
**kwargs)
return dataloader
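# (Added note, not part of the original repo.) When `extra` is provided, each
# item of the returned loader is the index-wise pair (dataset[i],
# dataset_extra[i]) produced by ZipDataset, so the outputs of previous CGMM
# layers travel together with the corresponding raw graphs.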
| 2,201 | 37.631579 | 118 | py |
CGMM | CGMM-master/optimizer.py | from pydgn.training.callback.optimizer import Optimizer
from pydgn.training.event.handler import EventHandler
class CGMMOptimizer(Optimizer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def on_eval_epoch_start(self, state):
"""
Use the "return_node_embeddings" field of the state to decide whether to compute statistics or not during
this evaluation epoch
:param state: the shared State object
"""
state.model.return_node_embeddings = state.return_node_embeddings
# Not necessary, but it may help to debug
def on_eval_epoch_end(self, state):
"""
Reset the "return_node_embeddings" field to False
:param state:
:return:
"""
state.model.return_node_embeddings = False
def on_training_epoch_end(self, state):
"""
Calls the M_step to update the parameters
:param state: the shared State object
:return:
"""
state.model.m_step()
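# (Added note, not part of the original repo.) Design choice: CGMM is trained
# with EM rather than gradient descent, so this "optimizer" never updates
# weights on backward; the end of each training epoch simply triggers the
# model's M-step, which normalizes the statistics accumulated by the E-steps.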
| 1,014 | 29.757576 | 113 | py |
BVQI | BVQI-master/temporal_naturalness.py | import torch
import argparse
import pickle as pkl
import numpy as np
import math
import torch.nn.functional as F
import yaml
from scipy.stats import pearsonr, spearmanr
from scipy.stats import kendalltau as kendallr
from tqdm import tqdm
from sklearn import decomposition
import time
from buona_vista import datasets
from V1_extraction.gabor_filter import GaborFilters
from V1_extraction.utilities import compute_v1_curvature, compute_discrete_v1_curvature
class PCA:
def __init__(self, n_components):
self.n_components = n_components
def fit_transform(self, X):
# Center the data
X_centered = X - X.mean(dim=0)
# Compute the SVD
U, S, V = torch.svd(X_centered)
# Compute the principal components
components = V[:, :self.n_components]
# Project the data onto the principal components
scores = torch.matmul(X_centered, components)
return scores
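# (Added sketch, not part of the original repo.) Minimal use of the PCA above,
# mirroring how `fit_transform` is applied to the V1 features further below:
def _pca_sketch():
    feats = torch.randn(100, 32)  # e.g. 100 frames, 32-dim features
    scores = PCA(n_components=5).fit_transform(feats)
    assert scores.shape == (100, 5)
    return scores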
def rescale(x):
x = np.array(x)
x = (x - x.mean()) / x.std()
return 1 / (1 + np.exp(x))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o",
"--opt",
type=str,
default="buona_vista_tn_index.yml",
help="the option file",
)
parser.add_argument(
"-d", "--device", type=str, default="cuda", help="the running device"
)
args = parser.parse_args()
results = {}
with open(args.opt, "r") as f:
opt = yaml.safe_load(f)
scale = 6
orientations = 8
kernel_size = 39
row_downsample = 4
column_downsample = 4
pca_d = 5
frame_bs = 32
pca = PCA(pca_d)
gb = GaborFilters(scale,
orientations, (kernel_size - 1) // 2,
row_downsample,
column_downsample,
device=args.device
)
val_datasets = {}
for name, dataset in opt["data"].items():
val_datasets[name] = getattr(datasets, dataset["type"])(dataset["args"])
for val_name, val_dataset in val_datasets.items():
prs, gts = [], []
results[val_name] = {"gt": [], "tn_index": []}
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=1, num_workers=opt["num_workers"], pin_memory=True,
)
for i, data in enumerate(tqdm(val_loader, desc=f"Evaluating in dataset [{val_name}].")):
with torch.no_grad():
video_frames = data["original_tn"].squeeze(0).to(args.device).transpose(0,1)
if video_frames.shape[-1] > 600:
video_frames = F.interpolate(video_frames, (270,480))
video_frames = video_frames.mean(1,keepdim=True)
zero_frames = torch.zeros(video_frames.shape).to(args.device)
complex_frames = torch.stack((video_frames, zero_frames), -1)
video_frames = torch.view_as_complex(complex_frames)
v1_features = []
                for j in range((video_frames.shape[0] - 1) // frame_bs):
                    these_frames = video_frames[j * frame_bs:(j + 1) * frame_bs]
                    with torch.no_grad():
                        these_features = gb(these_frames)
                    v1_features.append(these_features)
last_start = ((video_frames.shape[0] - 1) // frame_bs) * frame_bs
v1_features += [gb(video_frames[last_start:])]
v1_features = torch.cat(v1_features, 0)
v1_features = torch.nan_to_num(v1_features)
v1_PCA = pca.fit_transform(v1_features)
v1_score = compute_v1_curvature(v1_PCA.cpu().numpy(), fsize=8)
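                # (Added comment, an assumption from the surrounding code.)
                # This follows the "perceptual straightening" idea: natural
                # videos trace smooth, low-curvature trajectories in V1-like
                # feature space, so the mean curvature of the PCA-reduced
                # features serves as the temporal-naturalness score.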
try:
temporal_naturalness_index = math.log(np.mean(v1_score))
except:
#print(np.mean(v1_score))
temporal_naturalness_index = min(prs) - 1
results[val_name]["tn_index"].append(temporal_naturalness_index)
if not np.isnan(temporal_naturalness_index):
prs.append(temporal_naturalness_index)
gts.append(data["gt_label"][0].item())
#if i % 200 == 0:
#print(i)
#print(spearmanr(prs, gts)[0])
# Sigmoid-like Rescaling
prs = rescale(prs)
#results[val_name]["tn_index"] = rescale(results[val_name]["tn_index"])
with open("temporal_naturalness_39.pkl", "wb") as f:
pkl.dump(results, f)
print(
"Dataset:",
val_name,
"Length:",
len(val_dataset),
"SRCC:",
spearmanr(prs, gts)[0],
"PLCC:",
pearsonr(prs, gts)[0],
"KRCC:",
kendallr(prs, gts)[0],
) | 4,901 | 30.625806 | 96 | py |
BVQI | BVQI-master/semantic_affinity.py | ## Contributed by Teo Haoning Wu, Daniel Annan Wang
import argparse
import pickle as pkl
import open_clip
import numpy as np
import torch
import yaml
from scipy.stats import pearsonr, spearmanr
from scipy.stats import kendalltau as kendallr
from tqdm import tqdm
from buona_vista import datasets
def rescale(x):
x = np.array(x)
x = (x - x.mean()) / x.std()
return 1 / (1 + np.exp(-x))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o",
"--opt",
type=str,
default="./buona_vista_sa_index.yml",
help="the option file",
)
parser.add_argument(
"-d", "--device", type=str, default="cuda", help="the running device"
)
parser.add_argument(
"-l", "--local", action="store_true", help="Use BVQI-Local"
)
args = parser.parse_args()
with open(args.opt, "r") as f:
opt = yaml.safe_load(f)
val_datasets = {}
for name, dataset in opt["data"].items():
val_datasets[name] = getattr(datasets, dataset["type"])(dataset["args"])
print(open_clip.list_pretrained())
model, _, preprocess = open_clip.create_model_and_transforms("RN50",pretrained="openai")
model = model.to(args.device)
print("loading succeed")
texts = [
"a high quality photo",
"a low quality photo",
"a good photo",
"a bad photo",
]
tokenizer = open_clip.get_tokenizer("RN50")
text_tokens = tokenizer(texts).to(args.device)
print(f"Prompt_loading_succeed, {texts}")
results = {}
for val_name, val_dataset in val_datasets.items():
prs, gts = [], []
results[val_name] = {"gt": [], "sa_index": [], "raw_index": []}
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=1, num_workers=opt["num_workers"], pin_memory=True,
)
for i, data in enumerate(tqdm(val_loader, desc=f"Evaluating in dataset [{val_name}].")):
video_frames = data["aesthetic"].squeeze(0)
image_input = torch.transpose(video_frames, 0, 1).to(args.device)
with torch.no_grad():
image_features = model.encode_image(image_input).float() #.mean(0)
text_features = model.encode_text(text_tokens).float()
logits_per_image = image_features @ text_features.T
#logits_per_image = logits_per_image.softmax(dim=-1)
#logits_per_image, logits_per_text = model(image_input, text_tokens)
probs_a = logits_per_image.cpu().numpy()
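                # (Added comment.) The prompts are arranged as antonym pairs,
                # (high quality, low quality) and (good, bad); the affinity
                # index sums, over both pairs, the positive-prompt logit minus
                # the negative-prompt logit.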
semantic_affinity_index = 0
for k in [0,1]:
                    pn_pair = probs_a[..., 2 * k : 2 * k + 2]
                    semantic_affinity_index += pn_pair[..., 0] - pn_pair[..., 1]
if args.local:
# Use the local feature after AttnPooling
prs.append(semantic_affinity_index[1:].mean())
else:
# Use the global feature after AttnPooling
prs.append(semantic_affinity_index[0].mean())
results[val_name]["gt"].append(data["gt_label"][0].item())
gts.append(data["gt_label"][0].item())
results[val_name]["raw_index"].append(semantic_affinity_index)
prs = rescale(prs)
with open("semantic_affinity_pubs.pkl", "wb") as f:
results[val_name]["sa_index"] = prs
pkl.dump(results, f)
print(
"Dataset:",
val_name,
"Length:",
len(val_dataset),
"SRCC:",
spearmanr(prs, gts)[0],
"PLCC:",
pearsonr(prs, gts)[0],
"KRCC:",
kendallr(prs, gts)[0],
)
| 3,856 | 30.614754 | 96 | py |
BVQI | BVQI-master/spatial_naturalness.py | # Contributed by Teo Haoning Wu, Erli Zhang Karl
import argparse
import glob
import math
import os
import pickle as pkl
from collections import OrderedDict
import decord
import numpy as np
import torch
import torchvision as tv
import yaml
from pyiqa import create_metric
from pyiqa.default_model_configs import DEFAULT_CONFIGS
from pyiqa.utils.img_util import imread2tensor
from pyiqa.utils.registry import ARCH_REGISTRY
from scipy.stats import kendalltau as kendallr
from scipy.stats import pearsonr, spearmanr
from tqdm import tqdm
from torch.nn.functional import interpolate
from buona_vista.datasets import ViewDecompositionDataset
from skvideo.measure import niqe
def rescale(x):
x = np.array(x)
x = (x - x.mean()) / x.std()
return 1 / (1 + np.exp(x))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o",
"--opt",
type=str,
default="./buona_vista_sn_index.yml",
help="the option file",
)
parser.add_argument(
"-d", "--device", type=str, default="cuda", help="the running device"
)
args = parser.parse_args()
with open(args.opt, "r") as f:
opt = yaml.safe_load(f)
metric_name = "niqe"
# set up IQA model
iqa_model = create_metric(metric_name, metric_mode="NR")
# pbar = tqdm(total=test_img_num, unit='image')
lower_better = DEFAULT_CONFIGS[metric_name].get("lower_better", False)
device = args.device
net_opts = OrderedDict()
kwargs = {}
if metric_name in DEFAULT_CONFIGS.keys():
default_opt = DEFAULT_CONFIGS[metric_name]["metric_opts"]
net_opts.update(default_opt)
# then update with custom setting
net_opts.update(kwargs)
network_type = net_opts.pop("type")
net = ARCH_REGISTRY.get(network_type)(**net_opts)
net = net.to(device)
for key in opt["data"].keys():
if "val" not in key and "test" not in key:
continue
dopt = opt["data"][key]["args"]
val_dataset = ViewDecompositionDataset(dopt)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=1, num_workers=opt["num_workers"], pin_memory=True,
)
pr_labels, gt_labels = [], []
for data in tqdm(val_loader, desc=f"Evaluating in dataset [{key}]."):
target = (
data["original"].squeeze(0).transpose(0, 1)
) # C, T, H, W to N(T), C, H, W
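            # (Added comment, an assumption about pyiqa's NIQE.) NIQE needs a
            # minimum spatial extent, so the short side is upscaled to 224
            # before scoring.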
if min(target.shape[2:]) < 224:
target = interpolate(target, scale_factor = 224 / min(target.shape[2:]))
with torch.no_grad():
score = net((target.to(device))).mean().item()
if math.isnan(score):
print(score, target.shape)
score = max(pr_labels) + 1
#with open(output_result_csv, "a") as w:
# w.write(f'{data["name"][0]}, {score}\n')
pr_labels.append(score)
gt_labels.append(data["gt_label"].item())
pr_labels = rescale(pr_labels)
s = spearmanr(gt_labels, pr_labels)[0]
p = pearsonr(gt_labels, pr_labels)[0]
k = kendallr(gt_labels, pr_labels)[0]
with open(f"spatial_naturalness_{key}.pkl", "wb") as f:
pkl.dump({"pr_labels": pr_labels,
"gt_labels": gt_labels}, f)
print(s, p, k)
| 3,380 | 28.657895 | 88 | py |
BVQI | BVQI-master/prompt_tuning.py | import os, glob
import argparse
import pickle as pkl
import random
from copy import deepcopy
import open_clip
import numpy as np
import torch
import torch.nn as nn
import yaml
from scipy.stats import pearsonr, spearmanr
from scipy.stats import kendalltau as kendallr
from tqdm import tqdm
from buona_vista import datasets
from load_features import get_features
class TextEncoder(nn.Module):
def __init__(self, clip_model):
super().__init__()
self.transformer = clip_model.transformer
self.positional_embedding = clip_model.positional_embedding
self.ln_final = clip_model.ln_final
self.text_projection = clip_model.text_projection
self.dtype = clip_model.transformer.get_cast_dtype()
self.attn_mask = clip_model.attn_mask
def forward(self, prompts, tokenized_prompts):
x = prompts + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x, attn_mask=self.attn_mask)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), tokenized_prompts.argmax(dim=-1)] @ self.text_projection
return x
class MLP(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super().__init__()
self.in_ln = nn.Linear(in_channels, hidden_channels, bias=False)
self.out_ln = nn.Linear(hidden_channels, out_channels, bias=False)
self.gelu = nn.GELU()
self.dropout = nn.Dropout(0.5)
self.bn = nn.BatchNorm2d(1, affine=False)
def forward(self, x):
bef_norm = self.out_ln(self.dropout(self.gelu(self.in_ln(x)))).squeeze(-1)
return (torch.sigmoid(self.bn(bef_norm[:, None, :, :])))
class FFN(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.ln = nn.Linear(in_channels, 1, bias=False)
self.bn = nn.BatchNorm2d(1, affine=False)
def forward(self, x):
bef_norm = self.ln(x).squeeze(-1)
return (torch.sigmoid(self.bn(bef_norm[:, None, :, :])))
class VisualFeatureDataset(torch.utils.data.Dataset):
def __init__(self, dataset_name, indices=None):
super().__init__()
        if indices is None:
indices = range(len(sn[dataset_name]))
print("Using all indices:", indices)
self.temporal = [tn2[dataset_name][ind] for ind in indices]
self.spatial = [sn[dataset_name][ind] for ind in indices]
self.clip_visual_features = [visual_features[dataset_name][ind] for ind in indices]
self.gts = [gts[dataset_name][ind] for ind in indices]
def __getitem__(self, index):
return self.clip_visual_features[index], self.spatial[index], self.temporal[index], self.gts[index]
def __len__(self):
return len(self.gts)
class FastVisualFeatureDataset(torch.utils.data.Dataset):
def __init__(self, dataset_name, indices=None):
super().__init__()
        if indices is None:
indices = range(len(sn[dataset_name]))
print("Using all indices:", indices)
self.temporal = [tn2[dataset_name][ind] for ind in indices]
self.spatial = [sn[dataset_name][ind] for ind in indices]
self.clip_visual_features = [visual_features[dataset_name][ind] for ind in indices]
self.fast_visual_features = [fast_visual_features[dataset_name]["feats"][ind] for ind in indices]
self.gts = [gts[dataset_name][ind] for ind in indices]
def __getitem__(self, index):
return self.clip_visual_features[index], self.spatial[index], self.temporal[index], self.gts[index], self.fast_visual_features[index].reshape(4,1,768)
def __len__(self):
return len(self.gts)
class SimpleFeatureDataset(torch.utils.data.Dataset):
def __init__(self, dataset_name, indices):
super().__init__()
#self.temporal = [tn2[dataset_name][ind] for ind in indices]
#self.spatial = [sn[dataset_name][ind] for ind in indices]
self.clip_visual_features = [visual_features[dataset_name][ind] for ind in indices]
self.gts = [gts[dataset_name][ind] for ind in indices]
def __getitem__(self, index):
return self.clip_visual_features[index], self.gts[index]
def __len__(self):
return len(self.gts)
class BVQI(nn.Module):
"""
    Modified CLIP, which combines prompt tuning and feature adaptation.
    The spatial and temporal naturalness indices are fed in as final features.
    Implicit features can also optionally be fed into the model.
"""
def __init__(self, text_tokens, embedding, n_pairs=2,implicit=False, optimizable_encoder=None):
super().__init__()
self.n_pairs = n_pairs
self.device = "cuda"
self.implicit = implicit
if self.implicit:
self.implicit_mlp = MLP(1024,64,1)
self.tokenized_prompts = text_tokens
#self.text_encoder = TextEncoder(clip_model)
if optimizable_encoder is not None:
print("Optimizing the text encoder.")
self.optimizable_encoder = deepcopy(text_encoder)
for param in self.optimizable_encoder.parameters():
param.requires_grad = True
if n_ctx > 0:
self.ctx = nn.Parameter(embedding[:, 1:1+n_ctx].clone())
else:
self.register_buffer("ctx", embedding[:, 1:1, :])
print("Disabled Context Prompt")
self.register_buffer("prefix", embedding[:, :1, :].clone()) # SOS
self.register_buffer("suffix", embedding[:, 1 + n_ctx:, :].clone())# CLS, EOS
self.prefix.requires_grad = False
self.suffix.requires_grad = False
self.dropout = nn.Dropout(0.5)
self.final_ln = nn.Linear(n_pairs+2+implicit,1,bias=False)
print(self.final_ln)
torch.nn.init.constant_(self.final_ln.weight, 1)
n_prompts = self.get_text_prompts()
self.text_feats = text_encoder(n_prompts.cuda(), self.tokenized_prompts)
def get_text_prompts(self):
return torch.cat(
[
self.prefix, # (n_cls, 1, dim)
self.ctx, # (n_cls, n_ctx, dim)
self.suffix, # (n_cls, *, dim)
],
dim=1,
)
def forward(self, vis_feat, sn_ind=None, tn_ind=None, train=True):
n_prompts = self.get_text_prompts()
if train:
if hasattr(self, "optimizable_encoder"):
text_feats = self.optimizable_encoder(n_prompts, self.tokenized_prompts)
else:
text_feats = text_encoder(n_prompts, self.tokenized_prompts)
self.text_feats = text_feats
else:
text_feats = self.text_feats
vis_feats = vis_feat[:,1:].to(self.device)
if self.implicit:
sa_ind = [self.implicit_mlp(vis_feats).mean((-1,-2,-3))]
else:
sa_ind = []
self.vis_feats = vis_feats
logits = 2 * self.dropout(self.vis_feats) @ text_feats.T
final_feats = [sn_ind.to(self.device), tn_ind.to(self.device)]
for k in range(self.n_pairs):
pn_pair = logits[..., 2 * k : 2 * k + 2].float() #.softmax(-1)[...,0]
sa_ind += [torch.sigmoid(pn_pair[...,0] - pn_pair[...,1]).mean((-1,-2))]
final_feats += sa_ind
final_feats = torch.stack(final_feats, -1).float()
return final_feats, self.final_ln(final_feats).flatten()
def metrics(self, feats, outputs, gt):
np_feats = feats.mean(-1).detach().cpu().numpy()
np_outputs = outputs.detach().cpu().numpy()
np_gt = gt.numpy()
return spearmanr(np_feats, np_gt)[0], spearmanr(np_outputs, np_gt)[0]
def plcc_loss(y_pred, y):
sigma_hat, m_hat = torch.std_mean(y_pred, unbiased=False)
y_pred = (y_pred - m_hat) / (sigma_hat + 1e-8)
sigma, m = torch.std_mean(y, unbiased=False)
y = (y - m) / (sigma + 1e-8)
loss0 = torch.nn.functional.mse_loss(y_pred, y) / 4
rho = torch.mean(y_pred * y)
loss1 = torch.nn.functional.mse_loss(rho * y_pred, y) / 4
return ((loss0 + loss1) / 2).float()
def max_plcc_loss(y_pred, y):
return sum(plcc_loss(y_pred[:,i], y) for i in range(y_pred.shape[-1])) / y_pred.shape[-1]
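# Illustrative sanity check for plcc_loss (a sketch, not part of the training
# flow): perfectly correlated inputs give ~0 loss, anti-correlated ones do not.
#
#   y = torch.randn(16)
#   plcc_loss(y, y).item()   # ~0.0
#   plcc_loss(-y, y).item()  # 0.5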
def rescale(x):
x = np.array(x)
print("Mean:", x.mean(), "Std", x.std())
x = (x - x.mean()) / x.std()
return 1 / (1 + np.exp(-x))
def count_parameters(model):
for name, module in model.named_children():
print(name, "|", sum(p.numel() for p in module.parameters() if p.requires_grad))
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def encode_text_prompts(prompts):
text_tokens = tokenizer(prompts).to("cuda")
with torch.no_grad():
embedding = model.token_embedding(text_tokens)
text_features = model.encode_text(text_tokens).float()
return text_tokens, embedding, text_features
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Hyper-parameters')
parser.add_argument('--n_pairs', type=int, default=2, help='Number of pairs')
parser.add_argument("-i", '--implicit', action="store_true", help='Use implicit prompts')
parser.add_argument('-c', '--n_ctx', type=int, default=1, help='Number of context')
args = parser.parse_args()
n_pairs = args.n_pairs
implicit = args.implicit
n_ctx = args.n_ctx
with open("buona_vista_sa_index.yml", "r") as f:
opt = yaml.safe_load(f)
val_datasets = {}
for name, dataset in opt["data"].items():
val_datasets[name] = getattr(datasets, dataset["type"])(dataset["args"])
print("Loading model")
model, _, preprocess = open_clip.create_model_and_transforms("RN50",pretrained="openai")
model = model.to("cuda")
tokenizer = open_clip.get_tokenizer("RN50")
print("Loading features")
results = {}
gts, paths = {}, {}
for val_name, val_dataset in val_datasets.items():
gts[val_name] = [val_dataset.video_infos[i]["label"] for i in range(len(val_dataset))]
for val_name, val_dataset in val_datasets.items():
paths[val_name] = [val_dataset.video_infos[i]["filename"] for i in range(len(val_dataset))]
if not glob.glob("CLIP_vis_features.pt"):
visual_features = get_features()
visual_features = torch.load("CLIP_vis_features.pt")
backend = "Matlab" # Matlab | Pytorch
if backend == "Matlab":
with open("naturalnesses_matlab_results.pkl","rb") as f:
matlab_results = pkl.load(f)
sn = matlab_results["spatial"]
tn2 = matlab_results["temporal"]
else:
sn, tn2 = {}, {}
for val_name in visual_features:
with open(f"spatial_naturalness_{val_name}.pkl","rb") as infile:
sn[val_name] = pkl.load(infile)["pr_labels"]
with open("temporal_naturalness_pubs.pkl","rb") as infile:
tn = pkl.load(infile)
tn2[val_name] = tn[f"{val_name}"]["tn_index"]
context = " ".join(["X"] * n_ctx)
prompts = [
f"a {context} high quality photo",
f"a {context} low quality photo",
f"a {context} good photo",
f"a {context} bad photo",
]
print(n_pairs, implicit)
text_encoder = TextEncoder(model)
print(f'The model has {count_parameters(model):,} trainable parameters')
text_tokens, embedding, text_feats = encode_text_prompts(prompts)
snames = ["val-cvd2014", "val-kv1k", "val-livevqc", "val-ytugc", ]
print("Start training")
for sname in snames:
best_srccs, best_plccs = [], []
cross_snames = [] #name for name in snames if name != sname]
best_srccs_cross, best_plccs_cross = {}, {}
for cname in cross_snames:
best_srccs_cross[cname], best_plccs_cross[cname] = [], []
for split in range(10):
bvqi = BVQI(text_tokens, embedding, n_pairs=n_pairs, implicit=implicit).cuda()
print(f'The model has {count_parameters(bvqi):,} trainable parameters')
optimizer = torch.optim.AdamW(bvqi.parameters(),lr=1e-3)
random.seed((split+1)*42)
train_indices = random.sample(range(len(gts[sname])), int(0.8 * len(gts[sname])))
train_dataset = VisualFeatureDataset(sname, indices=train_indices)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=16, shuffle=True)
val_indices = [ind for ind in range(len(gts[sname])) if ind not in train_indices]
val_dataset = VisualFeatureDataset(sname, indices=val_indices)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=16)
cross_test_dataloaders = {}
for cname in cross_snames:
test_dataset = VisualFeatureDataset(cname)
cross_test_dataloaders[cname] = torch.utils.data.DataLoader(test_dataset, batch_size=16)
val_prs, val_gts = [], []
for data in (val_dataloader):
with torch.no_grad():
vis_feat, sn_ind, tn_ind, gt = data
_, res = bvqi(vis_feat, sn_ind, tn_ind, train=False)
val_prs.extend(list(res.cpu().numpy()))
val_gts.extend(list(gt.cpu().numpy()))
print(f"Split {split}, Bef Training SRCC:", spearmanr(val_prs,val_gts)[0], "Bef Training PLCC:", pearsonr(val_prs,val_gts)[0])
best_srcc, best_plcc = -1, -1
srccs_cross, plccs_cross = {}, {}
for epoch in tqdm(range(30)):
#print(f"Epoch {epoch}:")
bvqi.train()
for data in (train_dataloader):
optimizer.zero_grad()
vis_feat, sn_ind, tn_ind, gt = data
feats, res = bvqi(vis_feat, sn_ind, tn_ind)
loss = plcc_loss(res, gt.cuda().float()) #+ 0.3 * rank_loss(res, gt.cuda().float())
#aux_loss = max_plcc_loss(feats[...,2:], gt.cuda().float())
#loss += 0.3 * aux_loss
loss.backward()
optimizer.step()
bvqi.eval()
#val_prs, val_gts = [], []
#for data in (train_dataloader):
# with torch.no_grad():
# vis_feat, sn_ind, tn_ind, gt = data
# _, res = bvqi(vis_feat, sn_ind, tn_ind)
# val_prs.extend(list(res.cpu().numpy()))
# val_gts.extend(list(gt.cpu().numpy()))
#print("Train Spearman:", spearmanr(val_prs,val_gts)[0], "Train Pearson:", pearsonr(val_prs,val_gts)[0])
val_prs, val_gts = [], []
for data in (val_dataloader):
with torch.no_grad():
vis_feat, sn_ind, tn_ind, gt = data
_, res = bvqi(vis_feat, sn_ind, tn_ind, train=False)
val_prs.extend(list(res.cpu().numpy()))
val_gts.extend(list(gt.cpu().numpy()))
srcc, plcc = spearmanr(val_prs,val_gts)[0], pearsonr(val_prs,val_gts)[0]
if srcc + plcc > best_srcc + best_plcc:
best_srcc = srcc
best_plcc = plcc
test_prs, test_gts = {}, {}
for cname, test_dataloader in cross_test_dataloaders.items():
test_prs[cname], test_gts[cname] = [], []
for data in (test_dataloader):
with torch.no_grad():
vis_feat, sn_ind, tn_ind, gt = data
_, res = bvqi(vis_feat, sn_ind, tn_ind, train=False)
test_prs[cname].extend(list(res.cpu().numpy()))
test_gts[cname].extend(list(gt.cpu().numpy()))
csrcc, cplcc = spearmanr(test_prs[cname],test_gts[cname])[0], pearsonr(test_prs[cname],test_gts[cname])[0]
srccs_cross[cname] = csrcc
plccs_cross[cname] = cplcc
#print("Val Spearman:", srcc, "Val Pearson:", plcc, "Best Spearman:", best_srcc, "Best Pearson:", best_plcc, )
best_srccs.append(best_srcc)
best_plccs.append(best_plcc)
print("Best SRCC:", best_srcc, "Best PLCC:", best_plcc)
for cname in cross_snames:
print(f"{cname} SRCC:", srccs_cross[cname], f"{cname} PLCC:", plccs_cross[cname])
best_srccs_cross[cname] += [srccs_cross[cname]]
best_plccs_cross[cname] += [plccs_cross[cname]]
print(f"After training in 10 splits with seeds {[(i+1)*42 for i in range(10)]}:")
print(sname, "Avg Best SRCC:", np.mean(best_srccs), "Avg Best PLCC:", np.mean(best_plccs))
print(f"Cross dataset performance:")
print("Cross SRCC", [(key, np.mean(values)) for key, values in best_srccs_cross.items()])
print("Cross PLCC", [(key, np.mean(values)) for key, values in best_plccs_cross.items()])
| 17,790 | 39.251131 | 158 | py |
BVQI | BVQI-master/load_features.py | import os
import argparse
import pickle as pkl
import random
import open_clip
import numpy as np
import torch
import torch.nn as nn
import yaml
from scipy.stats import pearsonr, spearmanr
from scipy.stats import kendalltau as kendallr
from tqdm import tqdm
from buona_vista import datasets
import wandb
def rescale(x):
x = np.array(x)
print("Mean:", x.mean(), "Std", x.std())
x = (x - x.mean()) / x.std()
return 1 / (1 + np.exp(-x))
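# Illustrative example (sketch): rescale standardizes the scores and squashes
# them into (0, 1) with a sigmoid, e.g. rescale([1, 2, 3]) ~= [0.23, 0.5, 0.77].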
def get_features(save_features=True):
with open("buona_vista_sa_index.yml", "r") as f:
opt = yaml.safe_load(f)
val_datasets = {}
for name, dataset in opt["data"].items():
val_datasets[name] = getattr(datasets, dataset["type"])(dataset["args"])
print(open_clip.list_pretrained())
model, _, _ = open_clip.create_model_and_transforms("RN50",pretrained="openai")
model = model.to("cuda")
print("loading succeed")
texts = [
"a high quality photo",
"a low quality photo",
"a good photo",
"a bad photo",
]
tokenizer = open_clip.get_tokenizer("RN50")
text_tokens = tokenizer(texts).to("cuda")
print(f"Prompt_loading_succeed, {texts}")
results = {}
gts, paths = {}, {}
for val_name, val_dataset in val_datasets.items():
gts[val_name] = [val_dataset.video_infos[i]["label"] for i in range(len(val_dataset))]
for val_name, val_dataset in val_datasets.items():
paths[val_name] = [val_dataset.video_infos[i]["filename"] for i in range(len(val_dataset))]
visual_features = {}
for val_name, val_dataset in val_datasets.items():
if val_name != "val-ltrain" and val_name != "val-l1080p":
visual_features[val_name] = []
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=1, num_workers=opt["num_workers"], pin_memory=True,
)
for i, data in enumerate(tqdm(val_loader, desc=f"Evaluating in dataset [{val_name}].")):
video_input = data["aesthetic"].to("cuda").squeeze(0).transpose(0,1)
with torch.no_grad():
video_features = model.encode_image(video_input)
visual_features[val_name].append(video_features.cpu())
if save_features:
torch.save(visual_features, "CLIP_vis_features.pt")
return visual_features
if __name__ == "__main__":
get_features() | 2,446 | 28.841463 | 100 | py |
BVQI | BVQI-master/pyiqa/api_helpers.py | import fnmatch
import re
from .default_model_configs import DEFAULT_CONFIGS
from .models.inference_model import InferenceModel
from .utils import get_root_logger
def create_metric(metric_name, as_loss=False, device=None, **kwargs):
assert (
metric_name in DEFAULT_CONFIGS.keys()
), f"Metric {metric_name} not implemented yet."
metric = InferenceModel(metric_name, as_loss, device, **kwargs)
logger = get_root_logger()
logger.info(f"Metric [{metric.net.__class__.__name__}] is created.")
return metric
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
def list_models(metric_mode=None, filter="", exclude_filters=""):
"""Return list of available model names, sorted alphabetically
Args:
filter (str) - Wildcard filter string that works with fnmatch
exclude_filters (str or list[str]) - Wildcard filters to exclude models after including them with filter
Example:
        list_models('*ssim*') -- returns all models including 'ssim'
"""
if metric_mode is None:
all_models = DEFAULT_CONFIGS.keys()
else:
assert metric_mode in [
"FR",
"NR",
], f"Metric mode only support [FR, NR], but got {metric_mode}"
all_models = [
key
for key in DEFAULT_CONFIGS.keys()
if DEFAULT_CONFIGS[key]["metric_mode"] == metric_mode
]
if filter:
models = []
include_filters = filter if isinstance(filter, (tuple, list)) else [filter]
for f in include_filters:
include_models = fnmatch.filter(all_models, f) # include these models
if len(include_models):
models = set(models).union(include_models)
else:
models = all_models
if exclude_filters:
if not isinstance(exclude_filters, (tuple, list)):
exclude_filters = [exclude_filters]
for xf in exclude_filters:
exclude_models = fnmatch.filter(models, xf) # exclude these models
if len(exclude_models):
models = set(models).difference(exclude_models)
return list(sorted(models, key=_natural_key))
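# Illustrative usage (a sketch using the public helpers above; the inference
# wrapper is typically called on a 4-D image tensor in [0, 1]):
#
#   import torch
#   print(list_models(metric_mode="NR", filter="*niqe*"))  # ['ilniqe', 'niqe']
#   niqe_metric = create_metric("niqe")                # NR metric, lower is better
#   score = niqe_metric(torch.rand(1, 3, 256, 256))    # expects (N, C, H, W) input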
| 2,217 | 35.360656 | 112 | py |
BVQI | BVQI-master/pyiqa/test.py | import logging
from os import path as osp
import torch
from pyiqa.data import build_dataloader, build_dataset
from pyiqa.models import build_model
from pyiqa.utils import get_env_info, get_root_logger, get_time_str, make_exp_dirs
from pyiqa.utils.options import dict2str, parse_options
def test_pipeline(root_path):
# parse options, set distributed setting, set ramdom seed
opt, _ = parse_options(root_path, is_train=False)
torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
# mkdir and initialize loggers
make_exp_dirs(opt)
log_file = osp.join(opt["path"]["log"], f"test_{opt['name']}_{get_time_str()}.log")
logger = get_root_logger(
logger_name="pyiqa", log_level=logging.INFO, log_file=log_file
)
logger.info(get_env_info())
logger.info(dict2str(opt))
# create test dataset and dataloader
test_loaders = []
for _, dataset_opt in sorted(opt["datasets"].items()):
test_set = build_dataset(dataset_opt)
test_loader = build_dataloader(
test_set,
dataset_opt,
num_gpu=opt["num_gpu"],
dist=opt["dist"],
sampler=None,
seed=opt["manual_seed"],
)
logger.info(f"Number of test images in {dataset_opt['name']}: {len(test_set)}")
test_loaders.append(test_loader)
# create model
model = build_model(opt)
for test_loader in test_loaders:
test_set_name = test_loader.dataset.opt["name"]
logger.info(f"Testing {test_set_name}...")
model.validation(
test_loader,
current_iter=opt["name"],
tb_logger=None,
save_img=opt["val"]["save_img"],
)
if __name__ == "__main__":
root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
test_pipeline(root_path)
| 1,864 | 30.083333 | 87 | py |
BVQI | BVQI-master/pyiqa/default_model_configs.py | import fnmatch
import re
from collections import OrderedDict
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
DEFAULT_CONFIGS = OrderedDict(
{
"ahiq": {
"metric_opts": {
"type": "AHIQ",
},
"metric_mode": "FR",
},
"ckdn": {
"metric_opts": {
"type": "CKDN",
},
"metric_mode": "FR",
},
"lpips": {
"metric_opts": {
"type": "LPIPS",
"net": "alex",
"version": "0.1",
},
"metric_mode": "FR",
"lower_better": True,
},
"lpips-vgg": {
"metric_opts": {
"type": "LPIPS",
"net": "vgg",
"version": "0.1",
},
"metric_mode": "FR",
"lower_better": True,
},
"dists": {
"metric_opts": {
"type": "DISTS",
},
"metric_mode": "FR",
"lower_better": True,
},
"ssim": {
"metric_opts": {
"type": "SSIM",
"downsample": False,
"test_y_channel": True,
},
"metric_mode": "FR",
},
"ssimc": {
"metric_opts": {
"type": "SSIM",
"downsample": False,
"test_y_channel": False,
},
"metric_mode": "FR",
},
"psnr": {
"metric_opts": {
"type": "PSNR",
"test_y_channel": False,
},
"metric_mode": "FR",
},
"psnry": {
"metric_opts": {
"type": "PSNR",
"test_y_channel": True,
},
"metric_mode": "FR",
},
"fsim": {
"metric_opts": {
"type": "FSIM",
"chromatic": True,
},
"metric_mode": "FR",
},
"ms_ssim": {
"metric_opts": {
"type": "MS_SSIM",
"downsample": False,
"test_y_channel": True,
"is_prod": True,
},
"metric_mode": "FR",
},
"vif": {
"metric_opts": {
"type": "VIF",
},
"metric_mode": "FR",
},
"gmsd": {
"metric_opts": {
"type": "GMSD",
"test_y_channel": True,
},
"metric_mode": "FR",
"lower_better": True,
},
"nlpd": {
"metric_opts": {
"type": "NLPD",
"channels": 1,
"test_y_channel": True,
},
"metric_mode": "FR",
"lower_better": True,
},
"vsi": {
"metric_opts": {
"type": "VSI",
},
"metric_mode": "FR",
},
"cw_ssim": {
"metric_opts": {
"type": "CW_SSIM",
"channels": 1,
"level": 4,
"ori": 8,
"test_y_channel": True,
},
"metric_mode": "FR",
},
"mad": {
"metric_opts": {
"type": "MAD",
"test_y_channel": True,
},
"metric_mode": "FR",
"lower_better": True,
},
# =============================================================
"niqe": {
"metric_opts": {
"type": "NIQE",
"test_y_channel": True,
},
"metric_mode": "NR",
"lower_better": True,
},
"ilniqe": {
"metric_opts": {
"type": "ILNIQE",
},
"metric_mode": "NR",
"lower_better": True,
},
"brisque": {
"metric_opts": {
"type": "BRISQUE",
"test_y_channel": True,
},
"metric_mode": "NR",
"lower_better": True,
},
"nrqm": {
"metric_opts": {
"type": "NRQM",
},
"metric_mode": "NR",
},
"pi": {
"metric_opts": {
"type": "PI",
},
"metric_mode": "NR",
"lower_better": True,
},
"cnniqa": {
"metric_opts": {"type": "CNNIQA", "pretrained": "koniq10k"},
"metric_mode": "NR",
},
"musiq": {
"metric_opts": {"type": "MUSIQ", "pretrained": "koniq10k"},
"metric_mode": "NR",
},
"musiq-ava": {
"metric_opts": {"type": "MUSIQ", "pretrained": "ava"},
"metric_mode": "NR",
},
"musiq-koniq": {
"metric_opts": {"type": "MUSIQ", "pretrained": "koniq10k"},
"metric_mode": "NR",
},
"musiq-paq2piq": {
"metric_opts": {"type": "MUSIQ", "pretrained": "paq2piq"},
"metric_mode": "NR",
},
"musiq-spaq": {
"metric_opts": {"type": "MUSIQ", "pretrained": "spaq"},
"metric_mode": "NR",
},
"nima": {
"metric_opts": {
"type": "NIMA",
"pretrained": "ava",
"base_model_name": "inception_resnet_v2",
},
"metric_mode": "NR",
},
"nima-vgg16-ava": {
"metric_opts": {
"type": "NIMA",
"pretrained": "ava",
"base_model_name": "vgg16",
},
"metric_mode": "NR",
},
"pieapp": {
"metric_opts": {
"type": "PieAPP",
},
"metric_mode": "FR",
"lower_better": True,
},
"paq2piq": {
"metric_opts": {
"type": "PAQ2PIQ",
},
"metric_mode": "NR",
},
"dbcnn": {
"metric_opts": {"type": "DBCNN", "pretrained": "koniq"},
"metric_mode": "NR",
},
"fid": {
"metric_opts": {
"type": "FID",
},
"metric_mode": "NR",
},
"maniqa": {
"metric_opts": {
"type": "MANIQA",
},
"metric_mode": "NR",
},
}
)
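# Illustrative lookup (sketch): how downstream code reads this registry.
#
#   cfg = DEFAULT_CONFIGS["niqe"]
#   cfg["metric_opts"]["type"]      # 'NIQE' -> class name looked up in ARCH_REGISTRY
#   cfg["metric_mode"]              # 'NR' (no-reference)
#   cfg.get("lower_better", False)  # True: smaller NIQE means better quality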
| 6,554 | 25.431452 | 72 | py |
BVQI | BVQI-master/pyiqa/version.py | # GENERATED VERSION FILE
# TIME: Wed Dec 7 13:57:45 2022
__version__ = "0.1.5"
__gitsha__ = "3619109"
version_info = (0, 1, 5)
| 128 | 20.5 | 32 | py |
BVQI | BVQI-master/pyiqa/train_nsplits.py | import datetime
import logging
import os
import time
from os import path as osp
import numpy as np
import torch
from pyiqa.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
from pyiqa.models import build_model
from pyiqa.train import create_train_val_dataloader, init_tb_loggers, train_pipeline
from pyiqa.utils import (
AvgTimer,
MessageLogger,
get_env_info,
get_root_logger,
get_time_str,
make_exp_dirs,
mkdir_and_rename,
)
from pyiqa.utils.options import copy_opt_file, dict2str, make_paths, parse_options
def train_nsplits(root_path):
torch.backends.cudnn.benchmark = True
opt, args = parse_options(root_path, is_train=True)
n_splits = opt["split_num"]
save_path = opt["save_final_results_path"]
os.makedirs(os.path.dirname(save_path), exist_ok=True)
all_split_results = []
prefix_name = opt["name"]
for i in range(n_splits):
# update split specific options
opt["name"] = prefix_name + f"_Split{i:02d}"
make_paths(opt, root_path)
for k in opt["datasets"].keys():
opt["datasets"][k]["split_index"] = i + 1
tmp_results = train_pipeline(root_path, opt, args)
all_split_results.append(tmp_results)
with open(save_path, "w") as sf:
datasets = list(all_split_results[0].keys())
metrics = list(all_split_results[0][datasets[0]].keys())
print(datasets, metrics)
sf.write("Val Datasets\tSplits\t{}\n".format("\t".join(metrics)))
for ds in datasets:
all_results = []
for i in range(n_splits):
results_msg = f"{ds}\t{i:02d}\t"
tmp_metric_results = []
for mt in metrics:
tmp_metric_results.append(all_split_results[i][ds][mt]["val"])
results_msg += f"{all_split_results[i][ds][mt]['val']:04f}\t"
results_msg += f"@{all_split_results[i][ds][mt]['iter']:05d}\n"
sf.write(results_msg)
all_results.append(tmp_metric_results)
results_avg = np.array(all_results).mean(axis=0)
results_std = np.array(all_results).std(axis=0)
sf.write(f"Overall results in {ds}: {results_avg}\t{results_std}\n")
if __name__ == "__main__":
root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
train_nsplits(root_path)
| 2,387 | 34.641791 | 84 | py |
BVQI | BVQI-master/pyiqa/__init__.py | # flake8: noqa
from .api_helpers import *
from .archs import *
from .data import *
from .default_model_configs import *
from .losses import *
from .matlab_utils import *
from .metrics import *
from .models import *
from .test import *
from .train import *
from .utils import *
from .version import __gitsha__, __version__
| 322 | 22.071429 | 44 | py |
BVQI | BVQI-master/pyiqa/train.py | import datetime
import logging
import math
import os
import time
from os import path as osp
import torch
from pyiqa.data import build_dataloader, build_dataset
from pyiqa.data.data_sampler import EnlargedSampler
from pyiqa.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
from pyiqa.models import build_model
from pyiqa.utils import (
AvgTimer,
MessageLogger,
check_resume,
get_env_info,
get_root_logger,
get_time_str,
init_tb_logger,
init_wandb_logger,
make_exp_dirs,
mkdir_and_rename,
scandir,
)
from pyiqa.utils.options import copy_opt_file, dict2str, parse_options
def init_tb_loggers(opt):
# initialize wandb logger before tensorboard logger to allow proper sync
if (
(opt["logger"].get("wandb") is not None)
and (opt["logger"]["wandb"].get("project") is not None)
and ("debug" not in opt["name"])
):
assert (
opt["logger"].get("use_tb_logger") is True
), "should turn on tensorboard when using wandb"
init_wandb_logger(opt)
tb_logger = None
if opt["logger"].get("use_tb_logger") and "debug" not in opt["name"]:
tb_logger = init_tb_logger(
log_dir=osp.join(opt["root_path"], "tb_logger", opt["name"])
)
return tb_logger
def create_train_val_dataloader(opt, logger):
# create train and val dataloaders
train_loader, val_loaders = None, []
for phase, dataset_opt in opt["datasets"].items():
if phase == "train":
dataset_enlarge_ratio = dataset_opt.get("dataset_enlarge_ratio", 1)
train_set = build_dataset(dataset_opt)
train_sampler = EnlargedSampler(
train_set,
opt["world_size"],
opt["rank"],
dataset_enlarge_ratio,
dataset_opt.get("use_shuffle", True),
)
train_loader = build_dataloader(
train_set,
dataset_opt,
num_gpu=opt["num_gpu"],
dist=opt["dist"],
sampler=train_sampler,
seed=opt["manual_seed"],
)
num_iter_per_epoch = math.ceil(
len(train_set)
* dataset_enlarge_ratio
/ (dataset_opt["batch_size_per_gpu"] * opt["world_size"])
)
total_epochs = opt["train"].get("total_epoch", None)
if total_epochs is not None:
total_epochs = int(total_epochs)
total_iters = total_epochs * (num_iter_per_epoch)
opt["train"]["total_iter"] = total_iters
else:
total_iters = int(opt["train"]["total_iter"])
total_epochs = math.ceil(total_iters / (num_iter_per_epoch))
logger.info(
"Training statistics:"
f"\n\tNumber of train images: {len(train_set)}"
f"\n\tDataset enlarge ratio: {dataset_enlarge_ratio}"
f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
f'\n\tWorld size (gpu number): {opt["world_size"]}'
f"\n\tRequire iter number per epoch: {num_iter_per_epoch}"
f"\n\tTotal epochs: {total_epochs}; iters: {total_iters}."
)
elif phase.split("_")[0] == "val":
val_set = build_dataset(dataset_opt)
val_loader = build_dataloader(
val_set,
dataset_opt,
num_gpu=opt["num_gpu"],
dist=opt["dist"],
sampler=None,
seed=opt["manual_seed"],
)
logger.info(
f'Number of val images/folders in {dataset_opt["name"]}: {len(val_set)}'
)
val_loaders.append(val_loader)
else:
raise ValueError(f"Dataset phase {phase} is not recognized.")
return train_loader, train_sampler, val_loaders, total_epochs, total_iters
def load_resume_state(opt):
resume_state_path = None
if opt["auto_resume"]:
state_path = osp.join("experiments", opt["name"], "training_states")
if osp.isdir(state_path):
states = list(
scandir(state_path, suffix="state", recursive=False, full_path=False)
)
if len(states) != 0:
states = [float(v.split(".state")[0]) for v in states]
resume_state_path = osp.join(state_path, f"{max(states):.0f}.state")
opt["path"]["resume_state"] = resume_state_path
else:
if opt["path"].get("resume_state"):
resume_state_path = opt["path"]["resume_state"]
if resume_state_path is None:
resume_state = None
else:
device_id = torch.cuda.current_device()
resume_state = torch.load(
resume_state_path, map_location=lambda storage, loc: storage.cuda(device_id)
)
check_resume(opt, resume_state["iter"])
return resume_state
def train_pipeline(root_path, opt=None, args=None):
# parse options, set distributed setting, set random seed
if opt is None and args is None:
opt, args = parse_options(root_path, is_train=True)
opt["root_path"] = root_path
torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
# load resume states if necessary
resume_state = load_resume_state(opt)
# mkdir for experiments and logger
if resume_state is None:
make_exp_dirs(opt)
if (
opt["logger"].get("use_tb_logger")
and "debug" not in opt["name"]
and opt["rank"] == 0
):
os.makedirs(osp.join(opt["root_path"], "tb_logger_archived"), exist_ok=True)
mkdir_and_rename(osp.join(opt["root_path"], "tb_logger", opt["name"]))
# copy the yml file to the experiment root
copy_opt_file(args.opt, opt["path"]["experiments_root"])
# WARNING: should not use get_root_logger in the above codes, including the called functions
# Otherwise the logger will not be properly initialized
log_file = osp.join(opt["path"]["log"], f"train_{opt['name']}_{get_time_str()}.log")
logger = get_root_logger(
logger_name="pyiqa", log_level=logging.INFO, log_file=log_file
)
logger.info(get_env_info())
logger.info(dict2str(opt))
# initialize wandb and tb loggers
tb_logger = init_tb_loggers(opt)
# create train and validation dataloaders
result = create_train_val_dataloader(opt, logger)
train_loader, train_sampler, val_loaders, total_epochs, total_iters = result
# create model
model = build_model(opt)
if resume_state: # resume training
model.resume_training(resume_state) # handle optimizers and schedulers
logger.info(
f"Resuming training from epoch: {resume_state['epoch']}, "
f"iter: {resume_state['iter']}."
)
start_epoch = resume_state["epoch"]
current_iter = resume_state["iter"]
else:
start_epoch = 0
current_iter = 0
# create message logger (formatted outputs)
msg_logger = MessageLogger(opt, current_iter, tb_logger)
# dataloader prefetcher
prefetch_mode = opt["datasets"]["train"].get("prefetch_mode")
if prefetch_mode is None or prefetch_mode == "cpu":
prefetcher = CPUPrefetcher(train_loader)
elif prefetch_mode == "cuda":
prefetcher = CUDAPrefetcher(train_loader, opt)
logger.info(f"Use {prefetch_mode} prefetch dataloader")
if opt["datasets"]["train"].get("pin_memory") is not True:
raise ValueError("Please set pin_memory=True for CUDAPrefetcher.")
else:
raise ValueError(
f"Wrong prefetch_mode {prefetch_mode}."
"Supported ones are: None, 'cuda', 'cpu'."
)
# training
logger.info(f"Start training from epoch: {start_epoch}, iter: {current_iter}")
data_timer, iter_timer = AvgTimer(), AvgTimer()
start_time = time.time()
for epoch in range(start_epoch, total_epochs + 1):
train_sampler.set_epoch(epoch)
prefetcher.reset()
train_data = prefetcher.next()
while train_data is not None:
data_timer.record()
current_iter += 1
if current_iter > total_iters:
break
# update learning rate
# model.update_learning_rate(current_iter, warmup_iter=opt['train'].get('warmup_iter', -1))
# training
model.feed_data(train_data)
model.optimize_parameters(current_iter)
iter_timer.record()
if current_iter == 1:
# reset start time in msg_logger for more accurate eta_time
# not work in resume mode
msg_logger.reset_start_time()
# log
if current_iter % opt["logger"]["print_freq"] == 0:
log_vars = {"epoch": epoch, "iter": current_iter}
log_vars.update({"lrs": model.get_current_learning_rate()})
log_vars.update(
{
"time": iter_timer.get_avg_time(),
"data_time": data_timer.get_avg_time(),
}
)
log_vars.update(model.get_current_log())
msg_logger(log_vars)
# log images
log_img_freq = opt["logger"].get("log_imgs_freq", 1e99)
if current_iter % log_img_freq == 0:
visual_imgs = model.get_current_visuals()
if tb_logger and visual_imgs is not None:
for k, v in visual_imgs.items():
tb_logger.add_images(
f"ckpt_imgs/{k}", v.clamp(0, 1), current_iter
)
# save models and training states
save_ckpt_freq = opt["logger"].get("save_checkpoint_freq", 9e9)
if current_iter % save_ckpt_freq == 0:
logger.info("Saving models and training states.")
model.save(epoch, current_iter)
if current_iter % opt["logger"]["save_latest_freq"] == 0:
logger.info("Saving latest models and training states.")
model.save(epoch, -1)
# validation
if opt.get("val") is not None and (
current_iter % opt["val"]["val_freq"] == 0
):
if len(val_loaders) > 1:
logger.warning(
"Multiple validation datasets are *only* supported by SRModel."
)
for val_loader in val_loaders:
model.validation(
val_loader, current_iter, tb_logger, opt["val"]["save_img"]
)
data_timer.start()
iter_timer.start()
train_data = prefetcher.next()
# end of iter
# use epoch based learning rate scheduler
model.update_learning_rate(
epoch + 2, warmup_iter=opt["train"].get("warmup_iter", -1)
)
# end of epoch
consumed_time = str(datetime.timedelta(seconds=int(time.time() - start_time)))
logger.info(f"End of training. Time consumed: {consumed_time}")
logger.info("Save the latest model.")
model.save(epoch=-1, current_iter=-1) # -1 stands for the latest
if opt.get("val") is not None:
for val_loader in val_loaders:
model.validation(
val_loader, current_iter, tb_logger, opt["val"]["save_img"]
)
if tb_logger:
tb_logger.close()
return model.best_metric_results
if __name__ == "__main__":
root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
train_pipeline(root_path)
| 11,816 | 36.39557 | 103 | py |
BVQI | BVQI-master/pyiqa/matlab_utils/functions.py | import math
import numpy as np
import torch
import torch.nn.functional as F
from pyiqa.archs.arch_util import ExactPadding2d, symm_pad, to_2tuple
def fspecial(size=None, sigma=None, channels=1, filter_type="gaussian"):
r"""Function same as 'fspecial' in MATLAB, only support gaussian now.
Args:
size (int or tuple): size of window
sigma (float): sigma of gaussian
channels (int): channels of output
"""
if filter_type == "gaussian":
shape = to_2tuple(size)
m, n = [(ss - 1.0) / 2.0 for ss in shape]
y, x = np.ogrid[-m : m + 1, -n : n + 1]
h = np.exp(-(x * x + y * y) / (2.0 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
h = torch.from_numpy(h).float().repeat(channels, 1, 1, 1)
return h
else:
raise NotImplementedError(
f"Only support gaussian filter now, got {filter_type}"
)
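# Quick check (sketch): fspecial(7, 1.5) mirrors MATLAB's
# fspecial('gaussian', 7, 1.5); each per-channel kernel sums to 1.
#
#   k = fspecial(7, 1.5, channels=3)                        # shape (3, 1, 7, 7)
#   torch.allclose(k.sum(dim=(-1, -2)), torch.ones(3, 1))   # True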
def conv2d(input, weight, bias=None, stride=1, padding="same", dilation=1, groups=1):
"""Matlab like conv2, weights needs to be flipped.
Args:
input (tensor): (b, c, h, w)
weight (tensor): (out_ch, in_ch, kh, kw), conv weight
bias (bool or None): bias
stride (int or tuple): conv stride
padding (str): padding mode
dilation (int): conv dilation
"""
kernel_size = weight.shape[2:]
pad_func = ExactPadding2d(kernel_size, stride, dilation, mode=padding)
weight = torch.flip(weight, dims=(-1, -2))
return F.conv2d(
pad_func(input), weight, bias, stride, dilation=dilation, groups=groups
)
def imfilter(input, weight, bias=None, stride=1, padding="same", dilation=1, groups=1):
"""imfilter same as matlab.
Args:
input (tensor): (b, c, h, w) tensor to be filtered
weight (tensor): (out_ch, in_ch, kh, kw) filter kernel
padding (str): padding mode
dilation (int): dilation of conv
groups (int): groups of conv
"""
kernel_size = weight.shape[2:]
pad_func = ExactPadding2d(kernel_size, stride, dilation, mode=padding)
return F.conv2d(
pad_func(input), weight, bias, stride, dilation=dilation, groups=groups
)
def filter2(input, weight, shape="same"):
if shape == "same":
return imfilter(input, weight, groups=input.shape[1])
elif shape == "valid":
return F.conv2d(input, weight, stride=1, padding=0, groups=input.shape[1])
else:
raise NotImplementedError(f"Shape type {shape} is not implemented.")
def dct(x, norm=None):
"""
Discrete Cosine Transform, Type II (a.k.a. the DCT)
For the meaning of the parameter `norm`, see:
https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
Args:
x: the input signal
norm: the normalization, None or 'ortho'
Return:
the DCT-II of the signal over the last dimension
"""
x_shape = x.shape
N = x_shape[-1]
x = x.contiguous().view(-1, N)
v = torch.cat([x[:, ::2], x[:, 1::2].flip([1])], dim=-1)
Vc = torch.view_as_real(torch.fft.fft(v, dim=-1))
k = -torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N)
W_r = torch.cos(k)
W_i = torch.sin(k)
V = Vc[:, :, 0] * W_r - Vc[:, :, 1] * W_i
if norm == "ortho":
V[:, 0] /= np.sqrt(N) * 2
V[:, 1:] /= np.sqrt(N / 2) * 2
V = 2 * V.view(*x_shape)
return V
def dct2d(x, norm="ortho"):
"""
2-dimentional Discrete Cosine Transform, Type II (a.k.a. the DCT)
For the meaning of the parameter `norm`, see:
https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
:param x: the input signal
:param norm: the normalization, None or 'ortho'
:return: the DCT-II of the signal over the last 2 dimensions
"""
X1 = dct(x, norm=norm)
X2 = dct(X1.transpose(-1, -2), norm=norm)
return X2.transpose(-1, -2)
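# Quick check (sketch): with norm='ortho' a constant image concentrates all of
# its energy in the DC coefficient.
#
#   x = torch.ones(4, 4)
#   dct2d(x)  # 4.0 at [0, 0], ~0 elsewhere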
def fitweibull(x, iters=50, eps=1e-2):
"""Simulate wblfit function in matlab.
ref: https://github.com/mlosch/python-weibullfit/blob/master/weibull/backend_pytorch.py
Fits a 2-parameter Weibull distribution to the given data using maximum-likelihood estimation.
:param x (tensor): (B, N), batch of samples from an (unknown) distribution. Each value must satisfy x > 0.
:param iters: Maximum number of iterations
    :param eps: Stopping criterion. Fit is stopped if the change within two iterations is smaller than eps.
:return: Tuple (Shape, Scale) which can be (NaN, NaN) if a fit is impossible.
Impossible fits may be due to 0-values in x.
"""
ln_x = torch.log(x)
k = 1.2 / torch.std(ln_x, dim=1, keepdim=True)
k_t_1 = k
for t in range(iters):
# Partial derivative df/dk
x_k = x ** k.repeat(1, x.shape[1])
x_k_ln_x = x_k * ln_x
ff = torch.sum(x_k_ln_x, dim=-1, keepdim=True)
fg = torch.sum(x_k, dim=-1, keepdim=True)
f1 = torch.mean(ln_x, dim=-1, keepdim=True)
f = ff / fg - f1 - (1.0 / k)
ff_prime = torch.sum(x_k_ln_x * ln_x, dim=-1, keepdim=True)
fg_prime = ff
f_prime = (ff_prime / fg - (ff / fg * fg_prime / fg)) + (1.0 / (k * k))
# Newton-Raphson method k = k - f(k;x)/f'(k;x)
k = k - f / f_prime
error = torch.abs(k - k_t_1).max().item()
if error < eps:
break
k_t_1 = k
# Lambda (scale) can be calculated directly
lam = torch.mean(x ** k.repeat(1, x.shape[1]), dim=-1, keepdim=True) ** (1.0 / k)
return torch.cat((k, lam), dim=1) # Shape (SC), Scale (FE)
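# Illustrative sanity check (sketch): recover the parameters of a known Weibull.
#
#   dist = torch.distributions.Weibull(torch.tensor(1.5), torch.tensor(2.0))  # scale, shape
#   x = dist.sample((1, 10000))   # (B, N) samples, all > 0
#   fitweibull(x)                 # ~tensor([[2.0, 1.5]]) -> (shape k, scale lambda)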
def cov(tensor, rowvar=True, bias=False):
r"""Estimate a covariance matrix (np.cov)
Ref: https://gist.github.com/ModarTensai/5ab449acba9df1a26c12060240773110
"""
tensor = tensor if rowvar else tensor.transpose(-1, -2)
tensor = tensor - tensor.mean(dim=-1, keepdim=True)
if tensor.shape[-1] > 1:
factor = 1 / (tensor.shape[-1] - int(not bool(bias)))
else:
factor = 1
return factor * tensor @ tensor.transpose(-1, -2)
def nancov(x):
r"""Calculate nancov for batched tensor, rows that contains nan value
will be removed.
Args:
x (tensor): (B, row_num, feat_dim)
Return:
cov (tensor): (B, feat_dim, feat_dim)
"""
assert (
len(x.shape) == 3
), f"Shape of input should be (batch_size, row_num, feat_dim), but got {x.shape}"
b, rownum, feat_dim = x.shape
nan_mask = torch.isnan(x).any(dim=2, keepdim=True)
cov_x = []
for i in range(b):
x_no_nan = x[i].masked_select(~nan_mask[i]).reshape(-1, feat_dim)
cov_x.append(cov(x_no_nan, rowvar=False))
return torch.stack(cov_x)
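# Illustrative check (sketch): rows containing any NaN are dropped per batch item.
#
#   feats = torch.randn(1, 5, 3)
#   feats[0, 2, 1] = float('nan')
#   nancov(feats).shape  # (1, 3, 3), computed from the 4 NaN-free rows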
def nanmean(v, *args, inplace=False, **kwargs):
r"""nanmean same as matlab function: calculate mean values by removing all nan."""
if not inplace:
v = v.clone()
is_nan = torch.isnan(v)
v[is_nan] = 0
return v.sum(*args, **kwargs) / (~is_nan).float().sum(*args, **kwargs)
def im2col(x, kernel, mode="sliding"):
r"""simple im2col as matlab
Args:
x (Tensor): shape (b, c, h, w)
kernel (int): kernel size
mode (string):
- sliding (default): rearranges sliding image neighborhoods of kernel size into columns with no zero-padding
- distinct: rearranges discrete image blocks of kernel size into columns, zero pad right and bottom if necessary
Return:
flatten patch (Tensor): (b, h * w / kernel **2, kernel * kernel)
"""
b, c, h, w = x.shape
kernel = to_2tuple(kernel)
if mode == "sliding":
stride = 1
elif mode == "distinct":
stride = kernel
h2 = math.ceil(h / stride[0])
w2 = math.ceil(w / stride[1])
pad_row = (h2 - 1) * stride[0] + kernel[0] - h
pad_col = (w2 - 1) * stride[1] + kernel[1] - w
x = F.pad(x, (0, pad_col, 0, pad_row))
else:
raise NotImplementedError(f"Type {mode} is not implemented yet.")
patches = F.unfold(x, kernel, dilation=1, stride=stride)
b, _, pnum = patches.shape
patches = patches.transpose(1, 2).reshape(b, pnum, -1)
return patches
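# Illustrative shape check (sketch):
#
#   x = torch.arange(16.).view(1, 1, 4, 4)
#   im2col(x, 2, mode="distinct").shape  # (1, 4, 4): four non-overlapping 2x2 blocks
#   im2col(x, 2, mode="sliding").shape   # (1, 9, 4): nine overlapping 2x2 blocks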
def blockproc(
x, kernel, fun, border_size=None, pad_partial=False, pad_method="zero", **func_args
):
r"""blockproc function like matlab
Difference:
- Partial blocks is discarded (if exist) for fast GPU process.
Args:
x (tensor): shape (b, c, h, w)
kernel (int or tuple): block size
func (function): function to process each block
border_size (int or tuple): border pixels to each block
pad_partial: pad partial blocks to make them full-sized, default False
pad_method: [zero, replicate, symmetric] how to pad partial block when pad_partial is set True
Return:
results (tensor): concatenated results of each block
"""
assert len(x.shape) == 4, f"Shape of input has to be (b, c, h, w) but got {x.shape}"
kernel = to_2tuple(kernel)
if pad_partial:
b, c, h, w = x.shape
stride = kernel
h2 = math.ceil(h / stride[0])
w2 = math.ceil(w / stride[1])
pad_row = (h2 - 1) * stride[0] + kernel[0] - h
pad_col = (w2 - 1) * stride[1] + kernel[1] - w
padding = (0, pad_col, 0, pad_row)
if pad_method == "zero":
x = F.pad(x, padding, mode="constant")
elif pad_method == "symmetric":
x = symm_pad(x, padding)
else:
x = F.pad(x, padding, mode=pad_method)
if border_size is not None:
raise NotImplementedError("Blockproc with border is not implemented yet")
else:
b, c, h, w = x.shape
block_size_h, block_size_w = kernel
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
# extract blocks in (row, column) manner, i.e., stored with column first
blocks = F.unfold(x, kernel, stride=kernel)
blocks = blocks.reshape(b, c, *kernel, num_block_h, num_block_w)
blocks = blocks.permute(5, 4, 0, 1, 2, 3).reshape(
num_block_h * num_block_w * b, c, *kernel
)
results = fun(blocks, func_args)
results = results.reshape(
num_block_h * num_block_w, b, *results.shape[1:]
).transpose(0, 1)
return results
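# Illustrative usage (sketch): per-block mean over distinct 8x8 blocks. Note
# that `fun` receives the stacked blocks and the dict of extra keyword args.
#
#   x = torch.rand(2, 1, 64, 64)
#   means = blockproc(x, 8, lambda blocks, _: blocks.mean(dim=(1, 2, 3)))
#   means.shape  # (2, 64): one mean per 8x8 block, 64 blocks per image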
| 10,439 | 33.569536 | 124 | py |
BVQI | BVQI-master/pyiqa/matlab_utils/math_util.py | r"""Mathematical utilities
Created by: https://github.com/tomrunia/PyTorchSteerablePyramid/blob/master/steerable/math_utils.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
def abs(x):
return torch.sqrt(x[..., 0] ** 2 + x[..., 1] ** 2 + 1e-12)
def roll_n(X, axis, n):
f_idx = tuple(
slice(None, None, None) if i != axis else slice(0, n, None)
for i in range(X.dim())
)
b_idx = tuple(
slice(None, None, None) if i != axis else slice(n, None, None)
for i in range(X.dim())
)
front = X[f_idx]
back = X[b_idx]
return torch.cat([back, front], axis)
def batch_fftshift2d(x):
"""Args:
        x: A complex tensor. Shape :math:`(N, C, H, W)`.
    Requires PyTorch version >= 1.8.0.
"""
real, imag = x.real, x.imag
for dim in range(1, len(real.size())):
n_shift = real.size(dim) // 2
if real.size(dim) % 2 != 0:
n_shift += 1 # for odd-sized images
real = roll_n(real, axis=dim, n=n_shift)
imag = roll_n(imag, axis=dim, n=n_shift)
return torch.stack((real, imag), -1) # last dim=2 (real&imag)
def batch_ifftshift2d(x):
"""Args:
x: An input tensor. Shape :math:`(N, C, H, W, 2)`.
Return:
An complex tensor. Shape :math:`(N, C, H, W)`.
"""
real, imag = torch.unbind(x, -1)
for dim in range(len(real.size()) - 1, 0, -1):
real = roll_n(real, axis=dim, n=real.size(dim) // 2)
imag = roll_n(imag, axis=dim, n=imag.size(dim) // 2)
return torch.complex(real, imag) # convert to complex (real&imag)
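# Round-trip check (sketch): shifting then unshifting recovers the spectrum.
#
#   x = torch.fft.fft2(torch.rand(1, 1, 8, 8))
#   torch.allclose(batch_ifftshift2d(batch_fftshift2d(x)), x)  # True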
def prepare_grid(m, n):
x = np.linspace(
-(m // 2) / (m / 2), (m // 2) / (m / 2) - (1 - m % 2) * 2 / m, num=m
)
y = np.linspace(
-(n // 2) / (n / 2), (n // 2) / (n / 2) - (1 - n % 2) * 2 / n, num=n
)
xv, yv = np.meshgrid(y, x)
angle = np.arctan2(yv, xv)
rad = np.sqrt(xv ** 2 + yv ** 2)
rad[m // 2][n // 2] = rad[m // 2][n // 2 - 1]
log_rad = np.log2(rad)
return log_rad, angle
def rcosFn(width, position):
    N = 256  # arbitrary
X = np.pi * np.array(range(-N - 1, 2)) / 2 / N
Y = np.cos(X) ** 2
Y[0] = Y[1]
Y[N + 2] = Y[N + 1]
X = position + 2 * width / np.pi * (X + np.pi / 4)
return X, Y
def pointOp(im, Y, X):
out = np.interp(im.flatten(), X, Y)
return np.reshape(out, im.shape)
def getlist(coeff):
straight = [bands for scale in coeff[1:-1] for bands in scale]
straight = [coeff[0]] + straight + [coeff[-1]]
return straight
| 2,611 | 26.787234 | 99 | py |
BVQI | BVQI-master/pyiqa/matlab_utils/scfpyr_util.py | r"""Complex-valued steerable pyramid
Created by: https://github.com/tomrunia/PyTorchSteerablePyramid
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
- Offical Matlab code from https://github.com/LabForComputationalVision/matlabPyrTools/blob/master/buildSCFpyr.m;
- Original Python code from https://github.com/LabForComputationalVision/pyPyrTools/blob/master/pyPyrTools/SCFpyr.py;
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
from scipy.special import factorial
from . import math_util
pointOp = math_util.pointOp
################################################################################
################################################################################
class SCFpyr_PyTorch(object):
"""
This is a modified version of buildSFpyr, that constructs a
complex-valued steerable pyramid using Hilbert-transform pairs
of filters. Note that the imaginary parts will *not* be steerable.
    Requires PyTorch version >= 1.8.0.
"""
def __init__(self, height=5, nbands=4, scale_factor=2, device=None):
self.height = height # including low-pass and high-pass
self.nbands = nbands # number of orientation bands
self.scale_factor = scale_factor
self.device = torch.device("cpu") if device is None else device
# Cache constants
self.lutsize = 1024
self.Xcosn = (
np.pi
* np.array(range(-(2 * self.lutsize + 1), (self.lutsize + 2)))
/ self.lutsize
)
self.alpha = (self.Xcosn + np.pi) % (2 * np.pi) - np.pi
        # use the builtin complex type: np.complex is deprecated since NumPy 1.20
        self.complex_fact_construct = np.power(complex(0, -1), self.nbands - 1)
        self.complex_fact_reconstruct = np.power(complex(0, 1), self.nbands - 1)
################################################################################
# Construction of Steerable Pyramid
def build(self, im_batch):
"""Decomposes a batch of images into a complex steerable pyramid.
The pyramid typically has ~4 levels and 4-8 orientations.
Args:
im_batch (torch.Tensor): Batch of images of shape [N,C,H,W]
Returns:
pyramid: list containing torch.Tensor objects storing the pyramid
"""
assert (
im_batch.device == self.device
), "Devices invalid (pyr = {}, batch = {})".format(self.device, im_batch.device)
assert im_batch.dtype == torch.float32, "Image batch must be torch.float32"
assert im_batch.dim() == 4, "Image batch must be of shape [N,C,H,W]"
assert (
im_batch.shape[1] == 1
), "Second dimension must be 1 encoding grayscale image"
im_batch = im_batch.squeeze(1) # flatten channels dim
height, width = im_batch.shape[1], im_batch.shape[2]
# Check whether image size is sufficient for number of levels
if self.height > int(np.floor(np.log2(min(width, height))) - 2):
raise RuntimeError(
"Cannot build {} levels, image too small.".format(self.height)
)
# Prepare a grid
log_rad, angle = math_util.prepare_grid(height, width)
# Radial transition function (a raised cosine in log-frequency):
Xrcos, Yrcos = math_util.rcosFn(1, -0.5)
Yrcos = np.sqrt(Yrcos)
YIrcos = np.sqrt(1 - Yrcos ** 2)
lo0mask = pointOp(log_rad, YIrcos, Xrcos)
hi0mask = pointOp(log_rad, Yrcos, Xrcos)
# Note that we expand dims to support broadcasting later
lo0mask = torch.from_numpy(lo0mask).float()[None, :, :, None].to(self.device)
hi0mask = torch.from_numpy(hi0mask).float()[None, :, :, None].to(self.device)
# Fourier transform (2D) and shifting
batch_dft = torch.fft.fft2(im_batch)
batch_dft = math_util.batch_fftshift2d(batch_dft)
# Low-pass
lo0dft = batch_dft * lo0mask
# Start recursively building the pyramids
coeff = self._build_levels(lo0dft, log_rad, angle, Xrcos, Yrcos, self.height)
# High-pass
hi0dft = batch_dft * hi0mask
hi0 = math_util.batch_ifftshift2d(hi0dft)
hi0 = torch.fft.ifft2(hi0)
hi0_real = hi0.real
coeff.insert(0, hi0_real)
return coeff
def _build_levels(self, lodft, log_rad, angle, Xrcos, Yrcos, height):
if height <= 0:
# Low-pass
lo0 = math_util.batch_ifftshift2d(lodft)
lo0 = torch.fft.ifft2(lo0)
lo0_real = lo0.real
coeff = [lo0_real]
else:
Xrcos = Xrcos - np.log2(self.scale_factor)
####################################################################
####################### Orientation bandpass #######################
####################################################################
himask = pointOp(log_rad, Yrcos, Xrcos)
himask = torch.from_numpy(himask[None, :, :, None]).float().to(self.device)
order = self.nbands - 1
const = (
np.power(2, 2 * order)
* np.square(factorial(order))
/ (self.nbands * factorial(2 * order))
)
Ycosn = (
2
* np.sqrt(const)
* np.power(np.cos(self.Xcosn), order)
* (np.abs(self.alpha) < np.pi / 2)
) # [n,]
# Loop through all orientation bands
orientations = []
for b in range(self.nbands):
anglemask = pointOp(angle, Ycosn, self.Xcosn + np.pi * b / self.nbands)
anglemask = anglemask[None, :, :, None] # for broadcasting
anglemask = torch.from_numpy(anglemask).float().to(self.device)
# Bandpass filtering
banddft = lodft * anglemask * himask
# Now multiply with complex number
# (x+yi)(u+vi) = (xu-yv) + (xv+yu)i
banddft = torch.unbind(banddft, -1)
banddft_real = (
self.complex_fact_construct.real * banddft[0]
- self.complex_fact_construct.imag * banddft[1]
)
banddft_imag = (
self.complex_fact_construct.real * banddft[1]
+ self.complex_fact_construct.imag * banddft[0]
)
banddft = torch.stack((banddft_real, banddft_imag), -1)
band = math_util.batch_ifftshift2d(banddft)
band = torch.fft.ifft2(band)
orientations.append(torch.stack((band.real, band.imag), -1))
####################################################################
######################## Subsample lowpass #########################
####################################################################
# Don't consider batch_size and imag/real dim
dims = np.array(lodft.shape[1:3])
# Both are tuples of size 2
low_ind_start = (
np.ceil((dims + 0.5) / 2)
- np.ceil((np.ceil((dims - 0.5) / 2) + 0.5) / 2)
).astype(int)
low_ind_end = (low_ind_start + np.ceil((dims - 0.5) / 2)).astype(int)
# Subsampling indices
log_rad = log_rad[
low_ind_start[0] : low_ind_end[0], low_ind_start[1] : low_ind_end[1]
]
angle = angle[
low_ind_start[0] : low_ind_end[0], low_ind_start[1] : low_ind_end[1]
]
# Actual subsampling
lodft = lodft[
:,
low_ind_start[0] : low_ind_end[0],
low_ind_start[1] : low_ind_end[1],
:,
]
# Filtering
YIrcos = np.abs(np.sqrt(1 - Yrcos ** 2))
lomask = pointOp(log_rad, YIrcos, Xrcos)
lomask = torch.from_numpy(lomask[None, :, :, None]).float()
lomask = lomask.to(self.device)
# Convolution in spatial domain
lodft = lomask * lodft
####################################################################
####################### Recursion next level #######################
####################################################################
coeff = self._build_levels(lodft, log_rad, angle, Xrcos, Yrcos, height - 1)
coeff.insert(0, orientations)
return coeff
| 8,552 | 36.678414 | 121 | py |
BVQI | BVQI-master/pyiqa/matlab_utils/__init__.py | """This folder contains pytorch implementations of matlab functions.
And should produce the same results as matlab.
Note: to enable GPU acceleration, all functions take batched tensors as inputs,
and return batched results.
"""
from .functions import *
from .resize import imresize
from .scfpyr_util import SCFpyr_PyTorch
__all__ = [
"imresize",
"fspecial",
"SCFpyr_PyTorch",
"imfilter",
"dct2d",
"conv2d",
"filter2",
"fitweibull",
"nancov",
"nanmean",
"im2col",
"blockproc",
]
| 529 | 19.384615 | 79 | py |
BVQI | BVQI-master/pyiqa/matlab_utils/resize.py | """
A standalone PyTorch implementation for fast and efficient bicubic resampling.
The resulting values are the same to MATLAB function imresize('bicubic').
## Author: Sanghyun Son
## Email: [email protected] (primary), [email protected] (secondary)
## Version: 1.2.0
## Last update: July 9th, 2020 (KST)
Dependency: torch
Example::
>>> import torch
>>> import core
>>> x = torch.arange(16).float().view(1, 1, 4, 4)
>>> y = core.imresize(x, sizes=(3, 3))
>>> print(y)
tensor([[[[ 0.7506, 2.1004, 3.4503],
[ 6.1505, 7.5000, 8.8499],
[11.5497, 12.8996, 14.2494]]]])
"""
import math
import typing
import torch
from torch.nn import functional as F
__all__ = ["imresize"]
_I = typing.Optional[int]
_D = typing.Optional[torch.dtype]
def nearest_contribution(x: torch.Tensor) -> torch.Tensor:
range_around_0 = torch.logical_and(x.gt(-0.5), x.le(0.5))
cont = range_around_0.to(dtype=x.dtype)
return cont
def linear_contribution(x: torch.Tensor) -> torch.Tensor:
ax = x.abs()
range_01 = ax.le(1)
cont = (1 - ax) * range_01.to(dtype=x.dtype)
return cont
def cubic_contribution(x: torch.Tensor, a: float = -0.5) -> torch.Tensor:
ax = x.abs()
ax2 = ax * ax
ax3 = ax * ax2
range_01 = ax.le(1)
range_12 = torch.logical_and(ax.gt(1), ax.le(2))
cont_01 = (a + 2) * ax3 - (a + 3) * ax2 + 1
cont_01 = cont_01 * range_01.to(dtype=x.dtype)
cont_12 = (a * ax3) - (5 * a * ax2) + (8 * a * ax) - (4 * a)
cont_12 = cont_12 * range_12.to(dtype=x.dtype)
cont = cont_01 + cont_12
return cont
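# Quick check (sketch): the cubic kernel is 1 at x=0 and 0 at integer offsets,
# so interpolation reproduces the input samples exactly.
#
#   cubic_contribution(torch.tensor([0.0, 1.0, 2.0]))  # tensor([1., 0., 0.])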
def gaussian_contribution(x: torch.Tensor, sigma: float = 2.0) -> torch.Tensor:
range_3sigma = x.abs() <= 3 * sigma + 1
# Normalization will be done after
cont = torch.exp(-x.pow(2) / (2 * sigma ** 2))
cont = cont * range_3sigma.to(dtype=x.dtype)
return cont
def discrete_kernel(
kernel: str, scale: float, antialiasing: bool = True
) -> torch.Tensor:
"""
For downsampling with integer scale only.
"""
downsampling_factor = int(1 / scale)
if kernel == "cubic":
kernel_size_orig = 4
else:
raise ValueError("Pass!")
if antialiasing:
kernel_size = kernel_size_orig * downsampling_factor
else:
kernel_size = kernel_size_orig
if downsampling_factor % 2 == 0:
a = kernel_size_orig * (0.5 - 1 / (2 * kernel_size))
else:
kernel_size -= 1
a = kernel_size_orig * (0.5 - 1 / (kernel_size + 1))
with torch.no_grad():
r = torch.linspace(-a, a, steps=kernel_size)
k = cubic_contribution(r).view(-1, 1)
k = torch.matmul(k, k.t())
k /= k.sum()
return k
def reflect_padding(
x: torch.Tensor, dim: int, pad_pre: int, pad_post: int
) -> torch.Tensor:
"""
Apply reflect padding to the given Tensor.
Note that it is slightly different from the PyTorch functional.pad,
where boundary elements are used only once.
Instead, we follow the MATLAB implementation
which uses boundary elements twice.
For example,
[a, b, c, d] would become [b, a, b, c, d, c] with the PyTorch implementation,
while our implementation yields [a, a, b, c, d, d].
"""
b, c, h, w = x.size()
if dim == 2 or dim == -2:
padding_buffer = x.new_zeros(b, c, h + pad_pre + pad_post, w)
padding_buffer[..., pad_pre : (h + pad_pre), :].copy_(x)
for p in range(pad_pre):
padding_buffer[..., pad_pre - p - 1, :].copy_(x[..., p, :])
for p in range(pad_post):
padding_buffer[..., h + pad_pre + p, :].copy_(x[..., -(p + 1), :])
else:
padding_buffer = x.new_zeros(b, c, h, w + pad_pre + pad_post)
padding_buffer[..., pad_pre : (w + pad_pre)].copy_(x)
for p in range(pad_pre):
padding_buffer[..., pad_pre - p - 1].copy_(x[..., p])
for p in range(pad_post):
padding_buffer[..., w + pad_pre + p].copy_(x[..., -(p + 1)])
return padding_buffer
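# Quick check (sketch) of the boundary-doubling behaviour described above:
#
#   x = torch.arange(4.).view(1, 1, 1, 4)               # [0, 1, 2, 3]
#   reflect_padding(x, dim=-1, pad_pre=1, pad_post=1)   # [0, 0, 1, 2, 3, 3]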
def padding(
x: torch.Tensor,
dim: int,
pad_pre: int,
pad_post: int,
padding_type: typing.Optional[str] = "reflect",
) -> torch.Tensor:
if padding_type is None:
return x
elif padding_type == "reflect":
x_pad = reflect_padding(x, dim, pad_pre, pad_post)
else:
raise ValueError("{} padding is not supported!".format(padding_type))
return x_pad
def get_padding(
base: torch.Tensor, kernel_size: int, x_size: int
) -> typing.Tuple[int, int, torch.Tensor]:
base = base.long()
r_min = base.min()
r_max = base.max() + kernel_size - 1
if r_min <= 0:
pad_pre = -r_min
pad_pre = pad_pre.item()
base += pad_pre
else:
pad_pre = 0
if r_max >= x_size:
pad_post = r_max - x_size + 1
pad_post = pad_post.item()
else:
pad_post = 0
return pad_pre, pad_post, base
def get_weight(
dist: torch.Tensor,
kernel_size: int,
kernel: str = "cubic",
sigma: float = 2.0,
antialiasing_factor: float = 1,
) -> torch.Tensor:
buffer_pos = dist.new_zeros(kernel_size, len(dist))
for idx, buffer_sub in enumerate(buffer_pos):
buffer_sub.copy_(dist - idx)
# Expand (downsampling) / Shrink (upsampling) the receptive field.
buffer_pos *= antialiasing_factor
if kernel == "cubic":
weight = cubic_contribution(buffer_pos)
elif kernel == "gaussian":
weight = gaussian_contribution(buffer_pos, sigma=sigma)
else:
raise ValueError("{} kernel is not supported!".format(kernel))
weight /= weight.sum(dim=0, keepdim=True)
return weight
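# --- Hedged sanity check (added; not part of the original module): each column
# of the matrix returned by `get_weight` is normalized, so the `kernel_size`
# taps contributing to one output pixel always sum to 1.
def _demo_get_weight():
    dist = torch.tensor([1.0, 1.25, 1.5])  # typical `pos - base` offsets
    w = get_weight(dist, kernel_size=4, kernel="cubic")
    assert w.shape == (4, 3)
    assert torch.allclose(w.sum(dim=0), torch.ones(3))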
def reshape_tensor(x: torch.Tensor, dim: int, kernel_size: int) -> torch.Tensor:
# Resize height
if dim == 2 or dim == -2:
k = (kernel_size, 1)
h_out = x.size(-2) - kernel_size + 1
w_out = x.size(-1)
# Resize width
else:
k = (1, kernel_size)
h_out = x.size(-2)
w_out = x.size(-1) - kernel_size + 1
unfold = F.unfold(x, k)
unfold = unfold.view(unfold.size(0), -1, h_out, w_out)
return unfold
def reshape_input(x: torch.Tensor) -> typing.Tuple[torch.Tensor, _I, _I, int, int]:
if x.dim() == 4:
b, c, h, w = x.size()
elif x.dim() == 3:
c, h, w = x.size()
b = None
elif x.dim() == 2:
h, w = x.size()
b = c = None
else:
raise ValueError("{}-dim Tensor is not supported!".format(x.dim()))
x = x.view(-1, 1, h, w)
return x, b, c, h, w
def reshape_output(x: torch.Tensor, b: _I, c: _I) -> torch.Tensor:
rh = x.size(-2)
rw = x.size(-1)
# Back to the original dimension
if b is not None:
x = x.view(b, c, rh, rw) # 4-dim
else:
if c is not None:
x = x.view(c, rh, rw) # 3-dim
else:
x = x.view(rh, rw) # 2-dim
return x
def cast_input(x: torch.Tensor) -> typing.Tuple[torch.Tensor, _D]:
    if x.dtype != torch.float32 and x.dtype != torch.float64:  # remember non-float dtypes and work in float32
dtype = x.dtype
x = x.float()
else:
dtype = None
return x, dtype
def cast_output(x: torch.Tensor, dtype: _D) -> torch.Tensor:
if dtype is not None:
if not dtype.is_floating_point:
x = x - x.detach() + x.round()
# To prevent over/underflow when converting types
if dtype is torch.uint8:
x = x.clamp(0, 255)
x = x.to(dtype=dtype)
return x
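# --- Hedged demo (added; not part of the original module): `cast_output`
# rounds with the straight-through trick `x - x.detach() + x.round()`, so the
# forward value is integer-valued while gradients bypass the rounding.
def _demo_cast_output():
    x = torch.tensor([-3.2, 10.4, 300.7])
    y = cast_output(x, torch.uint8)
    assert y.dtype == torch.uint8
    assert y.tolist() == [0, 10, 255]  # rounded, then clamped to [0, 255]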
def resize_1d(
x: torch.Tensor,
dim: int,
size: int,
scale: float,
kernel: str = "cubic",
sigma: float = 2.0,
padding_type: str = "reflect",
antialiasing: bool = True,
) -> torch.Tensor:
"""
Args:
x (torch.Tensor): A torch.Tensor of dimension (B x C, 1, H, W).
dim (int):
scale (float):
size (int):
Return:
"""
# Identity case
if scale == 1:
return x
# Default bicubic kernel with antialiasing (only when downsampling)
if kernel == "cubic":
kernel_size = 4
else:
kernel_size = math.floor(6 * sigma)
if antialiasing and (scale < 1):
antialiasing_factor = scale
kernel_size = math.ceil(kernel_size / antialiasing_factor)
else:
antialiasing_factor = 1
# We allow margin to both sizes
kernel_size += 2
# Weights only depend on the shape of input and output,
# so we do not calculate gradients here.
with torch.no_grad():
pos = torch.linspace(
0,
size - 1,
steps=size,
dtype=x.dtype,
device=x.device,
)
pos = (pos + 0.5) / scale - 0.5
base = pos.floor() - (kernel_size // 2) + 1
dist = pos - base
weight = get_weight(
dist,
kernel_size,
kernel=kernel,
sigma=sigma,
antialiasing_factor=antialiasing_factor,
)
pad_pre, pad_post, base = get_padding(base, kernel_size, x.size(dim))
# To backpropagate through x
x_pad = padding(x, dim, pad_pre, pad_post, padding_type=padding_type)
unfold = reshape_tensor(x_pad, dim, kernel_size)
# Subsampling first
if dim == 2 or dim == -2:
sample = unfold[..., base, :]
weight = weight.view(1, kernel_size, sample.size(2), 1)
else:
sample = unfold[..., base]
weight = weight.view(1, kernel_size, 1, sample.size(3))
# Apply the kernel
x = sample * weight
x = x.sum(dim=1, keepdim=True)
return x
def downsampling_2d(
x: torch.Tensor, k: torch.Tensor, scale: int, padding_type: str = "reflect"
) -> torch.Tensor:
c = x.size(1)
k_h = k.size(-2)
k_w = k.size(-1)
k = k.to(dtype=x.dtype, device=x.device)
k = k.view(1, 1, k_h, k_w)
k = k.repeat(c, c, 1, 1)
e = torch.eye(c, dtype=k.dtype, device=k.device, requires_grad=False)
e = e.view(c, c, 1, 1)
k = k * e
pad_h = (k_h - scale) // 2
pad_w = (k_w - scale) // 2
x = padding(x, -2, pad_h, pad_h, padding_type=padding_type)
x = padding(x, -1, pad_w, pad_w, padding_type=padding_type)
y = F.conv2d(x, k, padding=0, stride=scale)
return y
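# --- Hedged usage sketch (added; not part of the original module): pairing
# `discrete_kernel` with `downsampling_2d` for integer-factor downsampling,
# mirroring the predefined-kernel branch of `imresize` below.
def _demo_downsampling_2d():
    x = torch.rand(1, 3, 8, 8)
    k = discrete_kernel("cubic", scale=0.5)  # 8x8 antialiased cubic kernel
    y = downsampling_2d(x, k, scale=2)
    assert y.shape == (1, 3, 4, 4)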
def imresize(
x: torch.Tensor,
scale: typing.Optional[float] = None,
sizes: typing.Optional[typing.Tuple[int, int]] = None,
kernel: typing.Union[str, torch.Tensor] = "cubic",
sigma: float = 2,
rotation_degree: float = 0,
padding_type: str = "reflect",
antialiasing: bool = True,
) -> torch.Tensor:
"""
Args:
x (torch.Tensor):
scale (float):
sizes (tuple(int, int)):
kernel (str, default='cubic'):
sigma (float, default=2):
rotation_degree (float, default=0):
padding_type (str, default='reflect'):
antialiasing (bool, default=True):
Return:
torch.Tensor:
"""
if scale is None and sizes is None:
raise ValueError("One of scale or sizes must be specified!")
if scale is not None and sizes is not None:
raise ValueError("Please specify scale or sizes to avoid conflict!")
x, b, c, h, w = reshape_input(x)
if sizes is None and scale is not None:
"""
# Check if we can apply the convolution algorithm
scale_inv = 1 / scale
if isinstance(kernel, str) and scale_inv.is_integer():
kernel = discrete_kernel(kernel, scale, antialiasing=antialiasing)
elif isinstance(kernel, torch.Tensor) and not scale_inv.is_integer():
raise ValueError(
'An integer downsampling factor '
'should be used with a predefined kernel!'
)
"""
# Determine output size
sizes = (math.ceil(h * scale), math.ceil(w * scale))
scales = (scale, scale)
if scale is None and sizes is not None:
scales = (sizes[0] / h, sizes[1] / w)
x, dtype = cast_input(x)
if isinstance(kernel, str) and sizes is not None:
# Core resizing module
x = resize_1d(
x,
-2,
size=sizes[0],
scale=scales[0],
kernel=kernel,
sigma=sigma,
padding_type=padding_type,
antialiasing=antialiasing,
)
x = resize_1d(
x,
-1,
size=sizes[1],
scale=scales[1],
kernel=kernel,
sigma=sigma,
padding_type=padding_type,
antialiasing=antialiasing,
)
elif isinstance(kernel, torch.Tensor) and scale is not None:
x = downsampling_2d(x, kernel, scale=int(1 / scale))
x = reshape_output(x, b, c)
x = cast_output(x, dtype)
return x
| 12,696 | 27.404922 | 83 | py |
BVQI | BVQI-master/pyiqa/matlab_utils/.ipynb_checkpoints/functions-checkpoint.py | import math
import numpy as np
import torch
import torch.nn.functional as F
from pyiqa.archs.arch_util import ExactPadding2d, symm_pad, to_2tuple
def fspecial(size=None, sigma=None, channels=1, filter_type="gaussian"):
r"""Function same as 'fspecial' in MATLAB, only support gaussian now.
Args:
size (int or tuple): size of window
sigma (float): sigma of gaussian
channels (int): channels of output
"""
if filter_type == "gaussian":
shape = to_2tuple(size)
m, n = [(ss - 1.0) / 2.0 for ss in shape]
y, x = np.ogrid[-m : m + 1, -n : n + 1]
h = np.exp(-(x * x + y * y) / (2.0 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
h = torch.from_numpy(h).float().repeat(channels, 1, 1, 1)
return h
else:
raise NotImplementedError(
f"Only support gaussian filter now, got {filter_type}"
)
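# --- Hedged check (added; not part of the original file): like MATLAB's
# fspecial('gaussian'), the returned window is non-negative and sums to 1.
def _demo_fspecial():
    h = fspecial(size=7, sigma=1.5, channels=1)
    assert h.shape == (1, 1, 7, 7)
    assert torch.allclose(h.sum(), torch.tensor(1.0))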
def conv2d(input, weight, bias=None, stride=1, padding="same", dilation=1, groups=1):
"""Matlab like conv2, weights needs to be flipped.
Args:
input (tensor): (b, c, h, w)
weight (tensor): (out_ch, in_ch, kh, kw), conv weight
bias (bool or None): bias
stride (int or tuple): conv stride
padding (str): padding mode
dilation (int): conv dilation
"""
kernel_size = weight.shape[2:]
pad_func = ExactPadding2d(kernel_size, stride, dilation, mode=padding)
weight = torch.flip(weight, dims=(-1, -2))
return F.conv2d(
pad_func(input), weight, bias, stride, dilation=dilation, groups=groups
)
def imfilter(input, weight, bias=None, stride=1, padding="same", dilation=1, groups=1):
"""imfilter same as matlab.
Args:
input (tensor): (b, c, h, w) tensor to be filtered
weight (tensor): (out_ch, in_ch, kh, kw) filter kernel
padding (str): padding mode
dilation (int): dilation of conv
groups (int): groups of conv
"""
kernel_size = weight.shape[2:]
pad_func = ExactPadding2d(kernel_size, stride, dilation, mode=padding)
return F.conv2d(
pad_func(input), weight, bias, stride, dilation=dilation, groups=groups
)
def filter2(input, weight, shape="same"):
if shape == "same":
return imfilter(input, weight, groups=input.shape[1])
elif shape == "valid":
return F.conv2d(input, weight, stride=1, padding=0, groups=input.shape[1])
else:
raise NotImplementedError(f"Shape type {shape} is not implemented.")
def dct(x, norm=None):
"""
Discrete Cosine Transform, Type II (a.k.a. the DCT)
For the meaning of the parameter `norm`, see:
https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
Args:
x: the input signal
norm: the normalization, None or 'ortho'
Return:
the DCT-II of the signal over the last dimension
"""
x_shape = x.shape
N = x_shape[-1]
x = x.contiguous().view(-1, N)
v = torch.cat([x[:, ::2], x[:, 1::2].flip([1])], dim=-1)
Vc = torch.view_as_real(torch.fft.fft(v, dim=-1))
k = -torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N)
W_r = torch.cos(k)
W_i = torch.sin(k)
V = Vc[:, :, 0] * W_r - Vc[:, :, 1] * W_i
if norm == "ortho":
V[:, 0] /= np.sqrt(N) * 2
V[:, 1:] /= np.sqrt(N / 2) * 2
V = 2 * V.view(*x_shape)
return V
def dct2d(x, norm="ortho"):
"""
2-dimentional Discrete Cosine Transform, Type II (a.k.a. the DCT)
For the meaning of the parameter `norm`, see:
https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
:param x: the input signal
:param norm: the normalization, None or 'ortho'
:return: the DCT-II of the signal over the last 2 dimensions
"""
X1 = dct(x, norm=norm)
X2 = dct(X1.transpose(-1, -2), norm=norm)
return X2.transpose(-1, -2)
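# --- Hedged sanity check (added; not part of the original file): with
# norm='ortho' the 2-D DCT is orthonormal, so signal energy is preserved.
def _demo_dct2d():
    x = torch.rand(2, 8, 8)
    X = dct2d(x, norm="ortho")
    assert torch.allclose(x.pow(2).sum(), X.pow(2).sum(), atol=1e-3)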
def fitweibull(x, iters=50, eps=1e-2):
"""Simulate wblfit function in matlab.
ref: https://github.com/mlosch/python-weibullfit/blob/master/weibull/backend_pytorch.py
Fits a 2-parameter Weibull distribution to the given data using maximum-likelihood estimation.
:param x (tensor): (B, N), batch of samples from an (unknown) distribution. Each value must satisfy x > 0.
:param iters: Maximum number of iterations
:param eps: Stopping criterion. Fit is stopped ff the change within two iterations is smaller than eps.
:param use_cuda: Use gpu
:return: Tuple (Shape, Scale) which can be (NaN, NaN) if a fit is impossible.
Impossible fits may be due to 0-values in x.
"""
ln_x = torch.log(x)
k = 1.2 / torch.std(ln_x, dim=1, keepdim=True)
k_t_1 = k
for t in range(iters):
# Partial derivative df/dk
x_k = x ** k.repeat(1, x.shape[1])
x_k_ln_x = x_k * ln_x
ff = torch.sum(x_k_ln_x, dim=-1, keepdim=True)
fg = torch.sum(x_k, dim=-1, keepdim=True)
f1 = torch.mean(ln_x, dim=-1, keepdim=True)
f = ff / fg - f1 - (1.0 / k)
ff_prime = torch.sum(x_k_ln_x * ln_x, dim=-1, keepdim=True)
fg_prime = ff
f_prime = (ff_prime / fg - (ff / fg * fg_prime / fg)) + (1.0 / (k * k))
# Newton-Raphson method k = k - f(k;x)/f'(k;x)
k = k - f / f_prime
error = torch.abs(k - k_t_1).max().item()
if error < eps:
break
k_t_1 = k
# Lambda (scale) can be calculated directly
lam = torch.mean(x ** k.repeat(1, x.shape[1]), dim=-1, keepdim=True) ** (1.0 / k)
    return torch.cat((k, lam), dim=1)  # columns: shape parameter k, scale parameter lambda
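# --- Hedged demo (added; not part of the original file): fit samples drawn
# from a known Weibull via inverse-CDF sampling; the recovered (shape, scale)
# should land near (2.0, 1.5). A loose sketch, not a unit test.
def _demo_fitweibull():
    torch.manual_seed(0)
    u = torch.rand(1, 10000).clamp(1e-6, 1 - 1e-6)
    x = 1.5 * (-torch.log(1 - u)) ** (1.0 / 2.0)  # shape=2.0, scale=1.5
    params = fitweibull(x)
    assert params.shape == (1, 2)  # columns: shape k, scale lambda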
def cov(tensor, rowvar=True, bias=False):
r"""Estimate a covariance matrix (np.cov)
Ref: https://gist.github.com/ModarTensai/5ab449acba9df1a26c12060240773110
"""
tensor = tensor if rowvar else tensor.transpose(-1, -2)
tensor = tensor - tensor.mean(dim=-1, keepdim=True)
if tensor.shape[-1] > 1:
factor = 1 / (tensor.shape[-1] - int(not bool(bias)))
else:
factor = 1
return factor * tensor @ tensor.transpose(-1, -2)
def nancov(x):
r"""Calculate nancov for batched tensor, rows that contains nan value
will be removed.
Args:
x (tensor): (B, row_num, feat_dim)
Return:
cov (tensor): (B, feat_dim, feat_dim)
"""
assert (
len(x.shape) == 3
), f"Shape of input should be (batch_size, row_num, feat_dim), but got {x.shape}"
b, rownum, feat_dim = x.shape
nan_mask = torch.isnan(x).any(dim=2, keepdim=True)
cov_x = []
for i in range(b):
x_no_nan = x[i].masked_select(~nan_mask[i]).reshape(-1, feat_dim)
cov_x.append(cov(x_no_nan, rowvar=False))
return torch.stack(cov_x)
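# --- Hedged demo (added; not part of the original file): rows containing NaN
# are dropped before the covariance is computed, keeping the result finite.
def _demo_nancov():
    x = torch.randn(1, 5, 3)
    x[0, 2, 1] = float("nan")  # poison one row
    c = nancov(x)
    assert c.shape == (1, 3, 3) and torch.isfinite(c).all()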
def nanmean(v, *args, inplace=False, **kwargs):
r"""nanmean same as matlab function: calculate mean values by removing all nan."""
if not inplace:
v = v.clone()
is_nan = torch.isnan(v)
v[is_nan] = 0
return v.sum(*args, **kwargs) / (~is_nan).float().sum(*args, **kwargs)
def im2col(x, kernel, mode="sliding"):
r"""simple im2col as matlab
Args:
x (Tensor): shape (b, c, h, w)
kernel (int): kernel size
mode (string):
- sliding (default): rearranges sliding image neighborhoods of kernel size into columns with no zero-padding
- distinct: rearranges discrete image blocks of kernel size into columns, zero pad right and bottom if necessary
Return:
flatten patch (Tensor): (b, h * w / kernel **2, kernel * kernel)
"""
b, c, h, w = x.shape
kernel = to_2tuple(kernel)
if mode == "sliding":
stride = 1
elif mode == "distinct":
stride = kernel
h2 = math.ceil(h / stride[0])
w2 = math.ceil(w / stride[1])
pad_row = (h2 - 1) * stride[0] + kernel[0] - h
pad_col = (w2 - 1) * stride[1] + kernel[1] - w
x = F.pad(x, (0, pad_col, 0, pad_row))
else:
raise NotImplementedError(f"Type {mode} is not implemented yet.")
patches = F.unfold(x, kernel, dilation=1, stride=stride)
b, _, pnum = patches.shape
patches = patches.transpose(1, 2).reshape(b, pnum, -1)
return patches
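# --- Hedged shape check (added; not part of the original file): 'sliding'
# yields one row per valid window position, while 'distinct' zero-pads so
# every pixel lands in exactly one non-overlapping block.
def _demo_im2col():
    x = torch.arange(16.0).view(1, 1, 4, 4)
    sliding = im2col(x, 2, mode="sliding")
    distinct = im2col(x, 2, mode="distinct")
    assert sliding.shape == (1, 9, 4)   # 3x3 positions, 2*2 values each
    assert distinct.shape == (1, 4, 4)  # 2x2 blocks, 2*2 values each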
def blockproc(
x, kernel, fun, border_size=None, pad_partial=False, pad_method="zero", **func_args
):
r"""blockproc function like matlab
Difference:
- Partial blocks is discarded (if exist) for fast GPU process.
Args:
x (tensor): shape (b, c, h, w)
kernel (int or tuple): block size
func (function): function to process each block
border_size (int or tuple): border pixels to each block
pad_partial: pad partial blocks to make them full-sized, default False
pad_method: [zero, replicate, symmetric] how to pad partial block when pad_partial is set True
Return:
results (tensor): concatenated results of each block
"""
assert len(x.shape) == 4, f"Shape of input has to be (b, c, h, w) but got {x.shape}"
kernel = to_2tuple(kernel)
if pad_partial:
b, c, h, w = x.shape
stride = kernel
h2 = math.ceil(h / stride[0])
w2 = math.ceil(w / stride[1])
pad_row = (h2 - 1) * stride[0] + kernel[0] - h
pad_col = (w2 - 1) * stride[1] + kernel[1] - w
padding = (0, pad_col, 0, pad_row)
if pad_method == "zero":
x = F.pad(x, padding, mode="constant")
elif pad_method == "symmetric":
x = symm_pad(x, padding)
else:
x = F.pad(x, padding, mode=pad_method)
if border_size is not None:
raise NotImplementedError("Blockproc with border is not implemented yet")
else:
b, c, h, w = x.shape
block_size_h, block_size_w = kernel
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
# extract blocks in (row, column) manner, i.e., stored with column first
blocks = F.unfold(x, kernel, stride=kernel)
blocks = blocks.reshape(b, c, *kernel, num_block_h, num_block_w)
blocks = blocks.permute(5, 4, 0, 1, 2, 3).reshape(
num_block_h * num_block_w * b, c, *kernel
)
results = fun(blocks, func_args)
results = results.reshape(
num_block_h * num_block_w, b, *results.shape[1:]
).transpose(0, 1)
return results
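# --- Hedged usage sketch (added; not part of the original file): `fun`
# receives all blocks stacked along the batch axis plus the extra kwargs
# packed into a dict, matching the `fun(blocks, func_args)` call above.
def _demo_blockproc():
    x = torch.arange(16.0).view(1, 1, 4, 4)
    block_mean = lambda blocks, args: blocks.mean(dim=(1, 2, 3), keepdim=True)
    out = blockproc(x, 2, block_mean)
    assert out.shape == (1, 4, 1, 1, 1)  # batch, num_blocks, then block result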
| 10,439 | 33.569536 | 124 | py |
BVQI | BVQI-master/pyiqa/models/lr_scheduler.py | import math
from collections import Counter
from torch.optim.lr_scheduler import _LRScheduler
class MultiStepRestartLR(_LRScheduler):
"""MultiStep with restarts learning rate scheme.
Args:
optimizer (torch.nn.optimizer): Torch optimizer.
milestones (list): Iterations that will decrease learning rate.
gamma (float): Decrease ratio. Default: 0.1.
restarts (list): Restart iterations. Default: [0].
restart_weights (list): Restart weights at each restart iteration.
Default: [1].
last_epoch (int): Used in _LRScheduler. Default: -1.
"""
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
restarts=(0,),
restart_weights=(1,),
last_epoch=-1,
):
self.milestones = Counter(milestones)
self.gamma = gamma
self.restarts = restarts
self.restart_weights = restart_weights
assert len(self.restarts) == len(
self.restart_weights
), "restarts and their weights do not match."
super(MultiStepRestartLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
if self.last_epoch in self.restarts:
weight = self.restart_weights[self.restarts.index(self.last_epoch)]
return [
group["initial_lr"] * weight for group in self.optimizer.param_groups
]
if self.last_epoch not in self.milestones:
return [group["lr"] for group in self.optimizer.param_groups]
return [
group["lr"] * self.gamma ** self.milestones[self.last_epoch]
for group in self.optimizer.param_groups
]
def get_position_from_periods(iteration, cumulative_period):
"""Get the position from a period list.
It will return the index of the right-closest number in the period list.
For example, the cumulative_period = [100, 200, 300, 400],
if iteration == 50, return 0;
if iteration == 210, return 2;
if iteration == 300, return 2.
Args:
iteration (int): Current iteration.
cumulative_period (list[int]): Cumulative period list.
Returns:
int: The position of the right-closest number in the period list.
"""
for i, period in enumerate(cumulative_period):
if iteration <= period:
return i
class CosineAnnealingRestartLR(_LRScheduler):
"""Cosine annealing with restarts learning rate scheme.
An example of config:
periods = [10, 10, 10, 10]
restart_weights = [1, 0.5, 0.5, 0.5]
eta_min=1e-7
It has four cycles, each has 10 iterations. At 10th, 20th, 30th, the
scheduler will restart with the weights in restart_weights.
Args:
optimizer (torch.nn.optimizer): Torch optimizer.
periods (list): Period for each cosine anneling cycle.
restart_weights (list): Restart weights at each restart iteration.
Default: [1].
eta_min (float): The minimum lr. Default: 0.
last_epoch (int): Used in _LRScheduler. Default: -1.
"""
def __init__(
self, optimizer, periods, restart_weights=(1,), eta_min=0, last_epoch=-1
):
self.periods = periods
self.restart_weights = restart_weights
self.eta_min = eta_min
assert len(self.periods) == len(
self.restart_weights
), "periods and restart_weights should have the same length."
self.cumulative_period = [
sum(self.periods[0 : i + 1]) for i in range(0, len(self.periods))
]
super(CosineAnnealingRestartLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
idx = get_position_from_periods(self.last_epoch, self.cumulative_period)
current_weight = self.restart_weights[idx]
nearest_restart = 0 if idx == 0 else self.cumulative_period[idx - 1]
current_period = self.periods[idx]
return [
self.eta_min
+ current_weight
* 0.5
* (base_lr - self.eta_min)
* (
1
+ math.cos(
math.pi * ((self.last_epoch - nearest_restart) / current_period)
)
)
for base_lr in self.base_lrs
]
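# --- Hedged usage sketch (added; not part of the original file): stepping
# through two cosine cycles; the second cycle restarts at roughly half the
# base lr. All names below are illustrative only.
def _demo_cosine_annealing_restart():
    import torch
    opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
    sched = CosineAnnealingRestartLR(opt, periods=[10, 10], restart_weights=[1, 0.5])
    lrs = []
    for _ in range(20):
        lrs.append(opt.param_groups[0]["lr"])
        opt.step()
        sched.step()
    return lrs  # decays 1e-3 -> ~0, restarts near 5e-4, decays again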
| 4,268 | 32.880952 | 85 | py |
BVQI | BVQI-master/pyiqa/models/base_model.py | import os
import time
from collections import OrderedDict
from copy import deepcopy
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
from pyiqa.models import lr_scheduler as lr_scheduler
from pyiqa.utils import get_root_logger
from pyiqa.utils.dist_util import master_only
class BaseModel:
"""Base model."""
def __init__(self, opt):
self.opt = opt
self.device = torch.device("cuda" if opt["num_gpu"] != 0 else "cpu")
self.is_train = opt["is_train"]
self.schedulers = []
self.optimizers = []
def feed_data(self, data):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
pass
def save(self, epoch, current_iter):
"""Save networks and training state."""
pass
def validation(self, dataloader, current_iter, tb_logger, save_img=False):
"""Validation function.
Args:
dataloader (torch.utils.data.DataLoader): Validation dataloader.
current_iter (int): Current iteration.
tb_logger (tensorboard logger): Tensorboard logger.
save_img (bool): Whether to save images. Default: False.
"""
if self.opt["dist"]:
self.dist_validation(dataloader, current_iter, tb_logger, save_img)
else:
self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
def _initialize_best_metric_results(self, dataset_name):
"""Initialize the best metric results dict for recording the best metric value and iteration."""
if (
hasattr(self, "best_metric_results")
and dataset_name in self.best_metric_results
):
return
elif not hasattr(self, "best_metric_results"):
self.best_metric_results = dict()
# add a dataset record
record = dict()
for metric, content in self.opt["val"]["metrics"].items():
better = content.get("better", "higher")
init_val = float("-inf") if better == "higher" else float("inf")
record[metric] = dict(better=better, val=init_val, iter=-1)
self.best_metric_results[dataset_name] = record
self.key_metric = self.opt["val"].get("key_metric", None)
def _update_metric_result(self, dataset_name, metric, val, current_iter):
self.best_metric_results[dataset_name][metric]["val"] = val
self.best_metric_results[dataset_name][metric]["iter"] = current_iter
def _update_best_metric_result(self, dataset_name, metric, val, current_iter):
if self.best_metric_results[dataset_name][metric]["better"] == "higher":
if val >= self.best_metric_results[dataset_name][metric]["val"]:
self.best_metric_results[dataset_name][metric]["val"] = val
self.best_metric_results[dataset_name][metric]["iter"] = current_iter
return True
else:
return False
else:
if val <= self.best_metric_results[dataset_name][metric]["val"]:
self.best_metric_results[dataset_name][metric]["val"] = val
self.best_metric_results[dataset_name][metric]["iter"] = current_iter
return True
else:
return False
def model_ema(self, decay=0.999):
net_g = self.get_bare_model(self.net_g)
net_g_params = dict(net_g.named_parameters())
net_g_ema_params = dict(self.net_g_ema.named_parameters())
for k in net_g_ema_params.keys():
net_g_ema_params[k].data.mul_(decay).add_(
net_g_params[k].data, alpha=1 - decay
)
def copy_model(self, net_a, net_b):
"""copy model from net_a to net_b"""
tmp_net_a = self.get_bare_model(net_a)
tmp_net_b = self.get_bare_model(net_b)
tmp_net_b.load_state_dict(tmp_net_a.state_dict())
def get_current_log(self):
return self.log_dict
def model_to_device(self, net):
"""Model to device. It also warps models with DistributedDataParallel
or DataParallel.
Args:
net (nn.Module)
"""
net = net.to(self.device)
if self.opt["dist"]:
find_unused_parameters = self.opt.get("find_unused_parameters", False)
net = DistributedDataParallel(
net,
device_ids=[torch.cuda.current_device()],
find_unused_parameters=find_unused_parameters,
)
elif self.opt["num_gpu"] > 1:
net = DataParallel(net)
return net
def get_optimizer(self, optim_type, params, lr, **kwargs):
optim_class = getattr(torch.optim, optim_type)
optimizer = optim_class(params, lr, **kwargs)
return optimizer
def setup_schedulers(self, scheduler_name="scheduler"):
"""Set up schedulers."""
train_opt = self.opt["train"]
scheduler_type = train_opt[scheduler_name].pop("type")
if scheduler_type in ["MultiStepLR", "MultiStepRestartLR"]:
for optimizer in self.optimizers:
self.schedulers.append(
lr_scheduler.MultiStepRestartLR(
optimizer, **train_opt[scheduler_name]
)
)
elif scheduler_type == "CosineAnnealingRestartLR":
for optimizer in self.optimizers:
self.schedulers.append(
lr_scheduler.CosineAnnealingRestartLR(
optimizer, **train_opt[scheduler_name]
)
)
else:
scheduler = getattr(torch.optim.lr_scheduler, scheduler_type)
for optimizer in self.optimizers:
self.schedulers.append(
scheduler(optimizer, **train_opt[scheduler_name])
)
def get_bare_model(self, net):
"""Get bare model, especially under wrapping with
DistributedDataParallel or DataParallel.
"""
if isinstance(net, (DataParallel, DistributedDataParallel)):
net = net.module
return net
@master_only
def print_network(self, net):
"""print the str and parameter number of a network.
Args:
net (nn.Module)
"""
if isinstance(net, (DataParallel, DistributedDataParallel)):
net_cls_str = f"{net.__class__.__name__} - {net.module.__class__.__name__}"
else:
net_cls_str = f"{net.__class__.__name__}"
net = self.get_bare_model(net)
net_str = str(net)
net_params = sum(map(lambda x: x.numel(), net.parameters()))
logger = get_root_logger()
logger.info(f"Network: {net_cls_str}, with parameters: {net_params:,d}")
logger.info(net_str)
def _set_lr(self, lr_groups_l):
"""Set learning rate for warmup.
Args:
lr_groups_l (list): List for lr_groups, each for an optimizer.
"""
for optimizer, lr_groups in zip(self.optimizers, lr_groups_l):
for param_group, lr in zip(optimizer.param_groups, lr_groups):
param_group["lr"] = lr
def _get_init_lr(self):
"""Get the initial lr, which is set by the scheduler."""
init_lr_groups_l = []
for optimizer in self.optimizers:
init_lr_groups_l.append([v["initial_lr"] for v in optimizer.param_groups])
return init_lr_groups_l
def update_learning_rate(self, current_iter, warmup_iter=-1):
"""Update learning rate.
Args:
current_iter (int): Current iteration.
warmup_iter (int): Warmup iter numbers. -1 for no warmup.
Default: -1.
"""
if current_iter > 1:
for scheduler in self.schedulers:
scheduler.step()
# set up warm-up learning rate
if current_iter < warmup_iter:
# get initial lr for each group
init_lr_g_l = self._get_init_lr()
# modify warming-up learning rates
# currently only support linearly warm up
warm_up_lr_l = []
for init_lr_g in init_lr_g_l:
warm_up_lr_l.append([v / warmup_iter * current_iter for v in init_lr_g])
# set learning rate
self._set_lr(warm_up_lr_l)
def get_current_learning_rate(self):
return [param_group["lr"] for param_group in self.optimizers[0].param_groups]
@master_only
def save_network(self, net, net_label, current_iter=None, param_key="params"):
"""Save networks.
Args:
net (nn.Module | list[nn.Module]): Network(s) to be saved.
net_label (str): Network label.
current_iter (int): Current iter number.
param_key (str | list[str]): The parameter key(s) to save network.
Default: 'params'.
"""
if current_iter == -1:
current_iter = "latest"
if current_iter is not None:
save_filename = f"{net_label}_{current_iter}.pth"
else:
save_filename = f"{net_label}.pth"
save_path = os.path.join(self.opt["path"]["models"], save_filename)
net = net if isinstance(net, list) else [net]
param_key = param_key if isinstance(param_key, list) else [param_key]
assert len(net) == len(
param_key
), "The lengths of net and param_key should be the same."
save_dict = {}
for net_, param_key_ in zip(net, param_key):
net_ = self.get_bare_model(net_)
state_dict = net_.state_dict()
for key, param in state_dict.items():
if key.startswith("module."): # remove unnecessary 'module.'
key = key[7:]
state_dict[key] = param.cpu()
save_dict[param_key_] = state_dict
# avoid occasional writing errors
retry = 3
while retry > 0:
try:
torch.save(save_dict, save_path)
except Exception as e:
logger = get_root_logger()
logger.warning(
f"Save model error: {e}, remaining retry times: {retry - 1}"
)
time.sleep(1)
else:
break
finally:
retry -= 1
if retry == 0:
logger.warning(f"Still cannot save {save_path}. Just ignore it.")
# raise IOError(f'Cannot save {save_path}.')
def _print_different_keys_loading(self, crt_net, load_net, strict=True):
"""print keys with different name or different size when loading models.
1. print keys with different names.
2. If strict=False, print the same key but with different tensor size.
It also ignore these keys with different sizes (not load).
Args:
crt_net (torch model): Current network.
load_net (dict): Loaded network.
strict (bool): Whether strictly loaded. Default: True.
"""
crt_net = self.get_bare_model(crt_net)
crt_net = crt_net.state_dict()
crt_net_keys = set(crt_net.keys())
load_net_keys = set(load_net.keys())
logger = get_root_logger()
if crt_net_keys != load_net_keys:
logger.warning("Current net - loaded net:")
for v in sorted(list(crt_net_keys - load_net_keys)):
logger.warning(f" {v}")
logger.warning("Loaded net - current net:")
for v in sorted(list(load_net_keys - crt_net_keys)):
logger.warning(f" {v}")
# check the size for the same keys
if not strict:
common_keys = crt_net_keys & load_net_keys
for k in common_keys:
if crt_net[k].size() != load_net[k].size():
logger.warning(
f"Size different, ignore [{k}]: crt_net: "
f"{crt_net[k].shape}; load_net: {load_net[k].shape}"
)
load_net[k + ".ignore"] = load_net.pop(k)
def load_network(self, net, load_path, strict=True, param_key="params"):
"""Load network.
Args:
load_path (str): The path of networks to be loaded.
net (nn.Module): Network.
strict (bool): Whether strictly loaded.
param_key (str): The parameter key of loaded network. If set to
None, use the root 'path'.
Default: 'params'.
"""
logger = get_root_logger()
net = self.get_bare_model(net)
load_net = torch.load(load_path, map_location=lambda storage, loc: storage)
if param_key is not None:
if param_key not in load_net and "params" in load_net:
param_key = "params"
logger.info("Loading: params_ema does not exist, use params.")
load_net = load_net[param_key]
logger.info(
f"Loading {net.__class__.__name__} model from {load_path}, with param key: [{param_key}]."
)
# remove unnecessary 'module.'
for k, v in deepcopy(load_net).items():
if k.startswith("module."):
load_net[k[7:]] = v
load_net.pop(k)
self._print_different_keys_loading(net, load_net, strict)
net.load_state_dict(load_net, strict=strict)
@master_only
def save_training_state(self, epoch, current_iter):
"""Save training states during training, which will be used for
resuming.
Args:
epoch (int): Current epoch.
current_iter (int): Current iteration.
"""
if current_iter != -1:
state = {
"epoch": epoch,
"iter": current_iter,
"optimizers": [],
"schedulers": [],
}
for o in self.optimizers:
state["optimizers"].append(o.state_dict())
for s in self.schedulers:
state["schedulers"].append(s.state_dict())
save_filename = f"{current_iter}.state"
save_path = os.path.join(self.opt["path"]["training_states"], save_filename)
# avoid occasional writing errors
retry = 3
while retry > 0:
try:
torch.save(state, save_path)
except Exception as e:
logger = get_root_logger()
logger.warning(
f"Save training state error: {e}, remaining retry times: {retry - 1}"
)
time.sleep(1)
else:
break
finally:
retry -= 1
if retry == 0:
logger.warning(f"Still cannot save {save_path}. Just ignore it.")
# raise IOError(f'Cannot save {save_path}.')
def resume_training(self, resume_state):
"""Reload the optimizers and schedulers for resumed training.
Args:
resume_state (dict): Resume state.
"""
resume_optimizers = resume_state["optimizers"]
resume_schedulers = resume_state["schedulers"]
assert len(resume_optimizers) == len(
self.optimizers
), "Wrong lengths of optimizers"
assert len(resume_schedulers) == len(
self.schedulers
), "Wrong lengths of schedulers"
for i, o in enumerate(resume_optimizers):
self.optimizers[i].load_state_dict(o)
for i, s in enumerate(resume_schedulers):
self.schedulers[i].load_state_dict(s)
def reduce_loss_dict(self, loss_dict):
"""reduce loss dict.
In distributed training, it averages the losses among different GPUs .
Args:
loss_dict (OrderedDict): Loss dict.
"""
with torch.no_grad():
if self.opt["dist"]:
keys = []
losses = []
for name, value in loss_dict.items():
keys.append(name)
losses.append(value)
losses = torch.stack(losses, 0)
torch.distributed.reduce(losses, dst=0)
if self.opt["rank"] == 0:
losses /= self.opt["world_size"]
loss_dict = {key: loss for key, loss in zip(keys, losses)}
log_dict = OrderedDict()
for name, value in loss_dict.items():
log_dict[name] = value.mean().item()
return log_dict
| 16,657 | 36.859091 | 104 | py |
BVQI | BVQI-master/pyiqa/models/hypernet_model.py | from collections import OrderedDict
import torch
from pyiqa.metrics import calculate_metric
from pyiqa.utils.registry import MODEL_REGISTRY
from .general_iqa_model import GeneralIQAModel
@MODEL_REGISTRY.register()
class HyperNetModel(GeneralIQAModel):
"""General module to train an IQA network."""
def test(self):
self.net.eval()
with torch.no_grad():
self.output_score = self.get_bare_model(self.net).random_crop_test(
self.img_input
)
self.net.train()
def setup_optimizers(self):
train_opt = self.opt["train"]
optim_opt = train_opt["optim"]
bare_net = self.get_bare_model(self.net)
optim_params = [
{
"params": bare_net.base_model.parameters(),
"lr": optim_opt.pop("lr_basemodel"),
},
{
"params": [
p for k, p in bare_net.named_parameters() if "base_model" not in k
],
"lr": optim_opt.pop("lr_hypermodule"),
},
]
optim_type = optim_opt.pop("type")
self.optimizer = self.get_optimizer(optim_type, optim_params, **optim_opt)
self.optimizers.append(self.optimizer)
| 1,260 | 28.325581 | 86 | py |
BVQI | BVQI-master/pyiqa/models/wadiqam_model.py | from collections import OrderedDict
from pyiqa.metrics import calculate_metric
from pyiqa.utils.registry import MODEL_REGISTRY
from .general_iqa_model import GeneralIQAModel
@MODEL_REGISTRY.register()
class WaDIQaMModel(GeneralIQAModel):
"""General module to train an IQA network."""
def setup_optimizers(self):
train_opt = self.opt["train"]
optim_opt = train_opt["optim"]
bare_net = self.get_bare_model(self.net)
optim_params = [
{
"params": bare_net.features.parameters(),
"lr": optim_opt.pop("lr_basemodel"),
},
{
"params": [
p for k, p in bare_net.named_parameters() if "features" not in k
],
"lr": optim_opt.pop("lr_fc_layers"),
},
]
optim_type = optim_opt.pop("type")
self.optimizer = self.get_optimizer(optim_type, optim_params, **optim_opt)
self.optimizers.append(self.optimizer)
| 1,014 | 29.757576 | 84 | py |
BVQI | BVQI-master/pyiqa/models/dbcnn_model.py | from collections import OrderedDict
from os import path as osp
import torch
from tqdm import tqdm
from pyiqa.archs import build_network
from pyiqa.losses import build_loss
from pyiqa.metrics import calculate_metric
from pyiqa.models import lr_scheduler as lr_scheduler
from pyiqa.utils import get_root_logger, imwrite, logger, tensor2img
from pyiqa.utils.registry import MODEL_REGISTRY
from .general_iqa_model import GeneralIQAModel
@MODEL_REGISTRY.register()
class DBCNNModel(GeneralIQAModel):
"""General module to train an IQA network."""
def __init__(self, opt):
super(DBCNNModel, self).__init__(opt)
self.train_stage = "train"
def reset_optimizers_finetune(self):
logger = get_root_logger()
logger.info(f"\n Start finetune stage. Set all parameters trainable\n")
train_opt = self.opt["train"]
optim_params = []
for k, v in self.net.named_parameters():
v.requires_grad = True
optim_params.append(v)
optim_type = train_opt["optim_finetune"].pop("type")
self.optimizer = self.get_optimizer(
optim_type, optim_params, **train_opt["optim_finetune"]
)
self.optimizers = [self.optimizer]
# reset schedulers
self.schedulers = []
self.setup_schedulers("scheduler_finetune")
def optimize_parameters(self, current_iter):
if (
current_iter >= self.opt["train"]["finetune_start_iter"]
and self.train_stage != "finetune"
):
# copy best model from coarse training stage and reset optimizers
self.copy_model(self.net_best, self.net)
self.reset_optimizers_finetune()
self.train_stage = "finetune"
super().optimize_parameters(current_iter)
| 1,797 | 31.690909 | 79 | py |
BVQI | BVQI-master/pyiqa/models/sr_model.py | from collections import OrderedDict
from os import path as osp
import torch
from tqdm import tqdm
from pyiqa.archs import build_network
from pyiqa.losses import build_loss
from pyiqa.metrics import calculate_metric
from pyiqa.utils import get_root_logger, imwrite, tensor2img
from pyiqa.utils.registry import MODEL_REGISTRY
from .base_model import BaseModel
@MODEL_REGISTRY.register()
class SRModel(BaseModel):
"""Base SR model for single image super-resolution."""
def __init__(self, opt):
super(SRModel, self).__init__(opt)
# define network
self.net_g = build_network(opt["network_g"])
self.net_g = self.model_to_device(self.net_g)
self.print_network(self.net_g)
# load pretrained models
load_path = self.opt["path"].get("pretrain_network_g", None)
if load_path is not None:
param_key = self.opt["path"].get("param_key_g", "params")
self.load_network(
self.net_g,
load_path,
self.opt["path"].get("strict_load_g", True),
param_key,
)
if self.is_train:
self.init_training_settings()
def init_training_settings(self):
self.net_g.train()
train_opt = self.opt["train"]
self.ema_decay = train_opt.get("ema_decay", 0)
if self.ema_decay > 0:
logger = get_root_logger()
logger.info(f"Use Exponential Moving Average with decay: {self.ema_decay}")
# define network net_g with Exponential Moving Average (EMA)
# net_g_ema is used only for testing on one GPU and saving
# There is no need to wrap with DistributedDataParallel
self.net_g_ema = build_network(self.opt["network_g"]).to(self.device)
# load pretrained model
load_path = self.opt["path"].get("pretrain_network_g", None)
if load_path is not None:
self.load_network(
self.net_g_ema,
load_path,
self.opt["path"].get("strict_load_g", True),
"params_ema",
)
else:
self.model_ema(0) # copy net_g weight
self.net_g_ema.eval()
# define losses
if train_opt.get("pixel_opt"):
self.cri_pix = build_loss(train_opt["pixel_opt"]).to(self.device)
else:
self.cri_pix = None
if train_opt.get("perceptual_opt"):
self.cri_perceptual = build_loss(train_opt["perceptual_opt"]).to(
self.device
)
else:
self.cri_perceptual = None
if self.cri_pix is None and self.cri_perceptual is None:
raise ValueError("Both pixel and perceptual losses are None.")
# set up optimizers and schedulers
self.setup_optimizers()
self.setup_schedulers()
def setup_optimizers(self):
train_opt = self.opt["train"]
optim_params = []
for k, v in self.net_g.named_parameters():
if v.requires_grad:
optim_params.append(v)
else:
logger = get_root_logger()
logger.warning(f"Params {k} will not be optimized.")
optim_type = train_opt["optim_g"].pop("type")
self.optimizer_g = self.get_optimizer(
optim_type, optim_params, **train_opt["optim_g"]
)
self.optimizers.append(self.optimizer_g)
def feed_data(self, data):
self.lq = data["lq"].to(self.device)
if "gt" in data:
self.gt = data["gt"].to(self.device)
def optimize_parameters(self, current_iter):
self.optimizer_g.zero_grad()
self.output = self.net_g(self.lq)
l_total = 0
loss_dict = OrderedDict()
# pixel loss
if self.cri_pix:
l_pix = self.cri_pix(self.output, self.gt)
l_total += l_pix
loss_dict["l_pix"] = l_pix
# perceptual loss
if self.cri_perceptual:
l_percep, l_style = self.cri_perceptual(self.output, self.gt)
if l_percep is not None:
l_total += l_percep
loss_dict["l_percep"] = l_percep
if l_style is not None:
l_total += l_style
loss_dict["l_style"] = l_style
l_total.backward()
self.optimizer_g.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
if self.ema_decay > 0:
self.model_ema(decay=self.ema_decay)
def test(self):
if hasattr(self, "net_g_ema"):
self.net_g_ema.eval()
with torch.no_grad():
self.output = self.net_g_ema(self.lq)
else:
self.net_g.eval()
with torch.no_grad():
self.output = self.net_g(self.lq)
self.net_g.train()
def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
if self.opt["rank"] == 0:
self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
dataset_name = dataloader.dataset.opt["name"]
with_metrics = self.opt["val"].get("metrics") is not None
use_pbar = self.opt["val"].get("pbar", False)
if with_metrics:
if not hasattr(self, "metric_results"): # only execute in the first run
self.metric_results = {
metric: 0 for metric in self.opt["val"]["metrics"].keys()
}
# initialize the best metric results for each dataset_name (supporting multiple validation datasets)
self._initialize_best_metric_results(dataset_name)
# zero self.metric_results
if with_metrics:
self.metric_results = {metric: 0 for metric in self.metric_results}
metric_data = dict()
if use_pbar:
pbar = tqdm(total=len(dataloader), unit="image")
for idx, val_data in enumerate(dataloader):
img_name = osp.splitext(osp.basename(val_data["lq_path"][0]))[0]
self.feed_data(val_data)
self.test()
visuals = self.get_current_visuals()
sr_img = tensor2img([visuals["result"]])
metric_data["img"] = sr_img
if "gt" in visuals:
gt_img = tensor2img([visuals["gt"]])
metric_data["img2"] = gt_img
del self.gt
# tentative for out of GPU memory
del self.lq
del self.output
torch.cuda.empty_cache()
if save_img:
if self.opt["is_train"]:
save_img_path = osp.join(
self.opt["path"]["visualization"],
img_name,
f"{img_name}_{current_iter}.png",
)
else:
if self.opt["val"]["suffix"]:
save_img_path = osp.join(
self.opt["path"]["visualization"],
dataset_name,
f'{img_name}_{self.opt["val"]["suffix"]}.png',
)
else:
save_img_path = osp.join(
self.opt["path"]["visualization"],
dataset_name,
f'{img_name}_{self.opt["name"]}.png',
)
imwrite(sr_img, save_img_path)
if with_metrics:
# calculate metrics
for name, opt_ in self.opt["val"]["metrics"].items():
self.metric_results[name] += calculate_metric(metric_data, opt_)
if use_pbar:
pbar.update(1)
pbar.set_description(f"Test {img_name}")
if use_pbar:
pbar.close()
if with_metrics:
for metric in self.metric_results.keys():
self.metric_results[metric] /= idx + 1
# update the best metric result
self._update_best_metric_result(
dataset_name, metric, self.metric_results[metric], current_iter
)
self._log_validation_metric_values(current_iter, dataset_name, tb_logger)
def _log_validation_metric_values(self, current_iter, dataset_name, tb_logger):
log_str = f"Validation {dataset_name}\n"
for metric, value in self.metric_results.items():
log_str += f"\t # {metric}: {value:.4f}"
if hasattr(self, "best_metric_results"):
log_str += (
f'\tBest: {self.best_metric_results[dataset_name][metric]["val"]:.4f} @ '
f'{self.best_metric_results[dataset_name][metric]["iter"]} iter'
)
log_str += "\n"
logger = get_root_logger()
logger.info(log_str)
if tb_logger:
for metric, value in self.metric_results.items():
tb_logger.add_scalar(
f"metrics/{dataset_name}/{metric}", value, current_iter
)
def get_current_visuals(self):
out_dict = OrderedDict()
out_dict["lq"] = self.lq.detach().cpu()
out_dict["result"] = self.output.detach().cpu()
if hasattr(self, "gt"):
out_dict["gt"] = self.gt.detach().cpu()
return out_dict
def save(self, epoch, current_iter):
if hasattr(self, "net_g_ema"):
self.save_network(
[self.net_g, self.net_g_ema],
"net_g",
current_iter,
param_key=["params", "params_ema"],
)
else:
self.save_network(self.net_g, "net_g", current_iter)
self.save_training_state(epoch, current_iter)
| 9,927 | 35.77037 | 112 | py |
BVQI | BVQI-master/pyiqa/models/pieapp_model.py | from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from pyiqa.metrics.correlation_coefficient import calculate_rmse
from pyiqa.utils.registry import MODEL_REGISTRY
from .general_iqa_model import GeneralIQAModel
@MODEL_REGISTRY.register()
class PieAPPModel(GeneralIQAModel):
"""General module to train an IQA network."""
def feed_data(self, data):
is_test = "img" in data.keys()
if "use_ref" in self.opt["train"]:
self.use_ref = self.opt["train"]["use_ref"]
if is_test:
self.img_input = data["img"].to(self.device)
self.gt_mos = data["mos_label"].to(self.device)
self.ref_input = data["ref_img"].to(self.device)
self.ref_img_path = data["ref_img_path"]
self.img_path = data["img_path"]
else:
self.img_A_input = data["distA_img"].to(self.device)
self.img_B_input = data["distB_img"].to(self.device)
self.img_ref_input = data["ref_img"].to(self.device)
self.gt_prob = data["mos_label"].to(self.device)
# from torchvision.utils import save_image
# save_image(torch.cat([self.img_A_input, self.img_B_input, self.img_ref_input], dim=0), 'tmp_test_pieappdataset.jpg')
# exit()
def optimize_parameters(self, current_iter):
self.optimizer.zero_grad()
score_A = self.net(self.img_A_input, self.img_ref_input)
score_B = self.net(self.img_B_input, self.img_ref_input)
train_output_score = 1 / (1 + torch.exp(score_A - score_B))
l_total = 0
loss_dict = OrderedDict()
# pixel loss
if self.cri_mos:
l_mos = self.cri_mos(train_output_score, self.gt_prob)
l_total += l_mos
loss_dict["l_mos"] = l_mos
l_total.backward()
self.optimizer.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
# log metrics in training batch
pred_score = train_output_score.squeeze(-1).cpu().detach().numpy()
gt_prob = self.gt_prob.squeeze(-1).cpu().detach().numpy()
self.log_dict[f"train_metrics/rmse"] = calculate_rmse(pred_score, gt_prob)
| 2,271 | 32.910448 | 130 | py |
BVQI | BVQI-master/pyiqa/models/general_iqa_model.py | from collections import OrderedDict
from os import path as osp
import torch
from tqdm import tqdm
from pyiqa.archs import build_network
from pyiqa.losses import build_loss
from pyiqa.metrics import calculate_metric
from pyiqa.utils import get_root_logger, imwrite, tensor2img
from pyiqa.utils.registry import MODEL_REGISTRY
from .base_model import BaseModel
@MODEL_REGISTRY.register()
class GeneralIQAModel(BaseModel):
"""General module to train an IQA network."""
def __init__(self, opt):
super(GeneralIQAModel, self).__init__(opt)
# define network
self.net = build_network(opt["network"])
self.net = self.model_to_device(self.net)
self.print_network(self.net)
# load pretrained models
load_path = self.opt["path"].get("pretrain_network", None)
if load_path is not None:
param_key = self.opt["path"].get("param_key_g", "params")
self.load_network(
self.net,
load_path,
self.opt["path"].get("strict_load", True),
param_key,
)
if self.is_train:
self.init_training_settings()
def init_training_settings(self):
self.net.train()
train_opt = self.opt["train"]
self.net_best = build_network(self.opt["network"]).to(self.device)
# define losses
if train_opt.get("mos_loss_opt"):
self.cri_mos = build_loss(train_opt["mos_loss_opt"]).to(self.device)
else:
self.cri_mos = None
# define metric related loss, such as plcc loss
if train_opt.get("metric_loss_opt"):
self.cri_metric = build_loss(train_opt["metric_loss_opt"]).to(self.device)
else:
self.cri_metric = None
# set up optimizers and schedulers
self.setup_optimizers()
self.setup_schedulers()
def setup_optimizers(self):
train_opt = self.opt["train"]
optim_params = []
for k, v in self.net.named_parameters():
if v.requires_grad:
optim_params.append(v)
else:
logger = get_root_logger()
logger.warning(f"Params {k} will not be optimized.")
optim_type = train_opt["optim"].pop("type")
self.optimizer = self.get_optimizer(
optim_type, optim_params, **train_opt["optim"]
)
self.optimizers.append(self.optimizer)
def feed_data(self, data):
self.img_input = data["img"].to(self.device)
if "mos_label" in data:
self.gt_mos = data["mos_label"].to(self.device)
self.use_ref = self.opt["train"].get("use_ref", False)
def net_forward(self, net):
if self.use_ref:
return net(self.img_input, self.ref_input)
else:
return net(self.img_input)
def optimize_parameters(self, current_iter):
self.optimizer.zero_grad()
self.output_score = self.net_forward(self.net)
l_total = 0
loss_dict = OrderedDict()
# pixel loss
if self.cri_mos:
l_mos = self.cri_mos(self.output_score, self.gt_mos)
l_total += l_mos
loss_dict["l_mos"] = l_mos
if self.cri_metric:
l_metric = self.cri_metric(self.output_score, self.gt_mos)
l_total += l_metric
loss_dict["l_metric"] = l_metric
l_total.backward()
self.optimizer.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
# log metrics in training batch
pred_score = self.output_score.squeeze(1).cpu().detach().numpy()
gt_mos = self.gt_mos.squeeze(1).cpu().detach().numpy()
for name, opt_ in self.opt["val"]["metrics"].items():
self.log_dict[f"train_metrics/{name}"] = calculate_metric(
[pred_score, gt_mos], opt_
)
def test(self):
self.net.eval()
with torch.no_grad():
self.output_score = self.net_forward(self.net)
self.net.train()
def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
if self.opt["rank"] == 0:
self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
dataset_name = dataloader.dataset.opt["name"]
with_metrics = self.opt["val"].get("metrics") is not None
use_pbar = self.opt["val"].get("pbar", False)
if with_metrics:
if not hasattr(self, "metric_results"): # only execute in the first run
self.metric_results = {
metric: 0 for metric in self.opt["val"]["metrics"].keys()
}
# initialize the best metric results for each dataset_name (supporting multiple validation datasets)
self._initialize_best_metric_results(dataset_name)
# zero self.metric_results
if with_metrics:
self.metric_results = {metric: 0 for metric in self.metric_results}
if use_pbar:
pbar = tqdm(total=len(dataloader), unit="image")
pred_score = []
gt_mos = []
for idx, val_data in enumerate(dataloader):
img_name = osp.basename(val_data["img_path"][0])
self.feed_data(val_data)
self.test()
pred_score.append(self.output_score)
gt_mos.append(self.gt_mos)
if use_pbar:
pbar.update(1)
pbar.set_description(f"Test {img_name:>20}")
if use_pbar:
pbar.close()
pred_score = torch.cat(pred_score, dim=0).squeeze(1).cpu().numpy()
gt_mos = torch.cat(gt_mos, dim=0).squeeze(1).cpu().numpy()
if with_metrics:
# calculate all metrics
for name, opt_ in self.opt["val"]["metrics"].items():
self.metric_results[name] = calculate_metric([pred_score, gt_mos], opt_)
if self.key_metric is not None:
# If the best metric is updated, update and save best model
to_update = self._update_best_metric_result(
dataset_name,
self.key_metric,
self.metric_results[self.key_metric],
current_iter,
)
if to_update:
for name, opt_ in self.opt["val"]["metrics"].items():
self._update_metric_result(
dataset_name, name, self.metric_results[name], current_iter
)
self.copy_model(self.net, self.net_best)
self.save_network(self.net_best, "net_best")
else:
# update each metric separately
updated = []
for name, opt_ in self.opt["val"]["metrics"].items():
tmp_updated = self._update_best_metric_result(
dataset_name, name, self.metric_results[name], current_iter
)
updated.append(tmp_updated)
# save best model if any metric is updated
if sum(updated):
self.copy_model(self.net, self.net_best)
self.save_network(self.net_best, "net_best")
self._log_validation_metric_values(current_iter, dataset_name, tb_logger)
def _log_validation_metric_values(self, current_iter, dataset_name, tb_logger):
log_str = f"Validation {dataset_name}\n"
for metric, value in self.metric_results.items():
log_str += f"\t # {metric}: {value:.4f}"
if hasattr(self, "best_metric_results"):
log_str += (
f'\tBest: {self.best_metric_results[dataset_name][metric]["val"]:.4f} @ '
f'{self.best_metric_results[dataset_name][metric]["iter"]} iter'
)
log_str += "\n"
logger = get_root_logger()
logger.info(log_str)
if tb_logger:
for metric, value in self.metric_results.items():
tb_logger.add_scalar(
f"val_metrics/{dataset_name}/{metric}", value, current_iter
)
def save(self, epoch, current_iter, save_net_label="net"):
self.save_network(self.net, save_net_label, current_iter)
self.save_training_state(epoch, current_iter)
| 8,455 | 36.087719 | 112 | py |
BVQI | BVQI-master/pyiqa/models/bapps_model.py | import os.path as osp
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from pyiqa.metrics import calculate_metric
from pyiqa.utils.registry import MODEL_REGISTRY
from .general_iqa_model import GeneralIQAModel
@MODEL_REGISTRY.register()
class BAPPSModel(GeneralIQAModel):
"""General module to train an IQA network."""
def feed_data(self, data):
if "use_ref" in self.opt["train"]:
self.use_ref = self.opt["train"]["use_ref"]
self.img_A_input = data["distA_img"].to(self.device)
self.img_B_input = data["distB_img"].to(self.device)
self.img_ref_input = data["ref_img"].to(self.device)
self.gt_mos = data["mos_label"].to(self.device)
self.img_path = data["img_path"]
# from torchvision.utils import save_image
# print(self.img_ref_input.shape)
# save_image(torch.cat([self.img_ref_input, self.img_A_input, self.img_B_input], dim=0), 'tmp_test_bappsdataset.jpg')
# exit()
def compute_accuracy(self, d0, d1, judge):
d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten()
judge_per = judge.cpu().numpy().flatten()
acc = d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per)
return acc.mean()
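    # --- Worked example (added comment; not in the original file): with
    # d0 = [0.8, 0.2] (distances for image A), d1 = [0.3, 0.6] (image B) and
    # judge = [1.0, 0.0] -- assuming `judge` is the fraction of raters who
    # preferred image B -- we get d1_lt_d0 = [1, 0], so the per-pair agreement
    # is [1*1, (1-0)*(1-0)] = [1, 1], i.e. a 2AFC accuracy of 1.0.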
def optimize_parameters(self, current_iter):
self.optimizer.zero_grad()
score_A = self.net(self.img_A_input, self.img_ref_input)
score_B = self.net(self.img_B_input, self.img_ref_input)
# For BAPPS,
train_output_score = 1 / (1 + torch.exp(score_B - score_A))
l_total = 0
loss_dict = OrderedDict()
# pixel loss
if self.cri_mos:
l_mos = self.cri_mos(train_output_score, self.gt_mos)
l_total += l_mos
loss_dict["l_mos"] = l_mos
l_total.backward()
self.optimizer.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
# log metrics in training batch
self.log_dict[f"train_metrics/acc"] = self.compute_accuracy(
score_A, score_B, self.gt_mos
)
@torch.no_grad()
def test(self):
self.net.eval()
with torch.no_grad():
self.score_A = self.net(self.img_A_input, self.img_ref_input)
self.score_B = self.net(self.img_B_input, self.img_ref_input)
self.net.train()
@torch.no_grad()
def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
dataset_name = dataloader.dataset.opt["name"]
with_metrics = self.opt["val"].get("metrics") is not None
use_pbar = self.opt["val"].get("pbar", False)
if with_metrics:
if not hasattr(self, "metric_results"): # only execute in the first run
self.metric_results = {
metric: 0 for metric in self.opt["val"]["metrics"].keys()
}
# initialize the best metric results for each dataset_name (supporting multiple validation datasets)
self._initialize_best_metric_results(dataset_name)
# zero self.metric_results
if with_metrics:
self.metric_results = {metric: 0 for metric in self.metric_results}
if use_pbar:
pbar = tqdm(total=len(dataloader), unit="image")
pred_score_A = []
pred_score_B = []
gt_mos = []
for idx, val_data in enumerate(dataloader):
img_name = osp.basename(val_data["img_path"][0])
self.feed_data(val_data)
self.test()
if len(self.score_A.shape) <= 1:
self.score_A = self.score_A.reshape(-1, 1)
self.score_B = self.score_B.reshape(-1, 1)
pred_score_A.append(self.score_A)
pred_score_B.append(self.score_B)
gt_mos.append(self.gt_mos)
if use_pbar:
pbar.update(1)
pbar.set_description(f"Test {img_name:>20}")
if use_pbar:
pbar.close()
pred_score_A = torch.cat(pred_score_A, dim=0).squeeze(1).cpu().numpy()
pred_score_B = torch.cat(pred_score_B, dim=0).squeeze(1).cpu().numpy()
gt_mos = torch.cat(gt_mos, dim=0).squeeze(1).cpu().numpy()
if with_metrics:
# calculate all metrics
for name, opt_ in self.opt["val"]["metrics"].items():
self.metric_results[name] = calculate_metric(
[pred_score_A, pred_score_B, gt_mos], opt_
)
if self.key_metric is not None:
# If the best metric is updated, update and save best model
to_update = self._update_best_metric_result(
dataset_name,
self.key_metric,
self.metric_results[self.key_metric],
current_iter,
)
if to_update:
for name, opt_ in self.opt["val"]["metrics"].items():
self._update_metric_result(
dataset_name, name, self.metric_results[name], current_iter
)
self.copy_model(self.net, self.net_best)
self.save_network(self.net_best, "net_best")
else:
# update each metric separately
updated = []
for name, opt_ in self.opt["val"]["metrics"].items():
tmp_updated = self._update_best_metric_result(
dataset_name, name, self.metric_results[name], current_iter
)
updated.append(tmp_updated)
# save best model if any metric is updated
if sum(updated):
self.copy_model(self.net, self.net_best)
self.save_network(self.net_best, "net_best")
self._log_validation_metric_values(current_iter, dataset_name, tb_logger)
| 5,987 | 36.898734 | 125 | py |
BVQI | BVQI-master/pyiqa/models/__init__.py | import importlib
from copy import deepcopy
from os import path as osp
from pyiqa.utils import get_root_logger, scandir
from pyiqa.utils.registry import MODEL_REGISTRY
__all__ = ["build_model"]
# automatically scan and import model modules for registry
# scan all the files under the 'models' folder and collect files ending with
# '_model.py'
model_folder = osp.dirname(osp.abspath(__file__))
model_filenames = [
osp.splitext(osp.basename(v))[0]
for v in scandir(model_folder)
if v.endswith("_model.py")
]
# import all the model modules
_model_modules = [
importlib.import_module(f"pyiqa.models.{file_name}")
for file_name in model_filenames
]
def build_model(opt):
"""Build model from options.
Args:
opt (dict): Configuration. It must contain:
model_type (str): Model type.
"""
opt = deepcopy(opt)
model = MODEL_REGISTRY.get(opt["model_type"])(opt)
logger = get_root_logger()
logger.info(f"Model [{model.__class__.__name__}] is created.")
return model
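# --- Hedged usage sketch (added; not part of the original file): `opt` must
# contain a `model_type` registered in MODEL_REGISTRY; the remaining keys are
# consumed by the chosen model class. The values below are illustrative only,
# not a real config:
#
#     opt = {"model_type": "GeneralIQAModel", "num_gpu": 0, "is_train": False, ...}
#     model = build_model(opt)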
| 1,031 | 26.157895 | 76 | py |
BVQI | BVQI-master/pyiqa/models/inference_model.py | from collections import OrderedDict
import torch
import torchvision as tv
from pyiqa.default_model_configs import DEFAULT_CONFIGS
from pyiqa.utils.img_util import imread2tensor
from pyiqa.utils.registry import ARCH_REGISTRY
class InferenceModel(torch.nn.Module):
"""Common interface for quality inference of images with default setting of each metric."""
def __init__(
self, metric_name, as_loss=False, device=None, **kwargs # Other metric options
):
super(InferenceModel, self).__init__()
self.metric_name = metric_name
# ============ set metric properties ===========
self.lower_better = DEFAULT_CONFIGS[metric_name].get("lower_better", False)
self.metric_mode = DEFAULT_CONFIGS[metric_name].get("metric_mode", None)
if self.metric_mode is None:
self.metric_mode = kwargs.pop("metric_mode")
elif "metric_mode" in kwargs:
kwargs.pop("metric_mode")
if device is None:
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
self.device = device
self.as_loss = as_loss
# =========== define metric model ===============
net_opts = OrderedDict()
# load default setting first
if metric_name in DEFAULT_CONFIGS.keys():
default_opt = DEFAULT_CONFIGS[metric_name]["metric_opts"]
net_opts.update(default_opt)
# then update with custom setting
net_opts.update(kwargs)
network_type = net_opts.pop("type")
self.net = ARCH_REGISTRY.get(network_type)(**net_opts)
self.net = self.net.to(self.device)
self.net.eval()
def forward(self, target, ref=None, **kwargs):
torch.set_grad_enabled(self.as_loss)
if "fid" in self.metric_name:
output = self.net(target, ref, device=self.device, **kwargs)
else:
            if not torch.is_tensor(target):
                target = imread2tensor(target)
                target = target.unsqueeze(0)
if self.metric_mode == "FR":
assert (
ref is not None
), "Please specify reference image for Full Reference metric"
ref = imread2tensor(ref)
ref = ref.unsqueeze(0)
if self.metric_mode == "FR":
output = self.net(target.to(self.device), ref.to(self.device))
elif self.metric_mode == "NR":
output = self.net(target.to(self.device))
return output
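# --- Hedged usage sketch (added; not part of the original file): metric names
# and their FR/NR modes come from DEFAULT_CONFIGS; 'lpips' below is assumed to
# be registered as a Full Reference metric.
#
#     model = InferenceModel("lpips", device=torch.device("cpu"))
#     score = model(dist_img, ref=ref_img)  # dist_img/ref_img: paths or tensors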
| 2,613 | 36.884058 | 95 | py |