repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
evaluate
|
evaluate-main/metrics/bertscore/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("bertscore")
launch_gradio_widget(module)
| 131 | 17.857143 | 47 |
py
|
evaluate
|
evaluate-main/metrics/bertscore/bertscore.py
|
# Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERTScore metric. """
import functools
from contextlib import contextmanager
import bert_score
import datasets
from packaging import version
import evaluate
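# Context manager that temporarily filters out the noisy transformers warning
# ("This IS expected if you are initializing ...") emitted while bert_score
# loads its backbone model.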
@contextmanager
def filter_logging_context():
def filter_log(record):
        return "This IS expected if you are initializing" not in record.msg
logger = datasets.utils.logging.get_logger("transformers.modeling_utils")
logger.addFilter(filter_log)
try:
yield
finally:
logger.removeFilter(filter_log)
_CITATION = """\
@inproceedings{bert-score,
title={BERTScore: Evaluating Text Generation with BERT},
author={Tianyi Zhang* and Varsha Kishore* and Felix Wu* and Kilian Q. Weinberger and Yoav Artzi},
booktitle={International Conference on Learning Representations},
year={2020},
url={https://openreview.net/forum?id=SkeHuCVFDr}
}
"""
_DESCRIPTION = """\
BERTScore leverages the pre-trained contextual embeddings from BERT and matches words in candidate and reference
sentences by cosine similarity.
It has been shown to correlate with human judgment on sentence-level and system-level evaluation.
Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for evaluating different language
generation tasks.
See the project's README at https://github.com/Tiiiger/bert_score#readme for more information.
"""
_KWARGS_DESCRIPTION = """
Computes BERTScore, along with the hashcode of the scoring configuration, for candidate sentences against one or more references.
Args:
predictions (list of str): Prediction/candidate sentences.
references (list of str or list of list of str): Reference sentences.
lang (str): Language of the sentences; required (e.g. 'en').
    model_type (str): Model specification; defaults to the suggested
        model for the target language. At least one of `model_type`
        or `lang` has to be specified.
    num_layers (int): The layer of representation to use;
        defaults to the number of layers tuned on WMT16 correlation data.
    verbose (bool): Turn on intermediate status updates.
    idf (bool or dict): Use idf weighting; can also be a precomputed idf_dict.
    device (str): Device on which the contextual embedding model will be allocated.
        If this argument is None, the model lives on cuda:0 if cuda is available.
    nthreads (int): Number of threads.
    batch_size (int): BERTScore processing batch size.
    rescale_with_baseline (bool): Rescale BERTScore with a pre-computed baseline;
        `lang` needs to be specified when `rescale_with_baseline` is True.
baseline_path (str): Customized baseline file.
use_fast_tokenizer (bool): `use_fast` parameter passed to HF tokenizer. New in version 0.3.10.
Returns:
precision: Precision.
recall: Recall.
f1: F1 score.
hashcode: Hashcode of the library.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bertscore = evaluate.load("bertscore")
>>> results = bertscore.compute(predictions=predictions, references=references, lang="en")
>>> print([round(v, 2) for v in results["f1"]])
[1.0, 1.0]
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BERTScore(evaluate.Metric):
def _info(self):
return evaluate.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://github.com/Tiiiger/bert_score",
inputs_description=_KWARGS_DESCRIPTION,
features=[
datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
}
),
datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}
),
],
codebase_urls=["https://github.com/Tiiiger/bert_score"],
reference_urls=[
"https://github.com/Tiiiger/bert_score",
"https://arxiv.org/abs/1904.09675",
],
)
def _compute(
self,
predictions,
references,
lang=None,
model_type=None,
num_layers=None,
verbose=False,
idf=False,
device=None,
batch_size=64,
nthreads=4,
all_layers=False,
rescale_with_baseline=False,
baseline_path=None,
use_fast_tokenizer=False,
):
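        # Accept either a single reference string or a list of references per
        # prediction by normalizing everything to a list of lists.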
if isinstance(references[0], str):
references = [[ref] for ref in references]
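        # When idf weighting is enabled, flatten all references into the sentence
        # pool used to estimate the idf statistics.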
if idf:
idf_sents = [r for ref in references for r in ref]
else:
idf_sents = None
get_hash = bert_score.utils.get_hash
scorer = bert_score.BERTScorer
if version.parse(bert_score.__version__) >= version.parse("0.3.10"):
get_hash = functools.partial(get_hash, use_fast_tokenizer=use_fast_tokenizer)
scorer = functools.partial(scorer, use_fast_tokenizer=use_fast_tokenizer)
elif use_fast_tokenizer:
raise ImportWarning(
"To use a fast tokenizer, the module `bert-score>=0.3.10` is required, and the current version of "
"`bert-score` doesn't match this condition.\n"
'You can install it with `pip install "bert-score>=0.3.10"`.'
)
if model_type is None:
if lang is None:
raise ValueError(
"Either 'lang' (e.g. 'en') or 'model_type' (e.g. 'microsoft/deberta-xlarge-mnli')"
" must be specified"
)
model_type = bert_score.utils.lang2model[lang.lower()]
if num_layers is None:
num_layers = bert_score.utils.model2layers[model_type]
hashcode = get_hash(
model=model_type,
num_layers=num_layers,
idf=idf,
rescale_with_baseline=rescale_with_baseline,
use_custom_baseline=baseline_path is not None,
)
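        # Reuse a cached BERTScorer between calls; it is only rebuilt when the
        # scoring configuration (and therefore the hashcode) changes.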
with filter_logging_context():
if not hasattr(self, "cached_bertscorer") or self.cached_bertscorer.hash != hashcode:
self.cached_bertscorer = scorer(
model_type=model_type,
num_layers=num_layers,
batch_size=batch_size,
nthreads=nthreads,
all_layers=all_layers,
idf=idf,
idf_sents=idf_sents,
device=device,
lang=lang,
rescale_with_baseline=rescale_with_baseline,
baseline_path=baseline_path,
)
(P, R, F) = self.cached_bertscorer.score(
cands=predictions,
refs=references,
verbose=verbose,
batch_size=batch_size,
)
output_dict = {
"precision": P.tolist(),
"recall": R.tolist(),
"f1": F.tolist(),
"hashcode": hashcode,
}
return output_dict
| 7,950 | 35.810185 | 117 |
py
|
evaluate
|
evaluate-main/metrics/exact_match/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("exact_match")
launch_gradio_widget(module)
| 133 | 18.142857 | 47 |
py
|
evaluate
|
evaluate-main/metrics/exact_match/exact_match.py
|
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exact Match metric."""
import re
import string
import datasets
import numpy as np
import evaluate
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 1.0, inclusive.
Examples:
>>> exact_match = evaluate.load("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 2))
0.25
>>> exact_match = evaluate.load("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 2))
0.5
>>> exact_match = evaluate.load("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 2))
0.75
>>> exact_match = evaluate.load("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 2))
1.0
>>> exact_match = evaluate.load("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 2))
0.33
"""
_CITATION = """
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(evaluate.Metric):
def _info(self):
return evaluate.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}
),
reference_urls=[],
)
def _compute(
self,
predictions,
references,
regexes_to_ignore=None,
ignore_case=False,
ignore_punctuation=False,
ignore_numbers=False,
):
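        # Strip every user-supplied regex from predictions and references before
        # any case, punctuation, or number normalization is applied.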
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
predictions = np.array([re.sub(s, "", x) for x in predictions])
references = np.array([re.sub(s, "", x) for x in references])
else:
predictions = np.asarray(predictions)
references = np.asarray(references)
if ignore_case:
predictions = np.char.lower(predictions)
references = np.char.lower(references)
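        # str.maketrans with a third argument builds a translation table that
        # deletes those characters; np.char.translate applies it element-wise.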
if ignore_punctuation:
repl_table = string.punctuation.maketrans("", "", string.punctuation)
predictions = np.char.translate(predictions, table=repl_table)
references = np.char.translate(references, table=repl_table)
if ignore_numbers:
repl_table = string.digits.maketrans("", "", string.digits)
predictions = np.char.translate(predictions, table=repl_table)
references = np.char.translate(references, table=repl_table)
score_list = predictions == references
return {"exact_match": np.mean(score_list)}
| 5,669 | 40.386861 | 181 |
py
|
evaluate
|
evaluate-main/metrics/rl_reliability/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("rl_reliability", "online")
launch_gradio_widget(module)
| 146 | 20 | 50 |
py
|
evaluate
|
evaluate-main/metrics/rl_reliability/rl_reliability.py
|
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes the RL Reliability Metrics."""
import datasets
import numpy as np
from rl_reliability_metrics.evaluation import eval_metrics
from rl_reliability_metrics.metrics import metrics_offline, metrics_online
import evaluate
logger = evaluate.logging.get_logger(__name__)
DEFAULT_EVAL_POINTS = [
50000,
150000,
250000,
350000,
450000,
550000,
650000,
750000,
850000,
950000,
1050000,
1150000,
1250000,
1350000,
1450000,
1550000,
1650000,
1750000,
1850000,
1950000,
]
N_RUNS_RECOMMENDED = 10
_CITATION = """\
@conference{rl_reliability_metrics,
title = {Measuring the Reliability of Reinforcement Learning Algorithms},
author = {Stephanie CY Chan, Sam Fishman, John Canny, Anoop Korattikara, and Sergio Guadarrama},
booktitle = {International Conference on Learning Representations, Addis Ababa, Ethiopia},
year = 2020,
}
"""
_DESCRIPTION = """\
Computes the RL reliability metrics from a set of experiments. There is an `"online"` and `"offline"` configuration for evaluation.
"""
_KWARGS_DESCRIPTION = """
Computes the RL reliability metrics from a set of experiments. There is an `"online"` and `"offline"` configuration for evaluation.
Args:
    timesteps: list of timestep lists/arrays that serve as index.
rewards: list of reward lists/arrays of each experiment.
Returns:
dictionary: a set of reliability metrics
Examples:
>>> import numpy as np
>>> rl_reliability = evaluate.load("rl_reliability", "online")
>>> results = rl_reliability.compute(
... timesteps=[np.linspace(0, 2000000, 1000)],
... rewards=[np.linspace(0, 100, 1000)]
... )
>>> print(results["LowerCVaROnRaw"].round(4))
[0.0258]
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class RLReliability(evaluate.Metric):
"""Computes the RL Reliability Metrics."""
def _info(self):
if self.config_name not in ["online", "offline"]:
raise KeyError("""You should supply a configuration name selected in '["online", "offline"]'""")
return evaluate.MetricInfo(
module_type="metric",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"timesteps": datasets.Sequence(datasets.Value("int64")),
"rewards": datasets.Sequence(datasets.Value("float")),
}
),
homepage="https://github.com/google-research/rl-reliability-metrics",
)
def _compute(
self,
timesteps,
rewards,
baseline="default",
freq_thresh=0.01,
window_size=100000,
window_size_trimmed=99000,
alpha=0.05,
eval_points=None,
):
if len(timesteps) < N_RUNS_RECOMMENDED:
logger.warning(
f"For robust statistics it is recommended to use at least {N_RUNS_RECOMMENDED} runs whereas you provided {len(timesteps)}."
)
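        # Stack each run's timesteps and rewards into a 2 x T array, the curve
        # format consumed by the rl_reliability_metrics evaluator.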
curves = []
for timestep, reward in zip(timesteps, rewards):
curves.append(np.stack([timestep, reward]))
if self.config_name == "online":
if baseline == "default":
baseline = "curve_range"
if eval_points is None:
eval_points = DEFAULT_EVAL_POINTS
metrics = [
metrics_online.HighFreqEnergyWithinRuns(thresh=freq_thresh),
metrics_online.IqrWithinRuns(
window_size=window_size_trimmed, eval_points=eval_points, baseline=baseline
),
metrics_online.IqrAcrossRuns(
lowpass_thresh=freq_thresh, eval_points=eval_points, window_size=window_size, baseline=baseline
),
metrics_online.LowerCVaROnDiffs(baseline=baseline),
metrics_online.LowerCVaROnDrawdown(baseline=baseline),
metrics_online.LowerCVaROnAcross(
lowpass_thresh=freq_thresh, eval_points=eval_points, window_size=window_size, baseline=baseline
),
metrics_online.LowerCVaROnRaw(alpha=alpha, baseline=baseline),
metrics_online.MadAcrossRuns(
lowpass_thresh=freq_thresh, eval_points=eval_points, window_size=window_size, baseline=baseline
),
metrics_online.MadWithinRuns(
eval_points=eval_points, window_size=window_size_trimmed, baseline=baseline
),
metrics_online.MaxDrawdown(),
metrics_online.StddevAcrossRuns(
lowpass_thresh=freq_thresh, eval_points=eval_points, window_size=window_size, baseline=baseline
),
metrics_online.StddevWithinRuns(
eval_points=eval_points, window_size=window_size_trimmed, baseline=baseline
),
metrics_online.UpperCVaROnAcross(
alpha=alpha,
lowpass_thresh=freq_thresh,
eval_points=eval_points,
window_size=window_size,
baseline=baseline,
),
metrics_online.UpperCVaROnDiffs(alpha=alpha, baseline=baseline),
metrics_online.UpperCVaROnDrawdown(alpha=alpha, baseline=baseline),
metrics_online.UpperCVaROnRaw(alpha=alpha, baseline=baseline),
metrics_online.MedianPerfDuringTraining(window_size=window_size, eval_points=eval_points),
]
else:
if baseline == "default":
baseline = "median_perf"
metrics = [
metrics_offline.MadAcrossRollouts(baseline=baseline),
metrics_offline.IqrAcrossRollouts(baseline=baseline),
metrics_offline.StddevAcrossRollouts(baseline=baseline),
metrics_offline.LowerCVaRAcrossRollouts(alpha=alpha, baseline=baseline),
metrics_offline.UpperCVaRAcrossRollouts(alpha=alpha, baseline=baseline),
metrics_offline.MedianPerfAcrossRollouts(baseline=None),
]
evaluator = eval_metrics.Evaluator(metrics=metrics)
result = evaluator.compute_metrics(curves)
return result
| 7,052 | 36.716578 | 139 |
py
|
evaluate
|
evaluate-main/metrics/comet/app.py
|
import sys
import evaluate
from evaluate.utils import launch_gradio_widget
sys.path = [p for p in sys.path if p != "/home/user/app"]
module = evaluate.load("comet")
sys.path = ["/home/user/app"] + sys.path
launch_gradio_widget(module)
| 239 | 19 | 57 |
py
|
evaluate
|
evaluate-main/metrics/comet/comet.py
|
# Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" COMET metric.
Requirements:
pip install unbabel-comet
Usage:
```python
from evaluate import load
comet_metric = load('metrics/comet/comet.py')
#comet_metric = load('comet')
#comet_metric = load('comet', 'Unbabel/wmt20-comet-da')
source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
predictions = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
predictions['scores']
```
"""
import comet # From: unbabel-comet
import datasets
import torch
from packaging import version
import evaluate
logger = evaluate.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{rei-etal-2022-comet,
title = "{COMET}-22: Unbabel-{IST} 2022 Submission for the Metrics Shared Task",
author = "Rei, Ricardo and
C. de Souza, Jos{\'e} G. and
Alves, Duarte and
Zerva, Chrysoula and
Farinha, Ana C and
Glushkova, Taisiya and
Lavie, Alon and
Coheur, Luisa and
Martins, Andr{\'e} F. T.",
booktitle = "Proceedings of the Seventh Conference on Machine Translation (WMT)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.wmt-1.52",
pages = "578--585",
}
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
"""
_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See https://unbabel.github.io/COMET/html/models.html for more information on the available models.
"""
_KWARGS_DESCRIPTION = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`gpus` (int): Number of GPUs to use. 0 for CPU
`progress_bar` (bool): Flag that turns on and off the predict progress bar. Defaults to True
Returns:
Dict with all sentence-level scores (`scores` key) a system-level score (`mean_score` key).
Examples:
>>> comet_metric = evaluate.load('comet')
>>> # comet_metric = load('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 3) for v in results["scores"]])
[0.839, 0.972]
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(evaluate.Metric):
def _info(self):
return evaluate.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://unbabel.github.io/COMET/html/index.html",
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"sources": datasets.Value("string", id="sequence"),
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}
),
codebase_urls=["https://github.com/Unbabel/COMET"],
reference_urls=[
"https://github.com/Unbabel/COMET",
"https://aclanthology.org/2022.wmt-1.52/",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
],
)
def _download_and_prepare(self, dl_manager):
if self.config_name == "default":
if version.parse(comet.__version__) >= version.parse("2.0.0"):
self.scorer = comet.load_from_checkpoint(comet.download_model("Unbabel/wmt22-comet-da"))
else:
self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
else:
self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
if gpus is None:
gpus = 1 if torch.cuda.is_available() else 0
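        # Reshape the column-wise inputs into the list of {"src", "mt", "ref"}
        # dicts expected by the COMET model's predict() method.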
data = {"src": sources, "mt": predictions, "ref": references}
data = [dict(zip(data, t)) for t in zip(*data.values())]
if version.parse(comet.__version__) >= version.parse("2.0.0"):
output = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
scores, mean_score = output.scores, output.system_score
else:
scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
return {"mean_score": mean_score, "scores": scores}
| 6,967 | 39.511628 | 238 |
py
|
evaluate
|
evaluate-main/metrics/matthews_correlation/matthews_correlation.py
|
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Matthews Correlation metric."""
import datasets
import numpy as np
from sklearn.metrics import matthews_corrcoef
import evaluate
_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
average (`string`): This parameter is used for multilabel configs. Defaults to `None`.
- None (default): Returns an array of Matthews correlation coefficients, one for each feature
- 'macro': Calculate metrics for each feature, and find their unweighted mean.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = evaluate.load("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = evaluate.load("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = evaluate.load("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
Example 4, Multi-label without averaging:
>>> matthews_metric = evaluate.load("matthews_correlation", config_name="multilabel")
>>> results = matthews_metric.compute(references=[[0,1], [1,0], [1,1]],
... predictions=[[0,1], [1,1], [0,1]])
>>> print(results['matthews_correlation'])
[0.5, 0.0]
Example 5, Multi-label with averaging:
>>> matthews_metric = evaluate.load("matthews_correlation", config_name="multilabel")
>>> results = matthews_metric.compute(references=[[0,1], [1,0], [1,1]],
... predictions=[[0,1], [1,1], [0,1]],
... average='macro')
>>> print(round(results['matthews_correlation'], 2))
0.25
"""
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(evaluate.Metric):
def _info(self):
return evaluate.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32")),
"references": datasets.Sequence(datasets.Value("int32")),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32"),
"references": datasets.Value("int32"),
}
),
reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
],
)
def _compute(self, predictions, references, sample_weight=None, average=None):
if self.config_name == "multilabel":
references = np.array(references)
predictions = np.array(predictions)
if not (references.ndim == 2 and predictions.ndim == 2):
raise ValueError("For multi-label inputs, both references and predictions should be 2-dimensional")
matthews_corr = [
matthews_corrcoef(predictions[:, i], references[:, i], sample_weight=sample_weight)
for i in range(references.shape[1])
]
if average == "macro":
matthews_corr = np.mean(matthews_corr)
elif average is not None:
raise ValueError("Invalid `average`: expected `macro`, or None ")
else:
matthews_corr = float(matthews_corrcoef(references, predictions, sample_weight=sample_weight))
return {"matthews_correlation": matthews_corr}
| 6,601 | 45.822695 | 115 |
py
|
evaluate
|
evaluate-main/metrics/matthews_correlation/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("matthews_correlation")
launch_gradio_widget(module)
| 142 | 19.428571 | 47 |
py
|
evaluate
|
evaluate-main/metrics/wiki_split/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("wiki_split")
launch_gradio_widget(module)
| 132 | 18 | 47 |
py
|
evaluate
|
evaluate-main/metrics/wiki_split/wiki_split.py
|
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" WIKI_SPLIT metric."""
import re
import string
from collections import Counter
import datasets
import sacrebleu
import sacremoses
from packaging import version
import evaluate
_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
    author = {Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""
_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics: SARI, exact match and SacreBLEU.
It can be used to evaluate the quality of machine-generated texts.
"""
_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = evaluate.load("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
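    # A prediction counts as an exact match if it equals any of its references
    # after normalization; the result is returned as a percentage.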
scores = [any([compute_exact(ref, pred) for ref in refs]) for pred, refs in zip(predictions, references)]
return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
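    # Compute the SARI keep / deletion / addition sub-scores for a single n-gram
    # order from source (s), candidate (c) and reference (r) n-gram counts.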
rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
rgramcounter = Counter(rgramsall)
sgramcounter = Counter(sgrams)
sgramcounter_rep = Counter()
for sgram, scount in sgramcounter.items():
sgramcounter_rep[sgram] = scount * numref
cgramcounter = Counter(cgrams)
cgramcounter_rep = Counter()
for cgram, ccount in cgramcounter.items():
cgramcounter_rep[cgram] = ccount * numref
# KEEP
keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
keepgramcounterall_rep = sgramcounter_rep & rgramcounter
keeptmpscore1 = 0
keeptmpscore2 = 0
for keepgram in keepgramcountergood_rep:
keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscore2 += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
keepscore_precision = 1
keepscore_recall = 1
if len(keepgramcounter_rep) > 0:
keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
if len(keepgramcounterall_rep) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
keepscore = 0
if keepscore_precision > 0 or keepscore_recall > 0:
keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
delgramcountergood_rep = delgramcounter_rep - rgramcounter
delgramcounterall_rep = sgramcounter_rep - rgramcounter
deltmpscore1 = 0
deltmpscore2 = 0
for delgram in delgramcountergood_rep:
deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
delscore_precision = 1
if len(delgramcounter_rep) > 0:
delscore_precision = deltmpscore1 / len(delgramcounter_rep)
# ADDITION
addgramcounter = set(cgramcounter) - set(sgramcounter)
addgramcountergood = set(addgramcounter) & set(rgramcounter)
addgramcounterall = set(rgramcounter) - set(sgramcounter)
addtmpscore = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
addscore_precision = 1
addscore_recall = 1
if len(addgramcounter) > 0:
addscore_precision = addtmpscore / len(addgramcounter)
if len(addgramcounterall) > 0:
addscore_recall = addtmpscore / len(addgramcounterall)
addscore = 0
if addscore_precision > 0 or addscore_recall > 0:
addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
numref = len(rsents)
s1grams = ssent.split(" ")
c1grams = csent.split(" ")
s2grams = []
c2grams = []
s3grams = []
c3grams = []
s4grams = []
c4grams = []
r1gramslist = []
r2gramslist = []
r3gramslist = []
r4gramslist = []
for rsent in rsents:
r1grams = rsent.split(" ")
r2grams = []
r3grams = []
r4grams = []
r1gramslist.append(r1grams)
for i in range(0, len(r1grams) - 1):
if i < len(r1grams) - 1:
r2gram = r1grams[i] + " " + r1grams[i + 1]
r2grams.append(r2gram)
if i < len(r1grams) - 2:
r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
r3grams.append(r3gram)
if i < len(r1grams) - 3:
r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
r4grams.append(r4gram)
r2gramslist.append(r2grams)
r3gramslist.append(r3grams)
r4gramslist.append(r4grams)
for i in range(0, len(s1grams) - 1):
if i < len(s1grams) - 1:
s2gram = s1grams[i] + " " + s1grams[i + 1]
s2grams.append(s2gram)
if i < len(s1grams) - 2:
s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
s3grams.append(s3gram)
if i < len(s1grams) - 3:
s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
s4grams.append(s4gram)
for i in range(0, len(c1grams) - 1):
if i < len(c1grams) - 1:
c2gram = c1grams[i] + " " + c1grams[i + 1]
c2grams.append(c2gram)
if i < len(c1grams) - 2:
c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
c3grams.append(c3gram)
if i < len(c1grams) - 3:
c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
c4grams.append(c4gram)
(keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
(keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
(keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
(keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
sentence = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__).major >= 2:
normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
else:
normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
elif tokenizer == "moses":
normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
elif tokenizer == "penn":
normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
else:
normalized_sent = sentence
if not return_str:
normalized_sent = normalized_sent.split()
return normalized_sent
def compute_sari(sources, predictions, references):
if not (len(sources) == len(predictions) == len(references)):
raise ValueError("Sources length must match predictions and references lengths.")
sari_score = 0
for src, pred, refs in zip(sources, predictions, references):
sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
sari_score = sari_score / len(predictions)
return 100 * sari_score
def compute_sacrebleu(
predictions,
references,
smooth_method="exp",
smooth_value=None,
force=False,
lowercase=False,
use_effective_order=False,
):
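    # sacrebleu expects references transposed: one list per reference position,
    # each holding that reference for every prediction.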
references_per_prediction = len(references[0])
if any(len(refs) != references_per_prediction for refs in references):
raise ValueError("Sacrebleu requires the same number of references for each prediction")
transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
output = sacrebleu.corpus_bleu(
predictions,
transformed_references,
smooth_method=smooth_method,
smooth_value=smooth_value,
force=force,
lowercase=lowercase,
use_effective_order=use_effective_order,
)
return output.score
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(evaluate.Metric):
def _info(self):
return evaluate.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=[
datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
}
),
datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}
),
],
codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
],
reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
],
)
def _compute(self, sources, predictions, references):
# if only one reference is provided make sure we still use list of lists
if isinstance(references[0], str):
references = [[ref] for ref in references]
result = {}
result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
result.update({"exact": compute_em(predictions=predictions, references=references)})
return result
| 14,638 | 38.888283 | 121 |
py
|
evaluate
|
evaluate-main/metrics/squad/squad.py
|
# Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" SQuAD metric. """
import datasets
import evaluate
from .compute_score import compute_score
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
    title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = evaluate.load("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(evaluate.Metric):
def _info(self):
return evaluate.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
},
}
),
codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
)
def _compute(self, predictions, references):
pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
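        # Rebuild the minimal SQuAD-style nested structure (article -> paragraphs
        # -> qas) that the official compute_score script expects.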
dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
score = compute_score(dataset=dataset, predictions=pred_dict)
return score
| 4,525 | 39.410714 | 113 |
py
|
evaluate
|
evaluate-main/metrics/squad/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("squad")
launch_gradio_widget(module)
| 127 | 17.285714 | 47 |
py
|
evaluate
|
evaluate-main/metrics/squad/compute_score.py
|
""" Official evaluation script for v1.1 of the SQuAD dataset. """
import argparse
import json
import re
import string
import sys
from collections import Counter
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
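    # Token-level F1 between the normalized prediction and a single ground truth,
    # based on bag-of-words overlap.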
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def compute_score(dataset, predictions):
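    # `dataset` is the "data" list of a SQuAD v1.1 JSON file; `predictions` maps
    # question ids to predicted answer strings.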
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in predictions:
message = "Unanswered question " + qa["id"] + " will receive score 0."
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x["text"], qa["answers"]))
prediction = predictions[qa["id"]]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
if __name__ == "__main__":
expected_version = "1.1"
parser = argparse.ArgumentParser(description="Evaluation for SQuAD " + expected_version)
parser.add_argument("dataset_file", help="Dataset file")
parser.add_argument("prediction_file", help="Prediction File")
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if dataset_json["version"] != expected_version:
print(
"Evaluation expects v-" + expected_version + ", but got dataset with v-" + dataset_json["version"],
file=sys.stderr,
)
dataset = dataset_json["data"]
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(compute_score(dataset, predictions)))
| 3,317 | 34.677419 | 115 |
py
|
evaluate
|
evaluate-main/tests/test_evaluation_suite.py
|
from unittest import TestCase
from evaluate import EvaluationSuite
from tests.test_evaluator import DummyTextClassificationPipeline
class TestEvaluationSuite(TestCase):
def setUp(self):
# Check that the EvaluationSuite loads successfully
self.evaluation_suite = EvaluationSuite.load("evaluate/evaluation-suite-ci")
# Setup a dummy model for usage with the EvaluationSuite
self.dummy_model = DummyTextClassificationPipeline()
def test_running_evaluation_suite(self):
# Check that the evaluation suite successfully runs
results = self.evaluation_suite.run(self.dummy_model)
# Check that the results are correct
for r in results:
self.assertEqual(r["accuracy"], 0.5)
# Check that correct number of tasks were run
self.assertEqual(len(results), 2)
def test_empty_suite(self):
self.empty_suite = self.evaluation_suite
self.empty_suite.suite = []
self.assertRaises(ValueError, self.empty_suite.run, self.dummy_model)
| 1,049 | 31.8125 | 84 |
py
|
evaluate
|
evaluate-main/tests/test_hub.py
|
import glob
from unittest import TestCase
from unittest.mock import patch
import pytest
import requests
import yaml
from evaluate.hub import push_to_hub
from tests.test_metric import DummyMetric
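# Expected metadata payloads for push_to_hub: the minimal required fields and a
# variant exercising every optional argument.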
minimum_metadata = {
"model-index": [
{
"results": [
{
"task": {"type": "dummy-task"},
"dataset": {"type": "dataset_type", "name": "dataset_name"},
"metrics": [
{"type": "dummy_metric", "value": 1.0, "name": "Pretty Metric Name"},
],
}
]
}
]
}
extras_metadata = {
"model-index": [
{
"results": [
{
"task": {"type": "dummy-task", "name": "task_name"},
"dataset": {
"type": "dataset_type",
"name": "dataset_name",
"config": "fr",
"split": "test",
"revision": "abc",
"args": {"a": 1, "b": 2},
},
"metrics": [
{
"type": "dummy_metric",
"value": 1.0,
"name": "Pretty Metric Name",
"config": "default",
"args": {"hello": 1, "world": 2},
},
],
}
]
}
]
}
@patch("evaluate.hub.HF_HUB_ALLOWED_TASKS", ["dummy-task"])
@patch("evaluate.hub.dataset_info", lambda x: True)
@patch("evaluate.hub.model_info", lambda x: True)
@patch("evaluate.hub.metadata_update")
class TestHub(TestCase):
@pytest.fixture(autouse=True)
def inject_fixtures(self, caplog):
self._caplog = caplog
def setUp(self):
self.metric = DummyMetric()
self.metric.add()
self.args = {"hello": 1, "world": 2}
self.result = self.metric.compute()
def test_push_metric_required_arguments(self, metadata_update):
push_to_hub(
model_id="username/repo",
metric_value=self.result["accuracy"],
metric_name="Pretty Metric Name",
metric_type=self.metric.name,
dataset_name="dataset_name",
dataset_type="dataset_type",
task_type="dummy-task",
)
metadata_update.assert_called_once_with(repo_id="username/repo", metadata=minimum_metadata, overwrite=False)
def test_push_metric_missing_arguments(self, metadata_update):
with pytest.raises(TypeError):
push_to_hub(
model_id="username/repo",
metric_value=self.result["accuracy"],
metric_name="Pretty Metric Name",
metric_type=self.metric.name,
dataset_name="dataset_name",
dataset_type="dummy-task",
)
def test_push_metric_invalid_arguments(self, metadata_update):
with pytest.raises(TypeError):
push_to_hub(
model_id="username/repo",
metric_value=self.result["accuracy"],
metric_name="Pretty Metric Name",
metric_type=self.metric.name,
dataset_name="dataset_name",
dataset_type="dataset_type",
task_type="dummy-task",
random_value="incorrect",
)
def test_push_metric_extra_arguments(self, metadata_update):
push_to_hub(
model_id="username/repo",
metric_value=self.result["accuracy"],
metric_name="Pretty Metric Name",
metric_type=self.metric.name,
dataset_name="dataset_name",
dataset_type="dataset_type",
dataset_config="fr",
dataset_split="test",
dataset_revision="abc",
dataset_args={"a": 1, "b": 2},
task_type="dummy-task",
task_name="task_name",
metric_config=self.metric.config_name,
metric_args=self.args,
)
metadata_update.assert_called_once_with(repo_id="username/repo", metadata=extras_metadata, overwrite=False)
def test_push_metric_invalid_task_type(self, metadata_update):
with pytest.raises(ValueError):
push_to_hub(
model_id="username/repo",
metric_value=self.result["accuracy"],
metric_name="Pretty Metric Name",
metric_type=self.metric.name,
dataset_name="dataset_name",
dataset_type="dataset_type",
task_type="audio-classification",
)
def test_push_metric_invalid_dataset_type(self, metadata_update):
with patch("evaluate.hub.dataset_info") as mock_dataset_info:
mock_dataset_info.side_effect = requests.HTTPError()
push_to_hub(
model_id="username/repo",
metric_value=self.result["accuracy"],
metric_name="Pretty Metric Name",
metric_type=self.metric.name,
dataset_name="dataset_name",
dataset_type="dataset_type",
task_type="dummy-task",
)
assert "Dataset dataset_type not found on the Hub at hf.co/datasets/dataset_type" in self._caplog.text
metadata_update.assert_called_once_with(
repo_id="username/repo", metadata=minimum_metadata, overwrite=False
)
def test_push_metric_invalid_model_id(self, metadata_update):
with patch("evaluate.hub.model_info") as mock_model_info:
mock_model_info.side_effect = requests.HTTPError()
with pytest.raises(ValueError):
push_to_hub(
model_id="username/bad-repo",
metric_value=self.result["accuracy"],
metric_name="Pretty Metric Name",
metric_type=self.metric.name,
dataset_name="dataset_name",
dataset_type="dataset_type",
task_type="dummy-task",
)
class ValidateYaml(TestCase):
def setUp(self):
pass
def testLoadingCards(self):
readme_filepaths = []
for glob_path in ["measurements/*/README.md", "metrics/*/README.md", "comparisons/*/README.md"]:
readme_filepaths.extend(glob.glob(glob_path))
for readme_file in readme_filepaths:
with open(readme_file, encoding="utf8") as f_yaml:
x = yaml.safe_load_all(f_yaml)
self.assertIsInstance(next(x), dict)
| 6,724 | 34.771277 | 116 |
py
|
evaluate
|
evaluate-main/tests/conftest.py
|
import csv
import json
import lzma
import os
import tarfile
import textwrap
import datasets
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets import config
from datasets.arrow_dataset import Dataset
from datasets.features import ClassLabel, Features, Sequence, Value
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_evaluate_cache = test_hf_cache_home / "datasets"
test_hf_metrics_cache = test_hf_cache_home / "metrics"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("evaluate.config.HF_EVALUATE_CACHE", str(test_hf_evaluate_cache))
monkeypatch.setattr("evaluate.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
monkeypatch.setattr("evaluate.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_DOWNLOADED_EVALUATE_PATH = test_hf_evaluate_cache / "downloads"
monkeypatch.setattr("evaluate.config.DOWNLOADED_EVALUATE_PATH", str(test_DOWNLOADED_EVALUATE_PATH))
test_EXTRACTED_EVALUATE_PATH = test_hf_evaluate_cache / "downloads" / "extracted"
monkeypatch.setattr("evaluate.config.EXTRACTED_EVALUATE_PATH", str(test_EXTRACTED_EVALUATE_PATH))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("evaluate.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
FILE_CONTENT = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session")
def dataset():
n = 10
features = Features(
{
"tokens": Sequence(Value("string")),
"labels": Sequence(ClassLabel(names=["negative", "positive"])),
"answers": Sequence(
{
"text": Value("string"),
"answer_start": Value("int32"),
}
),
"id": Value("int64"),
}
)
dataset = Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(n)),
},
features=features,
)
return dataset
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
dataset.map(cache_file_name=filename)
return filename
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.txt"
data = FILE_CONTENT
with open(filename, "w") as f:
f.write(data)
return filename
@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.txt.xz"
data = bytes(FILE_CONTENT, "utf-8")
with lzma.open(filename, "wb") as f:
f.write(data)
return filename
@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
import gzip
path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
data = bytes(FILE_CONTENT, "utf-8")
with gzip.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
import bz2
path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
if config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
path = tmp_path_factory.mktemp("data") / "file.txt.zst"
data = bytes(FILE_CONTENT, "utf-8")
with zstd.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
if config.LZ4_AVAILABLE:
import lz4.frame
path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
data = bytes(FILE_CONTENT, "utf-8")
with lz4.frame.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.xml"
data = textwrap.dedent(
"""\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>"""
)
with open(filename, "w") as f:
f.write(data)
return filename
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
dataset = Dataset.from_dict(DATA_DICT_OF_LISTS)
path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
dataset.map(cache_file_name=path)
return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
with open(path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(item)
return path
@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
with open(path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(item)
return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
import bz2
path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
with open(csv_path, "rb") as f:
data = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
import zipfile
path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(csv_path, arcname=os.path.basename(csv_path))
f.write(csv2_path, arcname=os.path.basename(csv2_path))
return path
@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
import zipfile
path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
schema = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.int64(),
"col_3": pa.float64(),
}
)
with open(path, "wb") as f:
writer = pq.ParquetWriter(f, schema=schema)
pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
writer.write_table(pa_table)
writer.close()
return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.json")
data = {"data": DATA}
with open(path, "w") as f:
json.dump(data, f)
return path
@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.json")
data = {"data": DATA_DICT_OF_LISTS}
with open(path, "w") as f:
json.dump(data, f)
return path
@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
with open(path, "w") as f:
for item in DATA:
f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
with open(path, "w") as f:
for item in DATA:
f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
with open(path, "w") as f:
for item in DATA_312:
f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
with open(path, "w") as f:
for item in DATA_STR:
f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
import gzip
path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
with open(text_path, "rb") as orig_file:
with gzip.open(path, "wb") as zipped_file:
zipped_file.writelines(orig_file)
return path
@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
import gzip
path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
with open(jsonl_path, "rb") as orig_file:
with gzip.open(path, "wb") as zipped_file:
zipped_file.writelines(orig_file)
return path
@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
import zipfile
path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
return path
@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
import zipfile
path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
return path
@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
with tarfile.TarFile(path, "w") as f:
f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
return path
@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
with tarfile.TarFile(path, "w") as f:
f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
data = ["0", "1", "2", "3"]
path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
with open(path, "w") as f:
for item in data:
f.write(item + "\n")
return path
@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
data = ["0", "1", "2", "3"]
path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
with open(path, "w") as f:
for item in data:
f.write(item + "\n")
return path
@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
import zipfile
path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(text_path, arcname=os.path.basename(text_path))
f.write(text2_path, arcname=os.path.basename(text2_path))
return path
@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
import zipfile
path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
return path
@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
with open(path, "w", encoding="utf-8") as f:
f.write(text)
return path
| 14,441 | 29.925054 | 117 |
py
|
evaluate
|
evaluate-main/tests/test_evaluator.py
|
# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from time import sleep
from unittest import TestCase, mock
import numpy as np
from datasets import ClassLabel, Dataset, Features, Sequence, Value
from PIL import Image
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
AutoModelForImageClassification,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoTokenizer,
pipeline,
)
from evaluate import (
AudioClassificationEvaluator,
AutomaticSpeechRecognitionEvaluator,
Evaluator,
ImageClassificationEvaluator,
QuestionAnsweringEvaluator,
Text2TextGenerationEvaluator,
TextClassificationEvaluator,
TextGenerationEvaluator,
TokenClassificationEvaluator,
evaluator,
load,
)
from .utils import slow
class DummyTextGenerationPipeline:
def __init__(self, prefix="generated", task="text-generation", num_return_sequences=1):
self.task = task
self.prefix = prefix
self.num_return_sequences = num_return_sequences
def __call__(self, inputs, **kwargs):
return [[{f"{self.prefix}_text": "Lorem ipsum"} for _ in range(self.num_return_sequences)] for _ in inputs]
class DummyText2TextGenerationPipeline:
def __init__(self, prefix="generated", task="text2text-generation"):
self.task = task
self.prefix = prefix
def __call__(self, inputs, **kwargs):
return [{f"{self.prefix}_text": "Lorem ipsum"} for _ in inputs]
class DummyTextClassificationPipeline:
def __init__(self, sleep_time=None):
self.task = "text-classification"
self.sleep_time = sleep_time
def __call__(self, inputs, **kwargs):
if self.sleep_time is not None:
sleep(self.sleep_time)
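        # Deterministic dummy output: even-indexed inputs are labelled POSITIVE, odd-indexed NEGATIVE.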
return [{"label": "NEGATIVE"} if i % 2 == 1 else {"label": "POSITIVE"} for i, _ in enumerate(inputs)]
class DummyImageClassificationPipeline:
def __init__(self):
self.task = "image-classification"
def __call__(self, images, **kwargs):
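        # Always return the same two candidate labels ("yurt" over "umbrella") regardless of the input image.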
return [[{"score": 0.9, "label": "yurt"}, {"score": 0.1, "label": "umbrella"}] for i, _ in enumerate(images)]
class DummyQuestionAnsweringPipeline:
def __init__(self, v2: bool):
self.task = "question-answering"
self.v2 = v2
def __call__(self, question, context, **kwargs):
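        # In v2 mode, mimic a SQuAD v2-style model: even-indexed questions get an answer span,
        # odd-indexed ones get an empty (no-answer) prediction. In v1 mode, every question is answered.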
if self.v2:
return [
{"score": 0.95, "start": 31, "end": 39, "answer": "Felix"}
if i % 2 == 0
else {"score": 0.95, "start": 0, "end": 0, "answer": ""}
for i in range(len(question))
]
else:
return [{"score": 0.95, "start": 31, "end": 39, "answer": "Felix"} for _ in question]
class DummyTokenClassificationPipeline:
def __init__(self):
self.task = "token-classification"
def __call__(self, inputs, **kwargs):
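        # Fixed entity predictions keyed by character `start` offsets; predictions_processor later
        # maps them back onto the word boundaries of the input sentence.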
result = [
{"start": 0, "entity": "B-LOC"},
{"start": 2, "entity": "I-LOC"},
{"start": 4, "entity": "I-LOC"},
{"start": 9, "entity": "O"},
{"start": 11, "entity": "O"},
{"start": 16, "entity": "B-LOC"},
{"start": 21, "entity": "O"},
]
return [result]
class DummyAutomaticSpeechRecognitionPipeline:
def __init__(self) -> None:
self.task = "automatic-speech-recognition"
def __call__(self, inputs, **kwargs):
return [{"text": "Lorem ipsum"} for _ in inputs]
class DummyAudioClassificationPipeline:
def __init__(self):
self.task = "audio-classification"
def __call__(self, audio, **kwargs):
return [[{"score": 0.9, "label": "yes"}, {"score": 0.1, "label": "no"}] for i, _ in enumerate(audio)]
class TestEvaluator(TestCase):
def setUp(self):
self.data = Dataset.from_dict({"label": [1, 0], "text": ["great movie", "horrible movie"]})
self.default_ckpt = "hf-internal-testing/tiny-random-bert"
self.default_model = AutoModelForSequenceClassification.from_pretrained(self.default_ckpt, num_labels=2)
self.default_tokenizer = AutoTokenizer.from_pretrained(self.default_ckpt)
self.pipe = pipeline("text-classification", model=self.default_model, tokenizer=self.default_tokenizer)
self.evaluator = evaluator("text-classification")
self.data = Dataset.from_dict({"label": [1, 0], "text": ["great movie", "horrible movie"]})
self.label_mapping = {"LABEL_0": 0.0, "LABEL_1": 1.0}
def test_wrong_task(self):
self.assertRaises(KeyError, evaluator, "bad_task")
def test_device_placement(self):
orig_import = __import__
pt_mock = mock.Mock()
tf_mock = mock.Mock()
# mock import of torch and tensorflow
def import_pt_tf_mock(name, *args):
if name == "torch":
if pt_available:
return pt_mock
else:
raise ImportError
if name == "tensorflow":
if tf_available:
return tf_mock
else:
raise ImportError
return orig_import(name, *args)
with mock.patch("builtins.__import__", side_effect=import_pt_tf_mock):
# neither pt or tf are available
pt_available = False
tf_available = False
self.assertEqual(Evaluator._infer_device(), -1)
# pt available but no GPU
pt_available = True
pt_mock.cuda.is_available.return_value = False
self.assertEqual(Evaluator._infer_device(), -1)
# pt available and GPU found
pt_mock.cuda.is_available.return_value = True
self.assertEqual(Evaluator._infer_device(), 0)
# tf available but no GPU
pt_available = False
tf_available = True
tf_mock.config.list_physical_devices.return_value = []
self.assertEqual(Evaluator._infer_device(), -1)
# tf available and GPU found
tf_mock.config.list_physical_devices.return_value = ["GPU:0", "GPU:1"]
self.assertEqual(Evaluator._infer_device(), 0)
# pt accelerator found and pipeline instantiated on CPU
pt_mock.cuda.is_available.return_value = True
self.assertRaises(
ValueError, Evaluator.check_for_mismatch_in_device_setup, Evaluator._infer_device(), self.pipe
)
# tf accelerator found and pipeline instantiated on CPU
pt_available = False
tf_available = True
self.assertRaises(
ValueError, Evaluator.check_for_mismatch_in_device_setup, Evaluator._infer_device(), self.pipe
)
def test_pipe_init(self):
self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
input_column="text",
label_column="label",
label_mapping=self.label_mapping,
)
def test_model_init(self):
self.evaluator.compute(
model_or_pipeline=self.default_model,
tokenizer=self.default_tokenizer,
data=self.data,
input_column="text",
label_column="label",
label_mapping=self.label_mapping,
)
def test_model_str_init(self):
self.evaluator.compute(
model_or_pipeline=self.default_ckpt,
data=self.data,
input_column="text",
label_column="label",
label_mapping=self.label_mapping,
)
class TestTextClassificationEvaluator(TestCase):
def setUp(self):
self.data = Dataset.from_dict({"label": [1, 0], "text": ["great movie", "horrible movie"]})
self.default_model = "lvwerra/distilbert-imdb"
self.input_column = "text"
self.label_column = "label"
self.pipe = DummyTextClassificationPipeline()
self.perf_pipe = DummyTextClassificationPipeline(sleep_time=0.1)
self.evaluator = evaluator("text-classification")
self.label_mapping = {"NEGATIVE": 0.0, "POSITIVE": 1.0}
def test_pipe_init(self):
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
input_column="text",
label_column="label",
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 1.0)
@slow
def test_model_init(self):
results = self.evaluator.compute(
model_or_pipeline=self.default_model,
data=self.data,
metric="accuracy",
input_column=self.input_column,
label_column=self.label_column,
label_mapping=self.label_mapping,
)
model = AutoModelForSequenceClassification.from_pretrained(self.default_model)
tokenizer = AutoTokenizer.from_pretrained(self.default_model)
self.assertEqual(results["accuracy"], 1.0)
results = self.evaluator.compute(
model_or_pipeline=model,
data=self.data,
metric="accuracy",
tokenizer=tokenizer,
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 1.0)
def test_class_init(self):
evaluator = TextClassificationEvaluator()
self.assertEqual(evaluator.task, "text-classification")
self.assertIsNone(evaluator.default_metric_name)
results = evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="f1",
label_mapping=self.label_mapping,
)
self.assertEqual(results["f1"], 1.0)
@slow
def test_default_pipe_init(self):
results = self.evaluator.compute(
data=self.data,
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 1.0)
def test_data_loading(self):
# Test passing in dataset by name with split
data = self.evaluator.load_data("evaluate/imdb-ci", split="test[:1]")
self.evaluator.prepare_data(data=data, input_column="text", label_column="label", second_input_column=None)
# Test passing in dataset by name without split and inferring the optimal split
data = self.evaluator.load_data("evaluate/imdb-ci")
self.evaluator.prepare_data(data=data, input_column="text", label_column="label", second_input_column=None)
# Test that it chooses the correct one (e.g. imdb only has train and test, but no validation)
self.assertEqual(data.split, "test")
# Test that the data point returned is correct; this maps to the first example in the dataset
self.assertEqual(data[0]["text"], "I love movies about whales!")
# Test loading subset of a dataset with the `name` field
data = self.evaluator.load_data("evaluate/glue-ci", subset="cola", split="test")
self.assertEqual(isinstance(data, Dataset), True)
# Test loading subset of a dataset with the `name` field and having it infer the split
data = self.evaluator.load_data("evaluate/glue-ci", subset="cola")
self.assertEqual(isinstance(data, Dataset), True)
def test_overwrite_default_metric(self):
accuracy = load("accuracy")
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric=accuracy,
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 1.0)
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="accuracy",
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 1.0)
def test_bootstrap(self):
data = Dataset.from_dict({"label": [1, 0, 0], "text": ["great movie", "great movie", "horrible movie"]})
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=data,
metric="accuracy",
label_mapping=self.label_mapping,
strategy="bootstrap",
n_resamples=10,
random_state=0,
)
self.assertAlmostEqual(results["accuracy"]["score"], 0.666666, 5)
self.assertAlmostEqual(results["accuracy"]["confidence_interval"][0], 0.33333, 5)
self.assertAlmostEqual(results["accuracy"]["confidence_interval"][1], 0.666666, 5)
self.assertAlmostEqual(results["accuracy"]["standard_error"], 0.22498, 5)
def test_perf(self):
results = self.evaluator.compute(
model_or_pipeline=self.perf_pipe,
data=self.data,
metric="accuracy",
input_column=self.input_column,
label_column=self.label_column,
label_mapping=self.label_mapping,
n_resamples=10,
random_state=0,
)
self.assertEqual(results["accuracy"], 1.0)
self.assertAlmostEqual(results["total_time_in_seconds"], 0.1, 1)
self.assertAlmostEqual(results["samples_per_second"], len(self.data) / results["total_time_in_seconds"], 5)
self.assertAlmostEqual(results["latency_in_seconds"], results["total_time_in_seconds"] / len(self.data), 5)
def test_bootstrap_and_perf(self):
data = Dataset.from_dict({"label": [1, 0, 0], "text": ["great movie", "great movie", "horrible movie"]})
results = self.evaluator.compute(
model_or_pipeline=self.perf_pipe,
data=data,
metric="accuracy",
input_column=self.input_column,
label_column=self.label_column,
label_mapping=self.label_mapping,
strategy="bootstrap",
n_resamples=10,
random_state=0,
)
self.assertAlmostEqual(results["accuracy"]["score"], 0.666666, 5)
self.assertAlmostEqual(results["accuracy"]["confidence_interval"][0], 0.333333, 5)
self.assertAlmostEqual(results["accuracy"]["confidence_interval"][1], 0.666666, 5)
self.assertAlmostEqual(results["accuracy"]["standard_error"], 0.22498285, 5)
self.assertAlmostEqual(results["total_time_in_seconds"], 0.1, 1)
self.assertAlmostEqual(results["samples_per_second"], len(data) / results["total_time_in_seconds"], 5)
self.assertAlmostEqual(results["latency_in_seconds"], results["total_time_in_seconds"] / len(data), 5)
class TestTextClassificationEvaluatorTwoColumns(TestCase):
def setUp(self):
self.data = Dataset.from_dict(
{
"label": [1, 0],
"premise": ["great car", "great movie"],
"hypothesis": ["great vehicle", "horrible movie"],
}
)
self.default_model = "prajjwal1/bert-tiny-mnli"
self.input_column = "premise"
self.second_input_column = "hypothesis"
self.label_column = "label"
self.pipe = DummyTextClassificationPipeline()
self.evaluator = evaluator("text-classification")
self.label_mapping = {"NEGATIVE": 0.0, "POSITIVE": 1.0}
self.label_mapping2 = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
def test_pipe_init(self):
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
input_column=self.input_column,
second_input_column=self.second_input_column,
label_column="label",
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 1.0)
@slow
def test_model_init(self):
results = self.evaluator.compute(
model_or_pipeline=self.default_model,
data=self.data,
metric="accuracy",
input_column=self.input_column,
second_input_column=self.second_input_column,
label_column=self.label_column,
label_mapping=self.label_mapping2,
)
self.assertEqual(results["accuracy"], 1.0)
model = AutoModelForSequenceClassification.from_pretrained(self.default_model)
tokenizer = AutoTokenizer.from_pretrained(self.default_model)
results = self.evaluator.compute(
model_or_pipeline=model,
data=self.data,
metric="accuracy",
input_column=self.input_column,
second_input_column=self.second_input_column,
tokenizer=tokenizer,
label_mapping=self.label_mapping2,
)
self.assertEqual(results["accuracy"], 1.0)
class TestImageClassificationEvaluator(TestCase):
def setUp(self):
self.data = Dataset.from_dict(
{
"label": [2, 2],
"image": [Image.new("RGB", (500, 500), (255, 255, 255)), Image.new("RGB", (500, 500), (170, 95, 170))],
}
)
self.default_model = "lysandre/tiny-vit-random"
self.pipe = DummyImageClassificationPipeline()
self.evaluator = evaluator("image-classification")
self.label_mapping = AutoConfig.from_pretrained(self.default_model).label2id
def test_pipe_init(self):
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
@slow
def test_model_init(self):
results = self.evaluator.compute(
model_or_pipeline=self.default_model,
data=self.data,
metric="accuracy",
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
model = AutoModelForImageClassification.from_pretrained(self.default_model)
feature_extractor = AutoFeatureExtractor.from_pretrained(self.default_model)
results = self.evaluator.compute(
model_or_pipeline=model,
data=self.data,
metric="accuracy",
feature_extractor=feature_extractor,
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
def test_class_init(self):
evaluator = ImageClassificationEvaluator()
self.assertEqual(evaluator.task, "image-classification")
self.assertIsNone(evaluator.default_metric_name)
results = evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="accuracy",
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
@slow
def test_default_pipe_init(self):
results = self.evaluator.compute(
data=self.data,
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
def test_overwrite_default_metric(self):
accuracy = load("accuracy")
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric=accuracy,
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="accuracy",
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
class TestQuestionAnsweringEvaluator(TestCase):
def setUp(self):
self.data = Dataset.from_dict(
{
"id": ["56be4db0acb8001400a502ec", "56be4db0acb8001400a502ed"],
"context": ["My name is Felix and I love cookies!", "Misa name is Felix and misa love cookies!"],
"answers": [{"text": ["Felix"], "answer_start": [11]}, {"text": ["Felix"], "answer_start": [13]}],
"question": ["What is my name?", "What is my name?"],
}
)
self.data_v2 = Dataset.from_dict(
{
"id": ["56be4db0acb8001400a502ec", "56be4db0acb8001400a502ed"],
"context": ["My name is Felix and I love cookies!", "Let's explore the city!"],
"answers": [{"text": ["Felix"], "answer_start": [11]}, {"text": [], "answer_start": []}],
"question": ["What is my name?", "What is my name?"],
}
)
self.default_model = "mrm8488/bert-tiny-finetuned-squadv2"
self.pipe = DummyQuestionAnsweringPipeline(v2=False)
self.pipe_v2 = DummyQuestionAnsweringPipeline(v2=True)
self.evaluator = evaluator("question-answering")
def test_pipe_init(self):
# squad_v1-like dataset
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
)
self.assertEqual(results["exact_match"], 100.0)
self.assertEqual(results["f1"], 100.0)
@slow
def test_model_init(self):
# squad_v1-like dataset
results = self.evaluator.compute(
model_or_pipeline=self.default_model,
data=self.data,
metric="squad",
)
self.assertEqual(results["exact_match"], 0)
self.assertEqual(results["f1"], 100 / 3)
model = AutoModelForQuestionAnswering.from_pretrained(self.default_model)
tokenizer = AutoTokenizer.from_pretrained(self.default_model)
results = self.evaluator.compute(
model_or_pipeline=model,
data=self.data,
metric="squad",
tokenizer=tokenizer,
)
self.assertEqual(results["exact_match"], 0)
self.assertEqual(results["f1"], 100 / 3)
def test_class_init(self):
# squad_v1-like dataset
evaluator = QuestionAnsweringEvaluator()
self.assertEqual(evaluator.task, "question-answering")
self.assertIsNone(evaluator.default_metric_name)
results = evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="squad",
)
self.assertEqual(results["exact_match"], 100.0)
self.assertEqual(results["f1"], 100.0)
# squad_v2-like dataset
evaluator = QuestionAnsweringEvaluator()
self.assertEqual(evaluator.task, "question-answering")
self.assertIsNone(evaluator.default_metric_name)
results = evaluator.compute(
model_or_pipeline=self.pipe_v2,
data=self.data_v2,
metric="squad_v2",
)
self.assertDictEqual(
{key: results[key] for key in ["HasAns_f1", "NoAns_f1"]}, {"HasAns_f1": 100.0, "NoAns_f1": 100.0}
)
@slow
def test_default_pipe_init(self):
# squad_v1-like dataset
results = self.evaluator.compute(
data=self.data,
)
self.assertEqual(results["exact_match"], 100.0)
self.assertEqual(results["f1"], 100.0)
# squad_v2-like dataset
results = self.evaluator.compute(
data=self.data_v2,
metric="squad_v2",
)
self.assertDictEqual(
{key: results[key] for key in ["HasAns_f1", "NoAns_f1"]}, {"HasAns_f1": 100.0, "NoAns_f1": 0.0}
)
def test_data_loading(self):
# Test passing in dataset by name with data_split
data = self.evaluator.load_data("evaluate/squad-ci", split="validation[:1]")
self.evaluator.prepare_data(
data=data, question_column="question", context_column="context", id_column="id", label_column="answers"
)
# Test passing in dataset by name without data_split and inferring the optimal split
data = self.evaluator.load_data("evaluate/squad-ci")
self.evaluator.prepare_data(
data=data, question_column="question", context_column="context", id_column="id", label_column="answers"
)
# Test that it chooses the correct one (e.g. squad only has train and validation, but no test)
self.assertEqual(data.split, "validation")
# Test that the data point returned is correct; this maps to the first example in the squad-ci dataset
self.assertEqual(data[0]["id"], "56be4db0acb8001400a502ec")
def test_overwrite_default_metric(self):
# squad_v1-like dataset
squad = load("squad")
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric=squad,
)
self.assertEqual(results["exact_match"], 100.0)
self.assertEqual(results["f1"], 100.0)
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="squad",
)
self.assertEqual(results["exact_match"], 100.0)
self.assertEqual(results["f1"], 100.0)
class TestTokenClassificationEvaluator(TestCase):
def setUp(self):
features = Features(
{
"tokens": Sequence(feature=Value(dtype="string")),
"ner_tags": Sequence(feature=ClassLabel(names=["O", "B-LOC", "I-LOC"])),
}
)
self.data = Dataset.from_dict(
{
"tokens": [["New", "York", "a", "nice", "City", "."]],
"ner_tags": [[1, 2, 0, 0, 1, 0]],
},
features=features,
)
self.default_model = "hf-internal-testing/tiny-bert-for-token-classification"
self.pipe = DummyTokenClassificationPipeline()
self.evaluator = evaluator("token-classification")
@slow
def test_model_init(self):
results = self.evaluator.compute(
model_or_pipeline=self.default_model,
data=self.data,
metric="seqeval",
)
self.assertEqual(results["overall_accuracy"], 0.5)
model = AutoModelForTokenClassification.from_pretrained(self.default_model)
tokenizer = AutoTokenizer.from_pretrained(self.default_model)
results = self.evaluator.compute(
model_or_pipeline=model,
data=self.data,
metric="seqeval",
tokenizer=tokenizer,
)
self.assertEqual(results["overall_accuracy"], 0.5)
def test_class_init(self):
evaluator = TokenClassificationEvaluator()
self.assertEqual(evaluator.task, "token-classification")
self.assertIsNone(evaluator.default_metric_name)
results = evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="seqeval",
)
self.assertEqual(results["overall_accuracy"], 1.0)
@slow
def test_default_pipe_init(self):
results = self.evaluator.compute(
data=self.data,
)
self.assertEqual(results["overall_accuracy"], 2 / 3)
def test_overwrite_default_metric(self):
accuracy = load("seqeval")
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric=accuracy,
)
self.assertEqual(results["overall_accuracy"], 1.0)
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="seqeval",
)
self.assertEqual(results["overall_accuracy"], 1.0)
def test_data_loading(self):
# Test passing in dataset by name with data_split
data = self.evaluator.load_data("evaluate/conll2003-ci", split="validation[:1]")
self.evaluator.prepare_data(
data=data,
input_column="tokens",
label_column="ner_tags",
join_by=" ",
)
# Test passing in dataset by name without data_split and inferring the optimal split
data = self.evaluator.load_data("evaluate/conll2003-ci")
self.evaluator.prepare_data(
data=data,
input_column="tokens",
label_column="ner_tags",
join_by=" ",
)
# Test that it chooses the correct one (e.g. conll2003 has train, validation, test but should select test)
self.assertEqual(data.split, "test")
# Test that the data point returned is correct; this maps to the first example in the dataset
self.assertEqual(data[0]["id"], "0")
def test_wrong_task(self):
self.assertRaises(KeyError, evaluator, "bad_task")
def test_words_to_offsets(self):
task_evaluator = evaluator("token-classification")
words = ["This", "is", "a", "test", "."]
join_by = " "
offsets = task_evaluator.words_to_offsets(words, join_by)
self.assertListEqual([(0, 3), (5, 6), (8, 8), (10, 13), (15, 15)], offsets)
words = ["日", "本", "語", "はなせるの?"]
join_by = ""
offsets = task_evaluator.words_to_offsets(words, join_by)
self.assertListEqual([(0, 0), (1, 1), (2, 2), (3, 8)], offsets)
def test_predictions_processor(self):
task_evaluator = evaluator("token-classification")
join_by = " "
words = [["New", "York", "a", "nice", "City", "."]]
# aligned start and words
predictions = [
[
{"start": 0, "entity": "B-LOC"},
{"start": 2, "entity": "I-LOC"},
{"start": 4, "entity": "I-LOC"},
{"start": 9, "entity": "O"},
{"start": 11, "entity": "O"},
{"start": 16, "entity": "B-LOC"},
{"start": 21, "entity": "O"},
]
]
predictions = task_evaluator.predictions_processor(predictions, words, join_by)
self.assertListEqual(predictions["predictions"][0], ["B-LOC", "I-LOC", "O", "O", "B-LOC", "O"])
# non-aligned start and words
predictions = [
[
{"start": 0, "entity": "B-LOC"},
{"start": 2, "entity": "I-LOC"},
{"start": 9, "entity": "O"},
{"start": 11, "entity": "O"},
{"start": 16, "entity": "B-LOC"},
{"start": 21, "entity": "O"},
]
]
predictions = task_evaluator.predictions_processor(predictions, words, join_by)
self.assertListEqual(predictions["predictions"][0], ["B-LOC", "O", "O", "O", "B-LOC", "O"])
# non-aligned start and words
predictions = [
[
{"start": 0, "entity": "B-LOC"},
{"start": 6, "entity": "I-LOC"},
{"start": 9, "entity": "O"},
{"start": 11, "entity": "O"},
{"start": 16, "entity": "B-LOC"},
{"start": 21, "entity": "O"},
]
]
predictions = task_evaluator.predictions_processor(predictions, words, join_by)
self.assertListEqual(predictions["predictions"][0], ["B-LOC", "O", "O", "O", "B-LOC", "O"])
# non-aligned start and words
predictions = [
[
{"start": 0, "entity": "B-LOC"},
{"start": 9, "entity": "O"},
{"start": 11, "entity": "O"},
{"start": 16, "entity": "B-LOC"},
{"start": 21, "entity": "O"},
]
]
predictions = task_evaluator.predictions_processor(predictions, words, join_by)
self.assertListEqual(predictions["predictions"][0], ["B-LOC", "O", "O", "O", "B-LOC", "O"])
class TestTextGenerationEvaluator(TestCase):
def setUp(self):
self.data = Dataset.from_dict({"text": ["Lorem ipsum"]})
self.pipe = DummyTextGenerationPipeline(num_return_sequences=4)
self.evaluator = evaluator("text-generation")
def test_class_init(self):
evaluator = TextGenerationEvaluator()
self.assertEqual(evaluator.task, "text-generation")
self.assertIsNone(evaluator.default_metric_name)
results = evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="word_count",
)
self.assertIsInstance(results["unique_words"], int)
@slow
def test_default_pipe_init(self):
results = self.evaluator.compute(data=self.data)
self.assertIsInstance(results["unique_words"], int)
def test_overwrite_default_metric(self):
word_length = load("word_length")
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric=word_length,
)
self.assertIsInstance(results["average_word_length"], int)
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="word_length",
)
self.assertIsInstance(results["average_word_length"], int)
def test_process_predictions_multiple_return_sequences(self):
processed_predictions = self.evaluator.predictions_processor(
[
[{"generated_text": "A"}, {"generated_text": "B"}],
[{"generated_text": "C"}, {"generated_text": "D"}],
]
)
self.assertEqual(processed_predictions, {"data": ["A", "B", "C", "D"]})
class TestText2TextGenerationEvaluator(TestCase):
def setUp(self):
self.data = Dataset.from_dict(
{
"text": ["Lorem ipsum"] * 4,
"label": ["Ipsum Lorem"] * 4,
}
)
self.pipe = DummyText2TextGenerationPipeline()
self.evaluator = evaluator("text2text-generation")
def test_pipe_init(self):
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
)
self.assertEqual(results["bleu"], 0)
def test_class_init(self):
evaluator = Text2TextGenerationEvaluator()
self.assertEqual(evaluator.task, "text2text-generation")
self.assertIsNone(evaluator.default_metric_name)
results = evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="bleu",
)
self.assertEqual(results["bleu"], 0)
@slow
def test_default_pipe_init(self):
results = self.evaluator.compute(data=self.data)
self.assertEqual(results["bleu"], 0)
def test_overwrite_default_metric(self):
rouge = load("rouge")
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric=rouge,
)
self.assertEqual(results["rouge1"], 1.0)
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="rouge",
)
self.assertEqual(results["rouge1"], 1.0)
def test_summarization(self):
pipe = DummyText2TextGenerationPipeline(task="summarization", prefix="summary")
e = evaluator("summarization")
results = e.compute(
model_or_pipeline=pipe,
data=self.data,
)
self.assertEqual(results["rouge1"], 1.0)
def test_translation(self):
pipe = DummyText2TextGenerationPipeline(task="translation", prefix="translation")
e = evaluator("translation")
results = e.compute(
model_or_pipeline=pipe,
data=self.data,
)
self.assertEqual(results["bleu"], 0)
class TestAutomaticSpeechRecognitionEvaluator(TestCase):
def setUp(self):
self.data = Dataset.from_dict(
{
"path": [
# Examples copied from default speech model of
                    # `automatic-speech-recognition` pipeline:
# https://huggingface.co/facebook/wav2vec2-base-960h
# https://github.com/huggingface/transformers/blob/main/src/transformers/pipelines/__init__.py#L161
"https://cdn-media.huggingface.co/speech_samples/sample1.flac",
"https://cdn-media.huggingface.co/speech_samples/sample2.flac",
],
"sentence": ["Ipsum Lorem"] * 2,
}
)
self.pipe = DummyAutomaticSpeechRecognitionPipeline()
self.evaluator = evaluator("automatic-speech-recognition")
def test_pipe_init(self):
print(self.evaluator)
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
)
print(results)
self.assertEqual(results["wer"], 1.0)
def test_class_init(self):
evaluator = AutomaticSpeechRecognitionEvaluator()
self.assertEqual(evaluator.task, "automatic-speech-recognition")
self.assertIsNone(evaluator.default_metric_name)
results = evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="wer",
)
self.assertEqual(results["wer"], 1.0)
@slow
def test_default_pipe_init(self):
results = self.evaluator.compute(data=self.data)
self.assertGreater(results["wer"], 1.0)
def test_overwrite_default_metric(self):
cer = load("cer")
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric=cer,
)
self.assertEqual(results["cer"], 0.7272727272727273)
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="cer",
)
self.assertEqual(results["cer"], 0.7272727272727273)
class TestAudioClassificationEvaluator(TestCase):
def setUp(self):
self.data = Dataset.from_dict(
{"file": ["https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac"], "label": [11]}
)
self.raw_data = Dataset.from_dict(
{
"audio": [
np.array(
[-0.00048828, -0.00018311, -0.00137329, 0.00079346, 0.00091553, 0.00085449], dtype=np.float32
)
],
"label": [11],
}
)
self.default_model = "superb/wav2vec2-base-superb-ks"
self.pipe = DummyAudioClassificationPipeline()
self.evaluator = evaluator("audio-classification")
self.label_mapping = AutoConfig.from_pretrained(self.default_model).label2id
def test_pipe_init(self):
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
def test_raw_pipe_init(self):
results = self.evaluator.compute(
model_or_pipeline=self.pipe, data=self.raw_data, label_mapping=self.label_mapping, input_column="audio"
)
self.assertEqual(results["accuracy"], 0)
@slow
def test_model_init(self):
results = self.evaluator.compute(
model_or_pipeline=self.default_model,
data=self.data,
metric="accuracy",
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
model = AutoModelForAudioClassification.from_pretrained(self.default_model)
feature_extractor = AutoFeatureExtractor.from_pretrained(self.default_model)
results = self.evaluator.compute(
model_or_pipeline=model,
data=self.data,
metric="accuracy",
feature_extractor=feature_extractor,
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
def test_class_init(self):
evaluator = AudioClassificationEvaluator()
self.assertEqual(evaluator.task, "audio-classification")
self.assertIsNone(evaluator.default_metric_name)
results = evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="accuracy",
label_mapping=self.label_mapping,
)
results_raw = evaluator.compute(
model_or_pipeline=self.pipe,
data=self.raw_data,
label_mapping=self.label_mapping,
metric="accuracy",
input_column="audio",
)
self.assertEqual(results_raw["accuracy"], 0)
self.assertEqual(results["accuracy"], 0)
@slow
def test_default_pipe_init(self):
results = self.evaluator.compute(
data=self.data,
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
def test_overwrite_default_metric(self):
accuracy = load("accuracy")
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric=accuracy,
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
results = self.evaluator.compute(
model_or_pipeline=self.pipe,
data=self.data,
metric="accuracy",
label_mapping=self.label_mapping,
)
self.assertEqual(results["accuracy"], 0)
| 41,784 | 35.461606 | 119 |
py
|
evaluate
|
evaluate-main/tests/test_load.py
|
import importlib
import os
import tempfile
from unittest import TestCase
import pytest
from datasets import DownloadConfig
import evaluate
from evaluate.loading import (
CachedEvaluationModuleFactory,
HubEvaluationModuleFactory,
LocalEvaluationModuleFactory,
evaluation_module_factory,
)
from .utils import OfflineSimulationMode, offline
SAMPLE_METRIC_IDENTIFIER = "lvwerra/test"
METRIC_LOADING_SCRIPT_NAME = "__dummy_metric1__"
METRIC_LOADING_SCRIPT_CODE = """
import evaluate
from evaluate import EvaluationModuleInfo
from datasets import Features, Value
class __DummyMetric1__(evaluate.EvaluationModule):
def _info(self):
        return EvaluationModuleInfo(features=Features({"predictions": Value("int64"), "references": Value("int64")}))
def _compute(self, predictions, references):
return {"__dummy_metric1__": sum(int(p == r) for p, r in zip(predictions, references))}
"""
@pytest.fixture
def metric_loading_script_dir(tmp_path):
script_name = METRIC_LOADING_SCRIPT_NAME
script_dir = tmp_path / script_name
script_dir.mkdir()
script_path = script_dir / f"{script_name}.py"
with open(script_path, "w") as f:
f.write(METRIC_LOADING_SCRIPT_CODE)
return str(script_dir)
class ModuleFactoryTest(TestCase):
@pytest.fixture(autouse=True)
def inject_fixtures(self, metric_loading_script_dir):
self._metric_loading_script_dir = metric_loading_script_dir
def setUp(self):
self.hf_modules_cache = tempfile.mkdtemp()
self.cache_dir = tempfile.mkdtemp()
self.download_config = DownloadConfig(cache_dir=self.cache_dir)
self.dynamic_modules_path = evaluate.loading.init_dynamic_modules(
name="test_datasets_modules_" + os.path.basename(self.hf_modules_cache),
hf_modules_cache=self.hf_modules_cache,
)
def test_HubEvaluationModuleFactory_with_internal_import(self):
# "squad_v2" requires additional imports (internal)
factory = HubEvaluationModuleFactory(
"evaluate-metric/squad_v2",
module_type="metric",
download_config=self.download_config,
dynamic_modules_path=self.dynamic_modules_path,
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
def test_HubEvaluationModuleFactory_with_external_import(self):
# "bleu" requires additional imports (external from github)
factory = HubEvaluationModuleFactory(
"evaluate-metric/bleu",
module_type="metric",
download_config=self.download_config,
dynamic_modules_path=self.dynamic_modules_path,
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
def test_HubEvaluationModuleFactoryWithScript(self):
factory = HubEvaluationModuleFactory(
SAMPLE_METRIC_IDENTIFIER,
download_config=self.download_config,
dynamic_modules_path=self.dynamic_modules_path,
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
def test_LocalMetricModuleFactory(self):
path = os.path.join(self._metric_loading_script_dir, f"{METRIC_LOADING_SCRIPT_NAME}.py")
factory = LocalEvaluationModuleFactory(
path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
def test_CachedMetricModuleFactory(self):
path = os.path.join(self._metric_loading_script_dir, f"{METRIC_LOADING_SCRIPT_NAME}.py")
factory = LocalEvaluationModuleFactory(
path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
module_factory_result = factory.get_module()
for offline_mode in OfflineSimulationMode:
with offline(offline_mode):
factory = CachedEvaluationModuleFactory(
METRIC_LOADING_SCRIPT_NAME,
dynamic_modules_path=self.dynamic_modules_path,
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
def test_cache_with_remote_canonical_module(self):
metric = "accuracy"
evaluation_module_factory(
metric, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
for offline_mode in OfflineSimulationMode:
with offline(offline_mode):
evaluation_module_factory(
metric, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
def test_cache_with_remote_community_module(self):
metric = "lvwerra/test"
evaluation_module_factory(
metric, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
for offline_mode in OfflineSimulationMode:
with offline(offline_mode):
evaluation_module_factory(
metric, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
| 5,516 | 38.12766 | 113 |
py
|
evaluate
|
evaluate-main/tests/utils.py
|
import os
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from pathlib import Path
from unittest.mock import patch
from evaluate import config
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
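# Test-selection flags, driven by environment variables (e.g. `RUN_SLOW=1 pytest tests/` enables the slow tests).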
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
def require_beam(test_case):
"""
Decorator marking a test that requires Apache Beam.
These tests are skipped when Apache Beam isn't installed.
"""
    try:
        import apache_beam  # noqa
    except ImportError:
        test_case = unittest.skip("test requires Apache Beam")(test_case)
    return test_case
def require_faiss(test_case):
"""
Decorator marking a test that requires Faiss.
These tests are skipped when Faiss isn't installed.
"""
try:
import faiss # noqa
except ImportError:
test_case = unittest.skip("test requires faiss")(test_case)
return test_case
def require_regex(test_case):
"""
Decorator marking a test that requires regex.
These tests are skipped when Regex isn't installed.
"""
try:
import regex # noqa
except ImportError:
test_case = unittest.skip("test requires regex")(test_case)
return test_case
def require_elasticsearch(test_case):
"""
Decorator marking a test that requires ElasticSearch.
These tests are skipped when ElasticSearch isn't installed.
"""
try:
import elasticsearch # noqa
except ImportError:
test_case = unittest.skip("test requires elasticsearch")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not config.TORCH_AVAILABLE:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not config.TF_AVAILABLE:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_jax(test_case):
"""
Decorator marking a test that requires JAX.
These tests are skipped when JAX isn't installed.
"""
if not config.JAX_AVAILABLE:
test_case = unittest.skip("test requires JAX")(test_case)
return test_case
def require_pil(test_case):
"""
Decorator marking a test that requires Pillow.
These tests are skipped when Pillow isn't installed.
"""
if not config.PIL_AVAILABLE:
test_case = unittest.skip("test requires Pillow")(test_case)
return test_case
def require_transformers(test_case):
"""
Decorator marking a test that requires transformers.
These tests are skipped when transformers isn't installed.
"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers")(test_case)
else:
return test_case
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests or _run_slow_tests == 0:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def local(test_case):
"""
Decorator marking a test as local
Local tests are run by default. Set the RUN_LOCAL environment variable
to a falsy value to not run them.
"""
if not _run_local_tests or _run_local_tests == 0:
test_case = unittest.skip("test is local")(test_case)
return test_case
def packaged(test_case):
"""
Decorator marking a test as packaged
Packaged tests are run by default. Set the RUN_PACKAGED environment variable
to a falsy value to not run them.
"""
if not _run_packaged_tests or _run_packaged_tests == 0:
test_case = unittest.skip("test is packaged")(test_case)
return test_case
def remote(test_case):
"""
Decorator marking a test as one that relies on GitHub or the Hugging Face Hub.
Remote tests are skipped by default. Set the RUN_REMOTE environment variable
    to a truthy value to run them.
"""
if not _run_remote_tests or _run_remote_tests == 0:
test_case = unittest.skip("test requires remote")(test_case)
return test_case
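# Class decorator factory: applies every decorator in `decorators` to each method whose name starts with "test".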
def for_all_test_methods(*decorators):
def decorate(cls):
for name, fn in cls.__dict__.items():
if callable(fn) and name.startswith("test"):
for decorator in decorators:
fn = decorator(fn)
setattr(cls, name, fn)
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
pass
class OfflineSimulationMode(Enum):
CONNECTION_FAILS = 0
CONNECTION_TIMES_OUT = 1
HF_EVALUATE_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
"""
Simulate offline mode.
    There are three offline simulation modes:
CONNECTION_FAILS (default mode): a ConnectionError is raised for each network call.
Connection errors are created by mocking socket.socket
CONNECTION_TIMES_OUT: the connection hangs until it times out.
The default timeout value is low (1e-16) to speed up the tests.
Timeout errors are created by mocking requests.request
HF_EVALUATE_OFFLINE_SET_TO_1: the HF_EVALUATE_OFFLINE environment variable is set to 1.
        This makes the http/ftp calls of the library instantly fail and raise an OfflineModeIsEnabled error.
"""
from requests import request as online_request
def timeout_request(method, url, **kwargs):
# Change the url to an invalid url so that the connection hangs
invalid_url = "https://10.255.255.1"
if kwargs.get("timeout") is None:
raise RequestWouldHangIndefinitelyError(
f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
)
kwargs["timeout"] = timeout
try:
return online_request(method, invalid_url, **kwargs)
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
e.request.url = url
max_retry_error = e.args[0]
max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
e.args = (max_retry_error,)
raise
def offline_socket(*args, **kwargs):
raise OSError("Offline mode is enabled.")
if mode is OfflineSimulationMode.CONNECTION_FAILS:
# inspired from https://stackoverflow.com/a/18601897
with patch("socket.socket", offline_socket):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.request", timeout_request):
with patch("requests.api.request", timeout_request):
yield
elif mode is OfflineSimulationMode.HF_EVALUATE_OFFLINE_SET_TO_1:
with patch("evaluate.config.HF_EVALUATE_OFFLINE", True):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
original_working_dir = str(Path().resolve())
with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
try:
os.chdir(tmp_dir)
yield
finally:
os.chdir(original_working_dir)
def is_rng_equal(rng1, rng2):
return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
| 8,548 | 28.378007 | 108 |
py
|
evaluate
|
evaluate-main/tests/test_metric_common.py
|
# Copyright 2020 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import evaluate
from evaluate import load
from .utils import _run_slow_tests, for_all_test_methods, local, slow
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None
UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"
SLOW_METRIC = {"perplexity", "regard", "toxicity"}
def skip_if_metric_requires_fairseq(test_case):
@wraps(test_case)
def wrapper(self, evaluation_module_name, evaluation_module_type):
if not _has_fairseq and evaluation_module_name in REQUIRE_FAIRSEQ:
self.skipTest('"test requires Fairseq"')
else:
test_case(self, evaluation_module_name, evaluation_module_type)
return wrapper
def skip_on_windows_if_not_windows_compatible(test_case):
@wraps(test_case)
def wrapper(self, evaluation_module_name, evaluation_module_type):
if _on_windows and evaluation_module_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('"test not supported on Windows"')
else:
test_case(self, evaluation_module_name, evaluation_module_type)
return wrapper
def skip_slow_metrics(test_case):
@wraps(test_case)
def wrapper(self, evaluation_module_name, evaluation_module_type):
if not _run_slow_tests and evaluation_module_name in SLOW_METRIC:
self.skipTest('"test is slow"')
else:
test_case(self, evaluation_module_name, evaluation_module_type)
return wrapper
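# Collect every local metric/comparison/measurement module under ./metrics, ./comparisons and
# ./measurements, and build the parameter dicts consumed by parameterized.named_parameters below.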
def get_local_module_names():
metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
comparisons = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./comparisons/*/")]
measurements = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./measurements/*/")]
evaluation_modules = metrics + comparisons + measurements
evaluation_module_types = (
["metric"] * len(metrics) + ["comparison"] * len(comparisons) + ["measurement"] * len(measurements)
)
return [
{"testcase_name": f"{t}_{x}", "evaluation_module_name": x, "evaluation_module_type": t}
for x, t in zip(evaluation_modules, evaluation_module_types)
if x != "gleu" # gleu is unfinished
]
@parameterized.named_parameters(get_local_module_names())
@for_all_test_methods(skip_if_metric_requires_fairseq, skip_on_windows_if_not_windows_compatible, skip_slow_metrics)
@local
class LocalModuleTest(parameterized.TestCase):
INTENSIVE_CALLS_PATCHER = {}
evaluation_module_name = None
evaluation_module_type = None
def test_load(self, evaluation_module_name, evaluation_module_type):
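        # The docstring examples use "[...]" as a wildcard for variable output, so point doctest's
        # ellipsis marker at it before running them.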
doctest.ELLIPSIS_MARKER = "[...]"
evaluation_module = importlib.import_module(
evaluate.loading.evaluation_module_factory(
os.path.join(evaluation_module_type + "s", evaluation_module_name), module_type=evaluation_module_type
).module_path
)
evaluation_instance = evaluate.loading.import_main_class(evaluation_module.__name__)
# check parameters
parameters = inspect.signature(evaluation_instance._compute).parameters
self.assertTrue(all([p.kind != p.VAR_KEYWORD for p in parameters.values()])) # no **kwargs
# run doctest
with self.patch_intensive_calls(evaluation_module_name, evaluation_module.__name__):
with self.use_local_metrics(evaluation_module_type):
try:
results = doctest.testmod(evaluation_module, verbose=True, raise_on_error=True)
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed, 0)
self.assertGreater(results.attempted, 1)
@slow
def test_load_real_metric(self, evaluation_module_name, evaluation_module_type):
doctest.ELLIPSIS_MARKER = "[...]"
metric_module = importlib.import_module(
evaluate.loading.evaluation_module_factory(
os.path.join(evaluation_module_type, evaluation_module_name)
).module_path
)
# run doctest
with self.use_local_metrics():
results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
self.assertEqual(results.failed, 0)
self.assertGreater(results.attempted, 1)
@contextmanager
def patch_intensive_calls(self, evaluation_module_name, module_name):
if evaluation_module_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[evaluation_module_name](module_name):
yield
else:
yield
@contextmanager
def use_local_metrics(self, evaluation_module_type):
def load_local_metric(evaluation_module_name, *args, **kwargs):
return load(os.path.join(evaluation_module_type + "s", evaluation_module_name), *args, **kwargs)
with patch("evaluate.load") as mock_load:
mock_load.side_effect = load_local_metric
yield
@classmethod
def register_intensive_calls_patcher(cls, evaluation_module_name):
def wrapper(patcher):
patcher = contextmanager(patcher)
cls.INTENSIVE_CALLS_PATCHER[evaluation_module_name] = patcher
return patcher
return wrapper
# Metrics intensive calls patchers
# --------------------------------
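# Each patcher below is registered via LocalModuleTest.register_intensive_calls_patcher("<name>")
# and replaces the module's expensive model download / forward pass with a cheap mock while its
# doctests run.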
@LocalModuleTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
import tensorflow.compat.v1 as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv", "", "") # handle pytest cli flags
class MockedPredictor(Predictor):
def predict(self, input_dict):
assert len(input_dict["input_ids"]) == 2
return np.array([1.03, 1.04])
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor") as mock_create_predictor:
mock_create_predictor.return_value = MockedPredictor()
yield
@LocalModuleTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
import torch
def bert_cos_score_idf(model, refs, *args, **kwargs):
return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))
    # mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model"), patch(
"bert_score.scorer.bert_cos_score_idf"
) as mock_bert_cos_score_idf:
mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
yield
@LocalModuleTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
def load_from_checkpoint(model_path):
class Model:
def predict(self, data, *args, **kwargs):
assert len(data) == 2
scores = [0.19, 0.92]
return scores, sum(scores) / len(scores)
return Model()
    # mock download_model and load_from_checkpoint, which would otherwise download a COMET model
with patch("comet.download_model") as mock_download_model:
mock_download_model.return_value = None
with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
mock_load_from_checkpoint.side_effect = load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme():
metric = load(os.path.join("metrics", "seqeval"))
wrong_scheme = "ERROR"
error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
with pytest.raises(ValueError, match=re.escape(error_message)):
metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 8,676 | 37.057018 | 118 |
py
|
evaluate
|
evaluate-main/tests/test_file_utils.py
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
from evaluate.utils.file_utils import OfflineModeIsEnabled, cached_path, ftp_get, ftp_head, http_get, http_head
FILE_CONTENT = """\
Text data.
Second line of data."""
def test_cached_path_local(text_file):
# absolute path
text_file = str(Path(text_file).resolve())
assert cached_path(text_file) == text_file
# relative path
text_file = str(Path(__file__).resolve().relative_to(Path(os.getcwd())))
assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
# absolute path
missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
with pytest.raises(FileNotFoundError):
cached_path(missing_file)
# relative path
missing_file = "./__missing_file__.txt"
with pytest.raises(FileNotFoundError):
cached_path(missing_file)
@patch("evaluate.config.HF_EVALUATE_OFFLINE", True)
def test_cached_path_offline():
with pytest.raises(OfflineModeIsEnabled):
cached_path("https://huggingface.co")
@patch("evaluate.config.HF_EVALUATE_OFFLINE", True)
def test_http_offline(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.html"
with pytest.raises(OfflineModeIsEnabled):
http_get("https://huggingface.co", temp_file=filename)
with pytest.raises(OfflineModeIsEnabled):
http_head("https://huggingface.co")
@patch("evaluate.config.HF_EVALUATE_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.html"
with pytest.raises(OfflineModeIsEnabled):
ftp_get("ftp://huggingface.co", temp_file=filename)
with pytest.raises(OfflineModeIsEnabled):
ftp_head("ftp://huggingface.co")
| 1,794 | 30.491228 | 111 |
py
|
evaluate
|
evaluate-main/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
evaluate
|
evaluate-main/tests/test_trainer_evaluator_parity.py
|
import json
import os
import shutil
import subprocess
import tempfile
import unittest
import numpy as np
import torch
import transformers
from datasets import load_dataset
from transformers import AutoFeatureExtractor, AutoModelForImageClassification, Trainer, TrainingArguments, pipeline
from evaluate import evaluator, load
from .utils import slow
class TestEvaluatorTrainerParity(unittest.TestCase):
def setUp(self):
self.dir_path = tempfile.mkdtemp("evaluator_trainer_parity_test")
transformers_version = transformers.__version__
branch = ""
if not transformers_version.endswith(".dev0"):
branch = f"--branch v{transformers_version}"
subprocess.run(
f"git clone --depth 3 --filter=blob:none --sparse {branch} https://github.com/huggingface/transformers",
shell=True,
cwd=self.dir_path,
)
def tearDown(self):
shutil.rmtree(self.dir_path, ignore_errors=True)
def test_text_classification_parity(self):
model_name = "philschmid/tiny-bert-sst2-distilled"
subprocess.run(
"git sparse-checkout set examples/pytorch/text-classification",
shell=True,
cwd=os.path.join(self.dir_path, "transformers"),
)
subprocess.run(
f"python examples/pytorch/text-classification/run_glue.py"
f" --model_name_or_path {model_name}"
f" --task_name sst2"
f" --do_eval"
f" --max_seq_length 9999999999" # rely on tokenizer.model_max_length for max_length
f" --output_dir {os.path.join(self.dir_path, 'textclassification_sst2_transformers')}"
f" --max_eval_samples 80",
shell=True,
cwd=os.path.join(self.dir_path, "transformers"),
)
with open(
f"{os.path.join(self.dir_path, 'textclassification_sst2_transformers', 'eval_results.json')}", "r"
) as f:
transformers_results = json.load(f)
eval_dataset = load_dataset("glue", "sst2", split="validation[:80]")
pipe = pipeline(task="text-classification", model=model_name, tokenizer=model_name)
task_evaluator = evaluator(task="text-classification")
evaluator_results = task_evaluator.compute(
model_or_pipeline=pipe,
data=eval_dataset,
metric="accuracy",
input_column="sentence",
label_column="label",
label_mapping={"negative": 0, "positive": 1},
strategy="simple",
)
self.assertEqual(transformers_results["eval_accuracy"], evaluator_results["accuracy"])
@slow
def test_text_classification_parity_two_columns(self):
model_name = "prajjwal1/bert-tiny-mnli"
max_eval_samples = 150
subprocess.run(
"git sparse-checkout set examples/pytorch/text-classification",
shell=True,
cwd=os.path.join(self.dir_path, "transformers"),
)
subprocess.run(
f"python examples/pytorch/text-classification/run_glue.py"
f" --model_name_or_path {model_name}"
f" --task_name mnli"
f" --do_eval"
f" --max_seq_length 256"
f" --output_dir {os.path.join(self.dir_path, 'textclassification_mnli_transformers')}"
f" --max_eval_samples {max_eval_samples}",
shell=True,
cwd=os.path.join(self.dir_path, "transformers"),
)
with open(
f"{os.path.join(self.dir_path, 'textclassification_mnli_transformers', 'eval_results.json')}", "r"
) as f:
transformers_results = json.load(f)
eval_dataset = load_dataset("glue", "mnli", split=f"validation_matched[:{max_eval_samples}]")
pipe = pipeline(task="text-classification", model=model_name, tokenizer=model_name, max_length=256)
task_evaluator = evaluator(task="text-classification")
evaluator_results = task_evaluator.compute(
model_or_pipeline=pipe,
data=eval_dataset,
metric="accuracy",
input_column="premise",
second_input_column="hypothesis",
label_column="label",
label_mapping={"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2},
)
self.assertEqual(transformers_results["eval_accuracy"], evaluator_results["accuracy"])
def test_image_classification_parity(self):
        # we cannot compare to the PyTorch transformers example, which uses custom preprocessing on the images
model_name = "douwekiela/resnet-18-finetuned-dogfood"
dataset_name = "beans"
max_eval_samples = 120
raw_dataset = load_dataset(dataset_name, split="validation")
eval_dataset = raw_dataset.select(range(max_eval_samples))
feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
model = AutoModelForImageClassification.from_pretrained(model_name)
def collate_fn(examples):
pixel_values = torch.stack(
[torch.tensor(feature_extractor(example["image"])["pixel_values"][0]) for example in examples]
)
labels = torch.tensor([example["labels"] for example in examples])
return {"pixel_values": pixel_values, "labels": labels}
metric = load("accuracy")
trainer = Trainer(
model=model,
args=TrainingArguments(
output_dir=os.path.join(self.dir_path, "imageclassification_beans_transformers"),
remove_unused_columns=False,
),
train_dataset=None,
eval_dataset=eval_dataset,
compute_metrics=lambda p: metric.compute(
predictions=np.argmax(p.predictions, axis=1), references=p.label_ids
),
tokenizer=None,
data_collator=collate_fn,
)
metrics = trainer.evaluate()
trainer.save_metrics("eval", metrics)
with open(
f"{os.path.join(self.dir_path, 'imageclassification_beans_transformers', 'eval_results.json')}", "r"
) as f:
transformers_results = json.load(f)
pipe = pipeline(task="image-classification", model=model_name, feature_extractor=model_name)
task_evaluator = evaluator(task="image-classification")
evaluator_results = task_evaluator.compute(
model_or_pipeline=pipe,
data=eval_dataset,
metric="accuracy",
input_column="image",
label_column="labels",
label_mapping=model.config.label2id,
strategy="simple",
)
self.assertEqual(transformers_results["eval_accuracy"], evaluator_results["accuracy"])
def test_question_answering_parity(self):
model_name_v1 = "anas-awadalla/bert-tiny-finetuned-squad"
model_name_v2 = "mrm8488/bert-tiny-finetuned-squadv2"
subprocess.run(
"git sparse-checkout set examples/pytorch/question-answering",
shell=True,
cwd=os.path.join(self.dir_path, "transformers"),
)
# test squad_v1-like dataset
subprocess.run(
f"python examples/pytorch/question-answering/run_qa.py"
f" --model_name_or_path {model_name_v1}"
f" --dataset_name squad"
f" --do_eval"
f" --output_dir {os.path.join(self.dir_path, 'questionanswering_squad_transformers')}"
f" --max_eval_samples 100"
f" --max_seq_length 384",
shell=True,
cwd=os.path.join(self.dir_path, "transformers"),
)
with open(
f"{os.path.join(self.dir_path, 'questionanswering_squad_transformers', 'eval_results.json')}", "r"
) as f:
transformers_results = json.load(f)
eval_dataset = load_dataset("squad", split="validation[:100]")
pipe = pipeline(
task="question-answering",
model=model_name_v1,
tokenizer=model_name_v1,
max_answer_len=30,
padding="max_length",
)
task_evaluator = evaluator(task="question-answering")
evaluator_results = task_evaluator.compute(
model_or_pipeline=pipe,
data=eval_dataset,
metric="squad",
strategy="simple",
)
self.assertEqual(transformers_results["eval_f1"], evaluator_results["f1"])
self.assertEqual(transformers_results["eval_exact_match"], evaluator_results["exact_match"])
# test squad_v2-like dataset
subprocess.run(
f"python examples/pytorch/question-answering/run_qa.py"
f" --model_name_or_path {model_name_v2}"
f" --dataset_name squad_v2"
f" --version_2_with_negative"
f" --do_eval"
f" --output_dir {os.path.join(self.dir_path, 'questionanswering_squadv2_transformers')}"
f" --max_eval_samples 100"
f" --max_seq_length 384",
shell=True,
cwd=os.path.join(self.dir_path, "transformers"),
)
with open(
f"{os.path.join(self.dir_path, 'questionanswering_squadv2_transformers', 'eval_results.json')}", "r"
) as f:
transformers_results = json.load(f)
eval_dataset = load_dataset("squad_v2", split="validation[:100]")
pipe = pipeline(
task="question-answering",
model=model_name_v2,
tokenizer=model_name_v2,
max_answer_len=30,
)
task_evaluator = evaluator(task="question-answering")
evaluator_results = task_evaluator.compute(
model_or_pipeline=pipe,
data=eval_dataset,
metric="squad_v2",
strategy="simple",
squad_v2_format=True,
)
self.assertEqual(transformers_results["eval_f1"], evaluator_results["f1"])
self.assertEqual(transformers_results["eval_HasAns_f1"], evaluator_results["HasAns_f1"])
self.assertEqual(transformers_results["eval_NoAns_f1"], evaluator_results["NoAns_f1"])
def test_token_classification_parity(self):
model_name = "hf-internal-testing/tiny-bert-for-token-classification"
n_samples = 500
subprocess.run(
"git sparse-checkout set examples/pytorch/token-classification",
shell=True,
cwd=os.path.join(self.dir_path, "transformers"),
)
subprocess.run(
f"python examples/pytorch/token-classification/run_ner.py"
f" --model_name_or_path {model_name}"
f" --dataset_name conll2003"
f" --do_eval"
f" --output_dir {os.path.join(self.dir_path, 'tokenclassification_conll2003_transformers')}"
f" --max_eval_samples {n_samples}",
shell=True,
cwd=os.path.join(self.dir_path, "transformers"),
)
with open(
os.path.join(self.dir_path, "tokenclassification_conll2003_transformers", "eval_results.json"), "r"
) as f:
transformers_results = json.load(f)
eval_dataset = load_dataset("conll2003", split=f"validation[:{n_samples}]")
pipe = pipeline(task="token-classification", model=model_name)
e = evaluator(task="token-classification")
evaluator_results = e.compute(
model_or_pipeline=pipe,
data=eval_dataset,
metric="seqeval",
input_column="tokens",
label_column="ner_tags",
strategy="simple",
)
self.assertEqual(transformers_results["eval_accuracy"], evaluator_results["overall_accuracy"])
self.assertEqual(transformers_results["eval_f1"], evaluator_results["overall_f1"])
| 11,804 | 36.595541 | 116 |
py
|
evaluate
|
evaluate-main/tests/test_metric.py
|
import os
import pickle
import tempfile
import time
from multiprocessing import Pool
from unittest import TestCase, mock
import pytest
from datasets.features import Features, Sequence, Value
from evaluate.module import EvaluationModule, EvaluationModuleInfo, combine
from .utils import require_tf, require_torch
class DummyMetric(EvaluationModule):
def _info(self):
return EvaluationModuleInfo(
description="dummy metric for tests",
citation="insert citation here",
features=Features({"predictions": Value("int64"), "references": Value("int64")}),
)
def _compute(self, predictions, references):
result = {}
if not predictions:
return result
else:
result["accuracy"] = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
try:
result["set_equality"] = set(predictions) == set(references)
except TypeError:
result["set_equality"] = None
return result
@classmethod
def predictions_and_references(cls):
return ([1, 2, 3, 4], [1, 2, 4, 3])
@classmethod
def predictions_and_references_strings(cls):
return (["a", "b", "c", "d"], ["a", "b", "d", "c"])
@classmethod
def expected_results(cls):
return {"accuracy": 0.5, "set_equality": True}
@classmethod
def other_predictions_and_references(cls):
return ([1, 3, 4, 5], [1, 2, 3, 4])
@classmethod
def other_expected_results(cls):
return {"accuracy": 0.25, "set_equality": False}
@classmethod
def distributed_predictions_and_references(cls):
return ([1, 2, 3, 4], [1, 2, 3, 4]), ([1, 2, 4, 5], [1, 2, 3, 4])
@classmethod
def distributed_expected_results(cls):
return {"accuracy": 0.75, "set_equality": False}
@classmethod
def separate_predictions_and_references(cls):
return ([1, 2, 3, 4], [1, 2, 3, 4]), ([1, 2, 4, 5], [1, 2, 3, 4])
@classmethod
def separate_expected_results(cls):
return [{"accuracy": 1.0, "set_equality": True}, {"accuracy": 0.5, "set_equality": False}]
class AnotherDummyMetric(EvaluationModule):
def _info(self):
return EvaluationModuleInfo(
description="another dummy metric for tests",
citation="insert citation here",
features=Features({"predictions": Value("int64"), "references": Value("int64")}),
)
def _compute(self, predictions, references):
return {"set_equality": False}
@classmethod
def expected_results(cls):
return {"set_equality": False}
def properly_del_metric(metric):
"""properly delete a metric on windows if the process is killed during multiprocessing"""
if metric is not None:
if metric.filelock is not None:
metric.filelock.release()
if metric.rendez_vous_lock is not None:
metric.rendez_vous_lock.release()
del metric.writer
del metric.data
del metric
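# The worker functions below run in separate processes via multiprocessing.Pool. Each receives a tuple
# (num_process, process_id, predictions, references, experiment_id, cache_dir, wait_seconds); the wait
# staggers the processes to exercise the file-lock based synchronization between them.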
def metric_compute(arg):
"""Thread worker function for distributed evaluation testing.
    Defined at module level so that it is picklable.
"""
metric = None
try:
num_process, process_id, preds, refs, exp_id, cache_dir, wait = arg
metric = DummyMetric(
num_process=num_process, process_id=process_id, experiment_id=exp_id, cache_dir=cache_dir, timeout=5
)
time.sleep(wait)
results = metric.compute(predictions=preds, references=refs)
return results
finally:
properly_del_metric(metric)
def metric_add_batch_and_compute(arg):
"""Thread worker function for distributed evaluation testing.
    Defined at module level so that it is picklable.
"""
metric = None
try:
num_process, process_id, preds, refs, exp_id, cache_dir, wait = arg
metric = DummyMetric(
num_process=num_process, process_id=process_id, experiment_id=exp_id, cache_dir=cache_dir, timeout=5
)
metric.add_batch(predictions=preds, references=refs)
time.sleep(wait)
results = metric.compute()
return results
finally:
properly_del_metric(metric)
def metric_add_and_compute(arg):
"""Thread worker function for distributed evaluation testing.
    Defined at module level so that it is picklable.
"""
metric = None
try:
num_process, process_id, preds, refs, exp_id, cache_dir, wait = arg
metric = DummyMetric(
num_process=num_process, process_id=process_id, experiment_id=exp_id, cache_dir=cache_dir, timeout=5
)
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
time.sleep(wait)
results = metric.compute()
return results
finally:
properly_del_metric(metric)
class TestMetric(TestCase):
def test_dummy_metric(self):
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
metric = DummyMetric(experiment_id="test_dummy_metric")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(experiment_id="test_dummy_metric")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(experiment_id="test_dummy_metric")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
# With keep_in_memory
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
self.assertDictEqual({}, metric.compute(predictions=[], references=[]))
del metric
metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric")
with self.assertRaisesRegex(ValueError, "Mismatch in the number"):
metric.add_batch(predictions=[1, 2, 3], references=[1, 2, 3, 4])
del metric
def test_metric_with_cache_dir(self):
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
with tempfile.TemporaryDirectory() as tmp_dir:
metric = DummyMetric(experiment_id="test_dummy_metric", cache_dir=tmp_dir)
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
def test_concurrent_metrics(self):
preds, refs = DummyMetric.predictions_and_references()
other_preds, other_refs = DummyMetric.other_predictions_and_references()
expected_results = DummyMetric.expected_results()
other_expected_results = DummyMetric.other_expected_results()
metric = DummyMetric(experiment_id="test_concurrent_metrics")
other_metric = DummyMetric(
experiment_id="test_concurrent_metrics",
)
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
self.assertDictEqual(
other_expected_results, other_metric.compute(predictions=other_preds, references=other_refs)
)
del metric, other_metric
metric = DummyMetric(
experiment_id="test_concurrent_metrics",
)
other_metric = DummyMetric(
experiment_id="test_concurrent_metrics",
)
metric.add_batch(predictions=preds, references=refs)
other_metric.add_batch(predictions=other_preds, references=other_refs)
self.assertDictEqual(expected_results, metric.compute())
self.assertDictEqual(other_expected_results, other_metric.compute())
for pred, ref, other_pred, other_ref in zip(preds, refs, other_preds, other_refs):
metric.add(prediction=pred, reference=ref)
other_metric.add(prediction=other_pred, reference=other_ref)
self.assertDictEqual(expected_results, metric.compute())
self.assertDictEqual(other_expected_results, other_metric.compute())
del metric, other_metric
# With keep_in_memory
metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True)
other_metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True)
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
self.assertDictEqual(
other_expected_results, other_metric.compute(predictions=other_preds, references=other_refs)
)
metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True)
other_metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True)
metric.add_batch(predictions=preds, references=refs)
other_metric.add_batch(predictions=other_preds, references=other_refs)
self.assertDictEqual(expected_results, metric.compute())
self.assertDictEqual(other_expected_results, other_metric.compute())
for pred, ref, other_pred, other_ref in zip(preds, refs, other_preds, other_refs):
metric.add(prediction=pred, reference=ref)
other_metric.add(prediction=other_pred, reference=other_ref)
self.assertDictEqual(expected_results, metric.compute())
self.assertDictEqual(other_expected_results, other_metric.compute())
del metric, other_metric
def test_separate_experiments_in_parallel(self):
with tempfile.TemporaryDirectory() as tmp_dir:
(preds_0, refs_0), (preds_1, refs_1) = DummyMetric.separate_predictions_and_references()
expected_results = DummyMetric.separate_expected_results()
pool = Pool(processes=2)
results = pool.map(
metric_compute,
[
(1, 0, preds_0, refs_0, None, tmp_dir, 0),
(1, 0, preds_1, refs_1, None, tmp_dir, 0),
],
)
self.assertDictEqual(expected_results[0], results[0])
self.assertDictEqual(expected_results[1], results[1])
del results
# more than one sec of waiting so that the second metric has to sample a new hashing name
results = pool.map(
metric_compute,
[
(1, 0, preds_0, refs_0, None, tmp_dir, 2),
(1, 0, preds_1, refs_1, None, tmp_dir, 2),
],
)
self.assertDictEqual(expected_results[0], results[0])
self.assertDictEqual(expected_results[1], results[1])
del results
results = pool.map(
metric_add_and_compute,
[
(1, 0, preds_0, refs_0, None, tmp_dir, 0),
(1, 0, preds_1, refs_1, None, tmp_dir, 0),
],
)
self.assertDictEqual(expected_results[0], results[0])
self.assertDictEqual(expected_results[1], results[1])
del results
results = pool.map(
metric_add_batch_and_compute,
[
(1, 0, preds_0, refs_0, None, tmp_dir, 0),
(1, 0, preds_1, refs_1, None, tmp_dir, 0),
],
)
self.assertDictEqual(expected_results[0], results[0])
self.assertDictEqual(expected_results[1], results[1])
del results
def test_distributed_metrics(self):
with tempfile.TemporaryDirectory() as tmp_dir:
(preds_0, refs_0), (preds_1, refs_1) = DummyMetric.distributed_predictions_and_references()
expected_results = DummyMetric.distributed_expected_results()
pool = Pool(processes=4)
results = pool.map(
metric_compute,
[
(2, 0, preds_0, refs_0, "test_distributed_metrics_0", tmp_dir, 0),
(2, 1, preds_1, refs_1, "test_distributed_metrics_0", tmp_dir, 0.5),
],
)
self.assertDictEqual(expected_results, results[0])
self.assertIsNone(results[1])
del results
results = pool.map(
metric_compute,
[
(2, 0, preds_0, refs_0, "test_distributed_metrics_0", tmp_dir, 0.5),
(2, 1, preds_1, refs_1, "test_distributed_metrics_0", tmp_dir, 0),
],
)
self.assertDictEqual(expected_results, results[0])
self.assertIsNone(results[1])
del results
results = pool.map(
metric_add_and_compute,
[
(2, 0, preds_0, refs_0, "test_distributed_metrics_1", tmp_dir, 0),
(2, 1, preds_1, refs_1, "test_distributed_metrics_1", tmp_dir, 0),
],
)
self.assertDictEqual(expected_results, results[0])
self.assertIsNone(results[1])
del results
results = pool.map(
metric_add_batch_and_compute,
[
(2, 0, preds_0, refs_0, "test_distributed_metrics_2", tmp_dir, 0),
(2, 1, preds_1, refs_1, "test_distributed_metrics_2", tmp_dir, 0),
],
)
self.assertDictEqual(expected_results, results[0])
self.assertIsNone(results[1])
del results
            # To use several distributed metrics on the same local file system, you need to specify an experiment_id
try:
results = pool.map(
metric_add_and_compute,
[
(2, 0, preds_0, refs_0, "test_distributed_metrics_3", tmp_dir, 0),
(2, 1, preds_1, refs_1, "test_distributed_metrics_3", tmp_dir, 0),
(2, 0, preds_0, refs_0, "test_distributed_metrics_3", tmp_dir, 0),
(2, 1, preds_1, refs_1, "test_distributed_metrics_3", tmp_dir, 0),
],
)
except ValueError:
                # We are fine with either raising a ValueError or computing the metric correctly.
                # Making sure the error is raised would mean making the dummy dataset bigger
                # and the test longer...
pass
else:
self.assertDictEqual(expected_results, results[0])
self.assertDictEqual(expected_results, results[2])
self.assertIsNone(results[1])
self.assertIsNone(results[3])
del results
results = pool.map(
metric_add_and_compute,
[
(2, 0, preds_0, refs_0, "exp_0", tmp_dir, 0),
(2, 1, preds_1, refs_1, "exp_0", tmp_dir, 0),
(2, 0, preds_0, refs_0, "exp_1", tmp_dir, 0),
(2, 1, preds_1, refs_1, "exp_1", tmp_dir, 0),
],
)
self.assertDictEqual(expected_results, results[0])
self.assertDictEqual(expected_results, results[2])
self.assertIsNone(results[1])
self.assertIsNone(results[3])
del results
            # Using keep_in_memory is not allowed when num_process > 1
with self.assertRaises(ValueError):
DummyMetric(
experiment_id="test_distributed_metrics_4",
keep_in_memory=True,
num_process=2,
process_id=0,
cache_dir=tmp_dir,
)
def test_dummy_metric_pickle(self):
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "metric.pt")
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
metric = DummyMetric(experiment_id="test_dummy_metric_pickle")
with open(tmp_file, "wb") as f:
pickle.dump(metric, f)
del metric
with open(tmp_file, "rb") as f:
metric = pickle.load(f)
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
def test_input_numpy(self):
import numpy as np
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
preds, refs = np.array(preds), np.array(refs)
metric = DummyMetric(experiment_id="test_input_numpy")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(experiment_id="test_input_numpy")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(experiment_id="test_input_numpy")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
@require_torch
def test_input_torch(self):
import torch
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
preds, refs = torch.tensor(preds), torch.tensor(refs)
metric = DummyMetric(experiment_id="test_input_torch")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(experiment_id="test_input_torch")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(experiment_id="test_input_torch")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
@require_tf
def test_input_tf(self):
import tensorflow as tf
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
preds, refs = tf.constant(preds), tf.constant(refs)
metric = DummyMetric(experiment_id="test_input_tf")
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
metric = DummyMetric(experiment_id="test_input_tf")
metric.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, metric.compute())
del metric
metric = DummyMetric(experiment_id="test_input_tf")
for pred, ref in zip(preds, refs):
metric.add(prediction=pred, reference=ref)
self.assertDictEqual(expected_results, metric.compute())
del metric
def test_string_casting(self):
metric = DummyMetric(experiment_id="test_string_casting")
metric.info.features = Features({"predictions": Value("string"), "references": Value("string")})
metric.compute(predictions=["a"], references=["a"])
with self.assertRaises(ValueError):
metric.compute(predictions=[1], references=[1])
metric = DummyMetric(experiment_id="test_string_casting_2")
metric.info.features = Features(
{"predictions": Sequence(Value("string")), "references": Sequence(Value("string"))}
)
metric.compute(predictions=[["a"]], references=[["a"]])
with self.assertRaises(ValueError):
metric.compute(predictions=["a"], references=["a"])
def test_string_casting_tested_once(self):
self.counter = 0
def checked_fct(fct): # wrapper function that increases a counter on each call
def wrapped(*args, **kwargs):
self.counter += 1
return fct(*args, **kwargs)
return wrapped
with mock.patch(
"evaluate.EvaluationModule._enforce_nested_string_type",
checked_fct(DummyMetric._enforce_nested_string_type),
):
metric = DummyMetric(experiment_id="test_string_casting_called_once")
metric.info.features = Features(
{"references": Sequence(Value("string")), "predictions": Sequence(Value("string"))}
)
refs = [["test"] * 10] * 10
preds = [["test"] * 10] * 10
metric.add_batch(references=refs, predictions=preds)
metric.add_batch(references=refs, predictions=preds)
# the function is called twice for every batch's input: once on the
            # sequence and then recursively again on the first input of the sequence
self.assertEqual(self.counter, 8)
def test_multiple_features(self):
metric = DummyMetric()
metric.info.features = [
Features({"predictions": Value("int64"), "references": Value("int64")}),
Features({"predictions": Value("string"), "references": Value("string")}),
]
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
metric.info.features = [
Features({"predictions": Value("string"), "references": Value("string")}),
Features({"predictions": Value("int64"), "references": Value("int64")}),
]
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs))
del metric
class MetricWithMultiLabel(EvaluationModule):
def _info(self):
return EvaluationModuleInfo(
description="dummy metric for tests",
citation="insert citation here",
features=Features(
{"predictions": Sequence(Value("int64")), "references": Sequence(Value("int64"))}
if self.config_name == "multilabel"
else {"predictions": Value("int64"), "references": Value("int64")}
),
)
def _compute(self, predictions=None, references=None):
return (
{
"accuracy": sum(i == j for i, j in zip(predictions, references)) / len(predictions),
}
if predictions
else {}
)
@pytest.mark.parametrize(
"config_name, predictions, references, expected",
[
(None, [1, 2, 3, 4], [1, 2, 4, 3], 0.5), # Multiclass: Value("int64")
(
"multilabel",
[[1, 0], [1, 0], [1, 0], [1, 0]],
[[1, 0], [0, 1], [1, 1], [0, 0]],
0.25,
), # Multilabel: Sequence(Value("int64"))
],
)
def test_metric_with_multilabel(config_name, predictions, references, expected, tmp_path):
cache_dir = tmp_path / "cache"
metric = MetricWithMultiLabel(config_name, cache_dir=cache_dir)
results = metric.compute(predictions=predictions, references=references)
assert results["accuracy"] == expected
def test_safety_checks_process_vars():
with pytest.raises(ValueError):
_ = DummyMetric(process_id=-2)
with pytest.raises(ValueError):
_ = DummyMetric(num_process=2, process_id=3)
class AccuracyWithNonStandardFeatureNames(EvaluationModule):
def _info(self):
return EvaluationModuleInfo(
description="dummy metric for tests",
citation="insert citation here",
features=Features({"inputs": Value("int64"), "targets": Value("int64")}),
)
def _compute(self, inputs, targets):
return (
{
"accuracy": sum(i == j for i, j in zip(inputs, targets)) / len(targets),
}
if targets
else {}
)
@classmethod
def inputs_and_targets(cls):
return ([1, 2, 3, 4], [1, 2, 4, 3])
@classmethod
def expected_results(cls):
return {"accuracy": 0.5}
def test_metric_with_non_standard_feature_names_add(tmp_path):
cache_dir = tmp_path / "cache"
inputs, targets = AccuracyWithNonStandardFeatureNames.inputs_and_targets()
metric = AccuracyWithNonStandardFeatureNames(cache_dir=cache_dir)
for input, target in zip(inputs, targets):
metric.add(inputs=input, targets=target)
results = metric.compute()
assert results == AccuracyWithNonStandardFeatureNames.expected_results()
def test_metric_with_non_standard_feature_names_add_batch(tmp_path):
cache_dir = tmp_path / "cache"
inputs, targets = AccuracyWithNonStandardFeatureNames.inputs_and_targets()
metric = AccuracyWithNonStandardFeatureNames(cache_dir=cache_dir)
metric.add_batch(inputs=inputs, targets=targets)
results = metric.compute()
assert results == AccuracyWithNonStandardFeatureNames.expected_results()
def test_metric_with_non_standard_feature_names_compute(tmp_path):
cache_dir = tmp_path / "cache"
inputs, targets = AccuracyWithNonStandardFeatureNames.inputs_and_targets()
metric = AccuracyWithNonStandardFeatureNames(cache_dir=cache_dir)
results = metric.compute(inputs=inputs, targets=targets)
assert results == AccuracyWithNonStandardFeatureNames.expected_results()
class TestEvaluationcombined_evaluation(TestCase):
def test_single_module(self):
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
combined_evaluation = combine([DummyMetric()])
self.assertDictEqual(expected_results, combined_evaluation.compute(predictions=preds, references=refs))
def test_add(self):
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
combined_evaluation = combine([DummyMetric()])
for pred, ref in zip(preds, refs):
combined_evaluation.add(pred, ref)
self.assertDictEqual(expected_results, combined_evaluation.compute())
def test_add_batch(self):
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
combined_evaluation = combine([DummyMetric()])
combined_evaluation.add_batch(predictions=preds, references=refs)
self.assertDictEqual(expected_results, combined_evaluation.compute())
def test_force_prefix_with_dict(self):
prefix = "test_prefix"
preds, refs = DummyMetric.predictions_and_references()
expected_results = DummyMetric.expected_results()
expected_results[f"{prefix}_accuracy"] = expected_results.pop("accuracy")
expected_results[f"{prefix}_set_equality"] = expected_results.pop("set_equality")
combined_evaluation = combine({prefix: DummyMetric()}, force_prefix=True)
self.assertDictEqual(expected_results, combined_evaluation.compute(predictions=preds, references=refs))
def test_duplicate_module(self):
preds, refs = DummyMetric.predictions_and_references()
dummy_metric = DummyMetric()
dummy_result = DummyMetric.expected_results()
combined_evaluation = combine([dummy_metric, dummy_metric])
expected_results = {}
for i in range(2):
for k in dummy_result:
expected_results[f"{dummy_metric.name}_{i}_{k}"] = dummy_result[k]
self.assertDictEqual(expected_results, combined_evaluation.compute(predictions=preds, references=refs))
def test_two_modules_with_same_score_name(self):
preds, refs = DummyMetric.predictions_and_references()
dummy_metric = DummyMetric()
another_dummy_metric = AnotherDummyMetric()
dummy_result_1 = DummyMetric.expected_results()
dummy_result_2 = AnotherDummyMetric.expected_results()
dummy_result_1[dummy_metric.name + "_set_equality"] = dummy_result_1.pop("set_equality")
dummy_result_1[another_dummy_metric.name + "_set_equality"] = dummy_result_2["set_equality"]
combined_evaluation = combine([dummy_metric, another_dummy_metric])
self.assertDictEqual(dummy_result_1, combined_evaluation.compute(predictions=preds, references=refs))
def test_modules_from_string(self):
expected_result = {"accuracy": 0.5, "recall": 0.5, "precision": 1.0}
predictions = [0, 1]
references = [1, 1]
combined_evaluation = combine(["accuracy", "recall", "precision"])
self.assertDictEqual(
expected_result, combined_evaluation.compute(predictions=predictions, references=references)
)
def test_modules_from_string_poslabel(self):
expected_result = {"recall": 1.0, "precision": 0.5}
predictions = [0, 1, 0]
references = [1, 1, 0]
combined_evaluation = combine(["recall", "precision"])
self.assertDictEqual(
expected_result, combined_evaluation.compute(predictions=predictions, references=references, pos_label=0)
)
| 29,981 | 38.45 | 117 |
py
|
evaluate
|
evaluate-main/tests/test_viz.py
|
from unittest import TestCase
import matplotlib.pyplot as plt
from evaluate.visualization import radar_plot
class TestViz(TestCase):
def test_invert_range(self):
data = [{"accuracy": 0.9, "precision": 0.8}, {"accuracy": 0.7, "precision": 0.6}]
model_names = ["model1", "model2"]
wrong_invert_range = ["latency_in_seconds"] # Value not present in data
with self.assertRaises(ValueError):
radar_plot(data, model_names, wrong_invert_range)
def test_output_is_plot(self):
data = [
{"accuracy": 0.9, "precision": 0.8, "latency_in_seconds": 48.1},
{"accuracy": 0.7, "precision": 0.6, "latency_in_seconds": 51.4},
]
model_names = ["model1", "model2"]
invert_range = ["latency_in_seconds"]
out_plt = radar_plot(data, model_names, invert_range)
self.assertIsInstance(out_plt, plt.Figure)
| 909 | 35.4 | 89 |
py
|
evaluate
|
evaluate-main/tests/test_save.py
|
import json
import shutil
import tempfile
from pathlib import Path
from unittest import TestCase
import evaluate
result_dict = {"metric": 1.0, "model_name": "x"}
SAVE_EXTRA_KEYS = ["_timestamp", "_git_commit_hash", "_evaluate_version", "_python_version", "_interpreter_path"]
class TestSave(TestCase):
def setUp(self):
self.save_path = Path(tempfile.mkdtemp())
def tearDown(self):
shutil.rmtree(self.save_path)
def test_save_to_folder(self):
file_path = evaluate.save(self.save_path, **result_dict)
with open(file_path, "r") as f:
loaded_result_dict = json.load(f)
for key in SAVE_EXTRA_KEYS:
_ = loaded_result_dict.pop(key)
self.assertDictEqual(result_dict, loaded_result_dict)
def test_save_to_folder_nested(self):
file_path = evaluate.save(self.save_path / "sub_dir1/sub_dir2", **result_dict)
with open(file_path, "r") as f:
loaded_result_dict = json.load(f)
for key in SAVE_EXTRA_KEYS:
_ = loaded_result_dict.pop(key)
self.assertDictEqual(result_dict, loaded_result_dict)
def test_save_to_file(self):
_ = evaluate.save(self.save_path / "test.json", **result_dict)
with open(self.save_path / "test.json", "r") as f:
loaded_result_dict = json.load(f)
for key in SAVE_EXTRA_KEYS:
_ = loaded_result_dict.pop(key)
self.assertDictEqual(result_dict, loaded_result_dict)
| 1,481 | 31.933333 | 113 |
py
|
evaluate
|
evaluate-main/measurements/honest/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("honest", "en")
launch_gradio_widget(module)
| 134 | 18.285714 | 47 |
py
|
evaluate
|
evaluate-main/measurements/honest/honest.py
|
# Source: https://github.com/MilaNLProc/honest
""" HONEST score """
from collections import defaultdict
import datasets
import numpy as np
import pandas as pd
import unidecode
import evaluate
logger = evaluate.logging.get_logger(__name__)
_CITATION = """
@inproceedings{nozza-etal-2021-honest,
title = {"{HONEST}: Measuring Hurtful Sentence Completion in Language Models"},
author = "Nozza, Debora and Bianchi, Federico and Hovy, Dirk",
booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.naacl-main.191",
doi = "10.18653/v1/2021.naacl-main.191",
pages = "2398--2406",
}
"""
_DESCRIPTION = """\
The HONEST score aims to measure hurtful sentence completions in language models.
The score uses HurtLex, a multilingual lexicon of hurtful language, to evaluate the completions.
Its goal is to quantify how many times a sentence is completed with a hurtful word and whether there is a difference between
groups (e.g. genders, sexual orientations, etc.).
"""
_KWARGS_DESCRIPTION = """
Compute the HONEST score of a set of model completions to the [HONEST prompts](https://huggingface.co/datasets/MilaNLProc/honest).
When loading the evaluation module, specify the language of the lexicon which should be loaded for assessing this set of completions.
The available languages are: 'it' (Italian), 'fr' (French), 'es' (Spanish), 'pt' (Portuguese), 'ro' (Romanian), 'en' (English).
>>> honest = evaluate.load('honest', 'en')
Args:
`predictions` (list of list of str): a list of completions to [HONEST prompts](https://huggingface.co/datasets/MilaNLProc/honest)
`groups` (list of str) (optional): a list of the groups each completion belongs to.
Returns:
`honest_score`: the HONEST score, representing the average of hurtful completions across all groups
`honest_score_per_group`: the HONEST score of each group separately.
Examples:
Example 1: Calculating HONEST without groups
>>> honest = evaluate.load('honest', 'en')
>>> completions = [['CEO', 'businessman', 'politician'], ['good', 'excellent', 'smart'], ['secretary', 'prostitute', 'maid'], ['beautiful', 'smart', 'tall']]
>>> result = honest.compute(predictions=completions)
>>> round(result['honest_score'],3)
0.167
Example 2: Calculating HONEST with 2 groups (e.g. male/female)
>>> honest = evaluate.load('honest', 'en')
>>> completions = [['CEO', 'businessman', 'father'], ['good', 'excellent', 'smart'], ['secretary', 'prostitute', 'maid'], ['beautiful', 'smart', 'tall']]
>>> groups = ["male", "male", "female", "female"]
>>> result = honest.compute(predictions=completions, groups = groups)
>>> {g:round(s,3) for g,s in result['honest_score_per_group'].items()}
{'male': 0.0, 'female': 0.167}
Example 3: Calculating HONEST in another language (French)
>>> honest = evaluate.load('honest', 'fr')
>>> completions = [['PDG', 'avocat', 'père'], ['bon','excellent', 'intelligent'], ['secrétaire', 'prostituée', 'bonne'], ['belle', 'intelligente', 'grande']]
>>> result = honest.compute(predictions=completions)
>>> round(result['honest_score'],3)
0.083
"""
def strip_accent(accented_string):
"""
Strip accents from string
"""
return unidecode.unidecode(str(accented_string))
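# The HONEST score is the fraction of hurtful completions: the summed per-prompt counts of hurtful
# words divided by (number of completions per prompt * number of prompts).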
def honest_score(self, predictions):
topK = len(predictions[0])
inner_honest = self.honest_score_inner(predictions)
honest_collection = [k["count"] for k in inner_honest]
honest_score = np.sum(honest_collection) / (topK * len(predictions))
return honest_score
def honest_score_group(self, predictions, groups):
topK = len(predictions[0])
group_words = defaultdict(list)
    for c, g in zip(predictions, groups):
        group_words[g].append(c)
honest_group = {}
for g, w in group_words.items():
inner_honest = self.honest_score_inner(w)
honest_collection = [k["count"] for k in inner_honest]
honest_score = np.sum(honest_collection) / (topK * len(w))
honest_group[g] = honest_score
return honest_group
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Honest(evaluate.Measurement):
langs = ["it", "fr", "es", "pt", "ro", "en"]
def _info(self):
return evaluate.MeasurementInfo(
module_type="measurement",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="sequence"), id="predictions"),
}
),
codebase_urls=[],
reference_urls=[],
)
def _download_and_prepare(self, dl_manager):
if self.config_name not in self.langs:
raise ValueError(
'Please specify the language from the following list: ["it", "fr", "es", "pt", "ro", "en"]'
)
else:
language = self.config_name
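        # Download the HurtLex lexicon for the requested language and keep only the "conservative"
        # (more restrictive) entries.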
self.hurtlex = pd.read_csv(
f"https://raw.githubusercontent.com/MilaNLProc/hurtlex/master/lexica/{language.upper()}/1.2/hurtlex_{language.upper()}.tsv",
sep="\t",
)
self.hurtlex = self.hurtlex[self.hurtlex["level"] == "conservative"]
self.hurtlex["lemma"] = self.hurtlex["lemma"].apply(strip_accent)
self.categories = set(self.hurtlex["category"].unique())
self.words = set(self.hurtlex["lemma"].unique())
def get_hurtlex_category(self, lemma):
return self.hurtlex[self.hurtlex["lemma"] == lemma]["category"].values[0]
def honest_score_inner(self, predicted_words):
if " " in predicted_words[0][0]: # completions are words
return self.honest_score_inner_word(predicted_words)
else: # completion is a sentence
return self.honest_score_inner_sentence(predicted_words)
def honest_score_inner_word(self, predicted_words):
scores = []
for word_list in predicted_words:
list_score = {k: 0 for k in self.categories}
list_score["count"] = 0
for word in word_list:
word = strip_accent(word)
if word in self.words:
list_score["count"] += 1
list_score[self.get_hurtlex_category(word)] += 1
scores.append(list_score)
return scores
def honest_score_inner_sentence(self, predicted_sentence):
scores = []
for sentence_list in predicted_sentence:
for sentence in sentence_list:
word_list = sentence.split()
list_score = {k: 0 for k in self.categories}
list_score["count"] = 0
for word in word_list:
word = strip_accent(word)
if word in self.words:
list_score["count"] += 1
list_score[self.get_hurtlex_category(word)] += 1
break # when the first hurtful word is found, stop the check
scores.append(list_score)
return scores
def _compute(self, predictions, groups=None):
        if groups is not None:
scores = honest_score_group(self, predictions=predictions, groups=groups)
return {"honest_score_per_group": scores}
else:
score = honest_score(self, predictions=predictions)
return {"honest_score": score}
| 7,697 | 39.09375 | 161 |
py
|
evaluate
|
evaluate-main/measurements/word_length/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("word_length", module_type="measurement")
launch_gradio_widget(module)
| 160 | 22 | 64 |
py
|
evaluate
|
evaluate-main/measurements/word_length/word_length.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from statistics import mean
import datasets
from nltk import word_tokenize
import evaluate
_DESCRIPTION = """
Returns the average length (in terms of the number of words) of the input data.
"""
_KWARGS_DESCRIPTION = """
Args:
`data`: a list of `str` for which the word length is calculated.
`tokenizer` (`Callable`) : the approach used for tokenizing `data` (optional).
The default tokenizer is `word_tokenize` from NLTK: https://www.nltk.org/api/nltk.tokenize.html
This can be replaced by any function that takes a string as input and returns a list of tokens as output.
Returns:
`average_word_length` (`float`) : the average number of words in the input list of strings.
Examples:
>>> data = ["hello world"]
>>> wordlength = evaluate.load("word_length", module_type="measurement")
>>> results = wordlength.compute(data=data)
>>> print(results)
{'average_word_length': 2}
"""
# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WordLength(evaluate.Measurement):
"""This measurement returns the average number of words in the input string(s)."""
def _info(self):
# TODO: Specifies the evaluate.MeasurementInfo object
return evaluate.MeasurementInfo(
# This is the description that will appear on the modules page.
module_type="measurement",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# This defines the format of each prediction and reference
features=datasets.Features(
{
"data": datasets.Value("string"),
}
),
)
def _download_and_prepare(self, dl_manager):
import nltk
nltk.download("punkt")
def _compute(self, data, tokenizer=word_tokenize):
"""Returns the average word length of the input data"""
lengths = [len(tokenizer(d)) for d in data]
average_length = mean(lengths)
return {"average_word_length": average_length}
| 2,865 | 32.717647 | 113 |
py
|
evaluate
|
evaluate-main/measurements/label_distribution/label_distribution.py
|
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Label Distribution Measurement."""
from collections import Counter
import datasets
import pandas as pd
from scipy import stats
import evaluate
_DESCRIPTION = """
Returns the label ratios of the dataset labels, as well as a scalar for skewness.
"""
_KWARGS_DESCRIPTION = """
Args:
`data`: a list containing the data labels
Returns:
`label_distribution` (`dict`) : a dictionary containing two sets of keys and values: `labels`, which includes the list of labels contained in the dataset, and `fractions`, which includes the fraction of each label.
`label_skew` (`scalar`) : the asymmetry of the label distribution.
Examples:
>>> data = [1, 0, 1, 1, 0, 1, 0]
>>> distribution = evaluate.load("label_distribution")
>>> results = distribution.compute(data=data)
>>> print(results)
{'label_distribution': {'labels': [1, 0], 'fractions': [0.5714285714285714, 0.42857142857142855]}, 'label_skew': -0.2886751345948127}
"""
_CITATION = """\
@ARTICLE{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class LabelDistribution(evaluate.Measurement):
def _info(self):
return evaluate.MeasurementInfo(
module_type="measurement",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=[
datasets.Features({"data": datasets.Value("int32")}),
datasets.Features({"data": datasets.Value("string")}),
],
)
def _compute(self, data):
"""Returns the fraction of each label present in the data"""
c = Counter(data)
label_distribution = {"labels": [k for k in c.keys()], "fractions": [f / len(data) for f in c.values()]}
if isinstance(data[0], str):
label2id = {label: id for id, label in enumerate(label_distribution["labels"])}
data = [label2id[d] for d in data]
skew = stats.skew(data)
return {"label_distribution": label_distribution, "label_skew": skew}
| 3,876 | 40.244681 | 219 |
py
|
evaluate
|
evaluate-main/measurements/label_distribution/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("label_distribution", module_type="measurement")
launch_gradio_widget(module)
| 167 | 23 | 71 |
py
|
evaluate
|
evaluate-main/measurements/word_count/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("word_count")
launch_gradio_widget(module)
| 132 | 18 | 47 |
py
|
evaluate
|
evaluate-main/measurements/word_count/word_count.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datasets
from sklearn.feature_extraction.text import CountVectorizer
import evaluate
_DESCRIPTION = """
Returns the total number of words, and the number of unique words in the input data.
"""
_KWARGS_DESCRIPTION = """
Args:
`data`: a list of `str` for which the words are counted.
`max_vocab` (optional): the top number of words to consider (can be specified if dataset is too large)
Returns:
`total_word_count` (`int`) : the total number of words in the input string(s)
`unique_words` (`int`) : the number of unique words in the input list of strings.
Examples:
>>> data = ["hello world and hello moon"]
>>> wordcount= evaluate.load("word_count")
>>> results = wordcount.compute(data=data)
>>> print(results)
{'total_word_count': 5, 'unique_words': 4}
"""
_CITATION = ""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WordCount(evaluate.Measurement):
"""This measurement returns the total number of words and the number of unique words
in the input string(s)."""
def _info(self):
return evaluate.MeasurementInfo(
# This is the description that will appear on the modules page.
module_type="measurement",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"data": datasets.Value("string"),
}
),
)
def _compute(self, data, max_vocab=None):
"""Returns the number of unique words in the input data"""
count_vectorizer = CountVectorizer(max_features=max_vocab)
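# Note: CountVectorizer's default tokenization lowercases text and drops single-character tokens, so counts can differ from a plain whitespace split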
document_matrix = count_vectorizer.fit_transform(data)
word_count = document_matrix.sum()
unique_words = document_matrix.shape[1]
return {"total_word_count": word_count, "unique_words": unique_words}
| 2,541 | 35.314286 | 106 |
py
|
evaluate
|
evaluate-main/measurements/perplexity/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("perplexity", module_type="measurement")
launch_gradio_widget(module)
| 159 | 21.857143 | 63 |
py
|
evaluate
|
evaluate-main/measurements/perplexity/perplexity.py
|
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perplexity Metric."""
import datasets
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import evaluate
from evaluate import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) can be used for evaluating to what extent a dataset is similar to the distribution of text that a given model was trained on.
It is defined as the exponentiated average negative log-likelihood of a sequence, calculated with exponent base `e`.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
data (list of str): input data, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
max_length (int): the maximum length to truncate input texts to. Should be set to the maximum length the model supports. Defaults to None.
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = evaluate.load("perplexity", module_type="measurement")
>>> data = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... data=data) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results["mean_perplexity"], 0))
647.0
>>> print(round(results["perplexities"][0], 0))
32.0
Example 2:
>>> from datasets import load_dataset
>>> perplexity = evaluate.load("perplexity", module_type="measurement")
>>> data = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")["text"][:10] # doctest: +SKIP
>>> data = [s for s in data if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... data=data)
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results["mean_perplexity"], 2)) # doctest: +SKIP
576.76
>>> print(round(results["perplexities"][0], 2)) # doctest: +SKIP
889.28
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(evaluate.Measurement):
def _info(self):
return evaluate.MeasurementInfo(
module_type="measurement",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"data": datasets.Value("string"),
}
),
reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
)
def _compute(
self, data, model_id, batch_size: int = 16, add_start_token: bool = True, device=None, max_length=None
):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
device = "cuda"
else:
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForCausalLM.from_pretrained(model_id)
model = model.to(device)
tokenizer = AutoTokenizer.from_pretrained(model_id)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
# check that the model already has at least one special token defined
assert (
len(existing_special_tokens) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
if add_start_token and max_length:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
max_tokenized_len = max_length - 1
else:
max_tokenized_len = max_length
encodings = tokenizer(
data,
add_special_tokens=False,
padding=True,
truncation=True if max_tokenized_len else False,
max_length=max_tokenized_len,
return_tensors="pt",
return_attention_mask=True,
).to(device)
encoded_texts = encodings["input_ids"]
attn_masks = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1), 2)
), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
ppls = []
loss_fct = CrossEntropyLoss(reduction="none")
for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
end_index = min(start_index + batch_size, len(encoded_texts))
encoded_batch = encoded_texts[start_index:end_index]
attn_mask = attn_masks[start_index:end_index]
if add_start_token:
bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
attn_mask = torch.cat(
[torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
)
labels = encoded_batch
with torch.no_grad():
out_logits = model(encoded_batch, attention_mask=attn_mask).logits
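# Shift so that token t is predicted from tokens < t, then average the per-token losses over non-padding positions only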
shift_logits = out_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
perplexity_batch = torch.exp(
(loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
/ shift_attention_mask_batch.sum(1)
)
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 8,604 | 43.35567 | 200 |
py
|
evaluate
|
evaluate-main/measurements/text_duplicates/text_duplicates.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
from collections import Counter
import datasets
import evaluate
logger = evaluate.logging.get_logger(__name__)
_DESCRIPTION = """
Returns the fraction of duplicated strings in the input data.
"""
_KWARGS_DESCRIPTION = """
Args:
`data`: a list of `str` to be checked for duplicates.
Returns:
`duplicate_fraction` (`float`) : the fraction of strings that are duplicated.
`duplicates_dict` (`dict`) (optional) : a dictionary mapping each duplicated string to the number of times it appears.
Examples:
>>> data = ["hello sun","hello moon", "hello sun"]
>>> duplicates = evaluate.load("text_duplicates")
>>> results = duplicates.compute(data=data)
>>> print(results)
{'duplicate_fraction': 0.33333333333333337}
>>> data = ["hello sun","hello moon", "hello sun"]
>>> duplicates = evaluate.load("text_duplicates")
>>> results = duplicates.compute(data=data, list_duplicates=True)
>>> print(results)
{'duplicate_fraction': 0.33333333333333337, 'duplicates_dict': {'hello sun': 2}}
"""
# TODO: Add BibTeX citation
_CITATION = ""
def get_hash(example):
"""Get the hash of a string"""
return hashlib.md5(example.strip().encode("utf-8")).hexdigest()
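# md5 is used here only as a fast content fingerprint for deduplication, not for any security purpose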
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class TextDuplicates(evaluate.Measurement):
"""This measurement returns the duplicate strings contained in the input(s)."""
def _info(self):
# TODO: Specifies the evaluate.MeasurementInfo object
return evaluate.MeasurementInfo(
# This is the description that will appear on the modules page.
module_type="measurement",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# This defines the format of each prediction and reference
features=datasets.Features(
{
"data": datasets.Value("string"),
}
),
)
def _compute(self, data, list_duplicates=False):
"""Returns the duplicates contained in the input data and the number of times they are repeated."""
if list_duplicates:
logger.warning("This functionality can be memory-intensive for large datasets!")
n_dedup = len(set([get_hash(d) for d in data]))
c = Counter(data)
duplicates = {k: v for k, v in c.items() if v > 1}
return {"duplicate_fraction": 1 - (n_dedup / len(data)), "duplicates_dict": duplicates}
else:
n_dedup = len(set([get_hash(d) for d in data]))
return {"duplicate_fraction": 1 - (n_dedup / len(data))}
| 3,347 | 35.791209 | 144 |
py
|
evaluate
|
evaluate-main/measurements/text_duplicates/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("text_duplicates")
launch_gradio_widget(module)
| 137 | 18.714286 | 47 |
py
|
evaluate
|
evaluate-main/measurements/toxicity/toxicity.py
|
# Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Toxicity detection measurement. """
import datasets
from transformers import pipeline
import evaluate
logger = evaluate.logging.get_logger(__name__)
_CITATION = """
@inproceedings{vidgen2021lftw,
title={Learning from the Worst: Dynamically Generated Datasets to Improve Online Hate Detection},
author={Bertie Vidgen and Tristan Thrush and Zeerak Waseem and Douwe Kiela},
booktitle={ACL},
year={2021}
}
"""
_DESCRIPTION = """\
The toxicity measurement aims to quantify the toxicity of the input texts using a pretrained hate speech classification model.
"""
_KWARGS_DESCRIPTION = """
Compute the toxicity of the input sentences.
Args:
`predictions` (list of str): prediction/candidate sentences
`toxic_label` (str) (optional): the toxic label that you want to detect, depending on the labels that the model has been trained on.
This can be found using the `id2label` function, e.g.:
model = AutoModelForSequenceClassification.from_pretrained("DaNLP/da-electra-hatespeech-detection")
print(model.config.id2label)
{0: 'not offensive', 1: 'offensive'}
In this case, the `toxic_label` would be 'offensive'.
`aggregation` (optional): determines the type of aggregation performed on the data. If set to `None`, the scores for each prediction are returned.
Otherwise:
- 'maximum': returns the maximum toxicity over all predictions
- 'ratio': the percentage of predictions with toxicity above a certain threshold.
`threshold`: (float) (optional): the toxicity detection threshold used for calculating the 'ratio' aggregation, described above.
The default threshold is 0.5, based on the one established by [RealToxicityPrompts](https://arxiv.org/abs/2009.11462).
Returns:
`toxicity`: a list of toxicity scores, one for each sentence in `predictions` (default behavior)
`max_toxicity`: the maximum toxicity over all scores (if `aggregation` = `maximum`)
`toxicity_ratio`: the percentage of predictions with toxicity >= 0.5 (if `aggregation` = `ratio`)
Examples:
Example 1 (default behavior):
>>> toxicity = evaluate.load("toxicity", module_type="measurement")
>>> input_texts = ["she went to the library", "he is a douchebag"]
>>> results = toxicity.compute(predictions=input_texts)
>>> print([round(s, 4) for s in results["toxicity"]])
[0.0002, 0.8564]
Example 2 (returns ratio of toxic sentences):
>>> toxicity = evaluate.load("toxicity", module_type="measurement")
>>> input_texts = ["she went to the library", "he is a douchebag"]
>>> results = toxicity.compute(predictions=input_texts, aggregation="ratio")
>>> print(results['toxicity_ratio'])
0.5
Example 3 (returns the maximum toxicity score):
>>> toxicity = evaluate.load("toxicity", module_type="measurement")
>>> input_texts = ["she went to the library", "he is a douchebag"]
>>> results = toxicity.compute(predictions=input_texts, aggregation="maximum")
>>> print(round(results['max_toxicity'], 4))
0.8564
Example 4 (uses a custom model):
>>> toxicity = evaluate.load("toxicity", 'DaNLP/da-electra-hatespeech-detection')
>>> input_texts = ["she went to the library", "he is a douchebag"]
>>> results = toxicity.compute(predictions=input_texts, toxic_label='offensive')
>>> print([round(s, 4) for s in results["toxicity"]])
[0.0176, 0.0203]
"""
def toxicity(preds, toxic_classifier, toxic_label):
toxic_scores = []
if toxic_label not in toxic_classifier.model.config.id2label.values():
raise ValueError(
"The `toxic_label` that you specified is not part of the model labels. Run `model.config.id2label` to see what labels your model outputs."
)
for pred_toxic in toxic_classifier(preds):
hate_toxic = [r["score"] for r in pred_toxic if r["label"] == toxic_label][0]
toxic_scores.append(hate_toxic)
return toxic_scores
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Toxicity(evaluate.Measurement):
def _info(self):
return evaluate.MeasurementInfo(
module_type="measurement",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
}
),
codebase_urls=[],
reference_urls=[],
)
def _download_and_prepare(self, dl_manager):
if self.config_name == "default":
logger.warning("Using default facebook/roberta-hate-speech-dynabench-r4-target checkpoint")
model_name = "facebook/roberta-hate-speech-dynabench-r4-target"
else:
model_name = self.config_name
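# top_k is set very high so the pipeline returns a score for every label of the underlying model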
self.toxic_classifier = pipeline("text-classification", model=model_name, top_k=99999, truncation=True)
def _compute(self, predictions, aggregation="all", toxic_label="hate", threshold=0.5):
scores = toxicity(predictions, self.toxic_classifier, toxic_label)
if aggregation == "ratio":
return {"toxicity_ratio": sum(i >= threshold for i in scores) / len(scores)}
elif aggregation == "maximum":
return {"max_toxicity": max(scores)}
else:
return {"toxicity": scores}
| 6,077 | 41.802817 | 150 |
py
|
evaluate
|
evaluate-main/measurements/toxicity/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("toxicity")
launch_gradio_widget(module)
| 130 | 17.714286 | 47 |
py
|
evaluate
|
evaluate-main/measurements/regard/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("regard")
launch_gradio_widget(module)
| 128 | 17.428571 | 47 |
py
|
evaluate
|
evaluate-main/measurements/regard/regard.py
|
# Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Regard measurement. """
from collections import defaultdict
from operator import itemgetter
from statistics import mean
import datasets
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
import evaluate
logger = evaluate.logging.get_logger(__name__)
_CITATION = """
@article{https://doi.org/10.48550/arxiv.1909.01326,
doi = {10.48550/ARXIV.1909.01326},
url = {https://arxiv.org/abs/1909.01326},
author = {Sheng, Emily and Chang, Kai-Wei and Natarajan, Premkumar and Peng, Nanyun},
title = {The Woman Worked as a Babysitter: On Biases in Language Generation},
publisher = {arXiv},
year = {2019}
}
"""
_DESCRIPTION = """\
Regard aims to measure language polarity towards and social perceptions of a demographic (e.g. gender, race, sexual orientation).
"""
_KWARGS_DESCRIPTION = """
Compute the regard of the input sentences.
Args:
`data` (list of str): prediction/candidate sentences, e.g. sentences describing a given demographic group.
`references` (list of str) (optional): reference/comparison sentences, e.g. sentences describing a different demographic group to compare against.
`aggregation` (str) (optional): determines the type of aggregation performed.
If set to `None`, the difference between the regard scores for the two categories is returned.
Otherwise:
- 'average' : returns the average regard for each category (negative, positive, neutral, other) for each group
- 'maximum': returns the maximum regard for each group
Returns:
With only `data` as input (default config):
`regard` : the regard scores of each string in the input list (if no aggregation is specified)
`average_regard`: the average regard for each category (negative, positive, neutral, other) (if `aggregation` = `average`)
`max_regard`: the maximum regard across all input strings (if `aggregation` = `maximum`)
With `data` and `references` as input (`compare` config):
`regard_difference`: the difference between the regard scores for the two groups (if no aggregation is specified)
`average_data_regard` and `average_references_regard`: the average regard for each category (negative, positive, neutral, other) (if `aggregation` = `average`)
`max_data_regard` and `max_references_regard`: the maximum regard for each group (if `aggregation` = `maximum`)
Examples:
Example 1 (single input):
>>> regard = evaluate.load("regard")
>>> group1 = ['xyz are described as mean', 'xyz are thought of as being too ambitious']
>>> results = regard.compute(data = group1)
>>> for d in results['regard']:
... print({l['label']: round(l['score'],2) for l in d})
{'neutral': 0.95, 'positive': 0.02, 'negative': 0.02, 'other': 0.01}
{'negative': 0.97, 'other': 0.02, 'neutral': 0.01, 'positive': 0.0}
Example 2 (comparison mode):
>>> regard = evaluate.load("regard", "compare")
>>> group1 = ['xyz are described as mean', 'xyz are thought of as being too ambitious']
>>> group2 = ['xyz are known for making too much noise', 'xyz are described as often violent']
>>> results = regard.compute(data = group1, references = group2)
>>> print({k: round(v, 2) for k, v in results['regard_difference'].items()})
{'neutral': 0.46, 'positive': 0.01, 'negative': -0.46, 'other': -0.01}
Example 3 (returns the maximum regard score per category):
>>> regard = evaluate.load("regard", "compare")
>>> group1 = ['xyz are described as mean', 'xyz are thought of as being too ambitious']
>>> group2 = ['xyz are known for making too much noise', 'xyz are described as often violent']
>>> results = regard.compute(data = group1, references = group2, aggregation = "maximum")
>>> print({k: round(v, 2) for k, v in results['max_data_regard'].items()})
{'neutral': 0.95, 'positive': 0.02, 'negative': 0.97, 'other': 0.02}
>>> print({k: round(v, 2) for k, v in results['max_references_regard'].items()})
{'negative': 0.98, 'other': 0.04, 'neutral': 0.03, 'positive': 0.0}
Example 4 (returns the average regard score):
>>> regard = evaluate.load("regard", "compare")
>>> group1 = ['xyz are described as mean', 'xyz are thought of as being too ambitious']
>>> group2 = ['xyz are known for making too much noise', 'xyz are described as often violent']
>>> results = regard.compute(data = group1, references = group2, aggregation = "average")
>>> print({k: round(v, 2) for k, v in results['average_data_regard'].items()})
{'neutral': 0.48, 'positive': 0.01, 'negative': 0.5, 'other': 0.01}
>>> print({k: round(v, 2) for k, v in results['average_references_regard'].items()})
{'negative': 0.96, 'other': 0.02, 'neutral': 0.02, 'positive': 0.0}
"""
def regard(group, regard_classifier):
group_scores = defaultdict(list)
group_regard = regard_classifier(group)
for pred in group_regard:
for pred_score in pred:
group_scores[pred_score["label"]].append(pred_score["score"])
return group_regard, dict(group_scores)
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Regard(evaluate.Measurement):
def _info(self):
if self.config_name not in ["compare", "default"]:
raise KeyError("You should supply a configuration name selected in " '["compare", "default"]')
return evaluate.MeasurementInfo(
module_type="measurement",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"data": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}
if self.config_name == "compare"
else {
"data": datasets.Value("string", id="sequence"),
}
),
codebase_urls=[],
reference_urls=[],
)
def _download_and_prepare(self, dl_manager):
regard_tokenizer = AutoTokenizer.from_pretrained("sasha/regardv3")
regard_model = AutoModelForSequenceClassification.from_pretrained("sasha/regardv3")
self.regard_classifier = pipeline(
"text-classification", model=regard_model, top_k=4, tokenizer=regard_tokenizer, truncation=True
)
def _compute(
self,
data,
references=None,
aggregation=None,
):
if self.config_name == "compare":
pred_scores, pred_regard = regard(data, self.regard_classifier)
ref_scores, ref_regard = regard(references, self.regard_classifier)
pred_mean = {k: mean(v) for k, v in pred_regard.items()}
pred_max = {k: max(v) for k, v in pred_regard.items()}
ref_mean = {k: mean(v) for k, v in ref_regard.items()}
ref_max = {k: max(v) for k, v in ref_regard.items()}
if aggregation == "maximum":
return {
"max_data_regard": pred_max,
"max_references_regard": ref_max,
}
elif aggregation == "average":
return {"average_data_regard": pred_mean, "average_references_regard": ref_mean}
else:
return {"regard_difference": {key: pred_mean[key] - ref_mean.get(key, 0) for key in pred_mean}}
else:
pred_scores, pred_regard = regard(data, self.regard_classifier)
pred_mean = {k: mean(v) for k, v in pred_regard.items()}
pred_max = {k: max(v) for k, v in pred_regard.items()}
if aggregation == "maximum":
return {"max_regard": pred_max}
elif aggregation == "average":
return {"average_regard": pred_mean}
else:
return {"regard": pred_scores}
| 8,406 | 45.447514 | 168 |
py
|
airloc
|
airloc-master/plot_results.py
|
#!/bin/env python3
import os
import time
import sys
import math
import glob
from dateutil.parser import parse
import numpy as np
from config import CONFIG
import json
import argparse
from utils.utils import find_latest_log
MA_SMOOTH = 0.02
START_ITER_PLOT = 50
# Metrics with shared y_axis
metricsWithSharedY = dict([
("ActionsTaken" ,["ValActionsTaken",]),
# ("IoU" , ["HasConverged","SeparatedHasConverged", "SeparatedIoU"])
])
# Compute a custom exponential moving average ('mas_custom') used to smooth the plotted curves
def _custom_ma(data, ma_smooth=MA_SMOOTH):
for idx, val in enumerate(data['values']):
if idx < 30:
data['mas_custom'][idx] = data['means'][idx]
else:
data['mas_custom'][idx] = (1 - ma_smooth) * data['mas_custom'][idx - 1] + ma_smooth * data['values'][idx]
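# i.e. for idx >= 30: mas_custom[t] = (1 - ma_smooth) * mas_custom[t-1] + ma_smooth * values[t],
# an exponential moving average seeded with the stored means for the first 30 points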
# Function for plotting each subplot
def _plot(datas, ax, title='plot', xlabel='x', ylabel='y', start_it=0, max_x=None, max_y=None, min_y = None, show_draw='show' , legends = [], metric = None):
legend_entries = []
for (i, data) in enumerate(datas):
# If the current data is full val print all values
if legends[i] == 'FullVal':
# Full-val values are very sparse: plot the raw values without smoothing or start-iteration filtering
x = data['times']
y = data['values']
format = 'x'
elif 'Separated' in metric:
# Get number of datapoints
num_last_points = min(1000 , len(data['values']))
x = range(data['values'].shape[1])
y = np.nanmean(data['values'][-num_last_points:], axis=0)
format = 'o'
else:
start_it = START_ITER_PLOT
x = data['times'][start_it:] # Iteration indices
y = data['mas_custom'][start_it:] # Smoothed values (custom moving average)
format = '-'
p = ax.plot(x, y, format)
if len(legends) > i:
if ("Actions" in metric ):# or "Separated" in metric):
for i in range(len(p)):
p[i].set_label('%i' % i)
else:
p[0].set_label(legends[i])
if len(legends) > 0:
ax.legend()
ax.grid(False)
# Calculate the axis in plot
if min_y is None:
min_y = np.min(y)
if max_x is None:
max_x = x[-1]
if max_y is None:
max_y = np.max(y)
def main(args):
# Open json containing information about the training session
try:
with open(os.path.join(path_log_dir , "info.json") , 'r') as json_file:
training_info = json.load(json_file)
except:
print("\nERROR: Unable to open info json.\n")
exit(1)
# Plot training data
# TODO - Put training data in a sub folder, like "training_stats"
data_type = 'Training'
if args.legacy:
path_log_data_dir = path_log_dir
elif args.eval:
path_log_data_dir = os.path.join(path_log_dir, "metrics_eval")
prefix = 'Det'
else:
path_log_data_dir = os.path.join(path_log_dir, "metrics")
prefix = 'Val'
# Since we are plotting training info, the info json is the same as the training_info json
info = training_info
# The correct directory containing the data we want to plot is now in 'path_log_data_dir'
metrics = [os.path.basename(metric)[:-4] for metric in glob.glob(path_log_data_dir+'/*')]
AgentType = training_info['AgentType']
startedTrainingAt = training_info['StartedTraining']
nbrOfTrainableParameters = training_info['NbrOfTrainableParameters']
dataset = training_info['Dataset']
# Before plotting, print information about the retrieved data
print('')
print("Training session:\t%s" % log_dir)
print("Log directory:\t%s" % log_base)
print("AgentType:\t%s" % AgentType)
print("Number of trainable parameters:\t%d" % nbrOfTrainableParameters )
print("Dataset:\t%s" % dataset)
# Filter out validation and full-validation metrics
filterdMetrics = list(filter(lambda s: not s.startswith(prefix) and not s.startswith('FullVal') ,metrics ))
# Calculate dimensions of subplots
n = len(filterdMetrics)
# Make exception for Actions taken since otherwise plot would be unreadable
if prefix + "ActionsTaken" in metrics:
filterdMetrics.append(prefix + "ActionsTaken")
n += 1
if prefix + "CorrectActions" in metrics:
filterdMetrics.append(prefix + "CorrectActions")
n += 1
n_cols = math.ceil(math.sqrt(n))
n_rows = math.ceil(n / n_cols)
# Plot all metrics for the selected run in same figure.
fig , axes = plt.subplots(n_rows, n_cols, sharex = False, figsize = (25,14))
axes_ndindicies = list(np.ndindex(axes.shape))
for (i, axis_inds) in enumerate((axes_ndindicies)):
ix , iy = axis_inds
if len(filterdMetrics) <= i:
axes[ix,iy].axis('off')
continue
metric = filterdMetrics[i]
# Read data from log path
log_path = os.path.join(path_log_data_dir, metric + '.npz')
try:
data = np.load(log_path)
except:
print("\nERROR: Unable to load data for metric:\t%s\n" % metric)
exit(1)
data = {'means': data['means'], 'mas': data['mas'],
'values': data['values'], 'times': data['times'],
'mas_custom': np.zeros_like(data['mas'])}
_custom_ma(data)
if args.eval:
legends = ['Stoc']
if metric in [prefix + 'ActionsTaken',prefix + 'CorrectActions']:
legends = [prefix]
else:
legends = ['Train']
if metric in [prefix + 'ActionsTaken',prefix + 'CorrectActions']:
legends = [prefix]
plotData = [data]
# Check if there is val data available
if args.eval:
aux_metric = prefix + metric[4:]
else:
aux_metric = prefix + metric
if aux_metric in metrics and 'CorrectActions' not in metric and 'ActionsTaken' not in metric:
valData = np.load(os.path.join(path_log_data_dir , aux_metric + '.npz'))
valData = {'means': valData['means'], 'mas': valData['mas'],
'values': valData['values'], 'times': valData['times'],
'mas_custom': np.zeros_like(valData['mas'])}
_custom_ma(valData)
legends.append(prefix)
plotData.append(valData)
try:
# Check if there is full val available in the data
if 'FullVal' + metric in metrics:
fullValData = np.load(os.path.join(path_log_data_dir , 'FullVal' + metric + '.npz'))
fullValData = {'means': fullValData['means'], 'mas': fullValData['mas'],
'values': fullValData['values'], 'times': fullValData['times'],
'mas_custom': np.zeros_like(fullValData['mas'])}
_custom_ma(fullValData)
fullValData['times'] = np.array(info['FullValIters'])
if len(fullValData['times']) > 0:
legends.append('FullVal')
plotData.append(fullValData)
except:
pass
# Now check loaded data to make sure there are enough data points
if data['mas_custom'].shape[0] <= START_ITER_PLOT:
print("\nERROR: Too few data points saved for plotting for metric \%s.\n" % metric)
exit(1)
# Check if axes should share y_axis with any other plot
if metric in metricsWithSharedY:
# Find which axes to share with
for other_metric in metricsWithSharedY[metric]:
indx = filterdMetrics.index(other_metric)
other_ax_ind = axes_ndindicies[indx]
axes[ix,iy].get_shared_y_axes().join(axes[ix,iy] , axes[other_ax_ind])
_plot(plotData,axes[ix,iy], show_draw='show' , legends =legends, metric = metric)
# Set title according to the json data file
if args.eval:
metric = metric[4:]
axes[ix ,iy].set_title(metric)
# Set title of entire window
fig.canvas.manager.set_window_title("%s data from %s:\t%s" %( data_type, AgentType , log_dir))
# set padding between plots
fig.tight_layout(pad = 2.0)
if args.show:
plt.show()
elif args.eval:
# Find filepath
filename = os.path.join(path_log_dir, "Eval_Statistics_%s_%s.png" % (AgentType , log_dir))
plt.savefig(filename)
print("\nPlot saved as:\t%s\n" % os.path.basename(filename))
else:
# Find filepath
filename = os.path.join(path_log_dir, "Training_Statistics_%s_%s.png" % (AgentType , log_dir))
plt.savefig(filename)
print("\nPlot saved as:\t%s\n" % os.path.basename(filename))
if __name__ == '__main__':
# Setup argparse
parser = argparse.ArgumentParser()
# Choose log dir either based on name or on number n
log_selection = parser.add_mutually_exclusive_group()
log_selection.add_argument("--log-dir" , "-l" , type = str , help = "Select log dir based on name")
log_selection.add_argument("-n", type = int , help = 'Select the n:th latest log dir. 0 -> latest',default = 0)
parser.add_argument("--eval", "-e", action="store_true", default = False, help = "Sets the program in eval mode")
parser.add_argument("--saved-logs", "-s", action="store_true", default = False, help = "Select log dir from the 'saved_logs' folder.")
parser.add_argument("--show" , action="store_true", default = False, help = "Show the plot on the screen instead of saving it.")
parser.add_argument("--legacy" , action="store_true", default = False , help = "Legacy option for when metrics is stored right in log folder.")
args = parser.parse_args()
## Script part
# Load and set correct settings for matplotlib based on wether to show the plot or just save it
if args.show:
import tkinter
import matplotlib
from matplotlib import pyplot as plt
matplotlib.use('TkAgg')
else:
import matplotlib
from matplotlib import pyplot as plt
# First determine whether to use "saved_logs" or "logs"
if args.saved_logs:
log_base = "saved_logs"
else:
log_base = "logs"
log_base = os.path.join(CONFIG.MISC_project_root_path , log_base)
# Next determine which log dir should be used
if args.log_dir is not None:
# Select dir named PLOT_log_dir
log_dir = args.log_dir
else:
# select the n:th latest log dir
log_dir = find_latest_log(log_base , args.n)
# If log_dir is None, there were not that many logs
if log_dir is None:
print("There are not that many training results in %s" % log_base)
exit(1)
path_log_dir = os.path.join(log_base , log_dir)
# We have now identified a log dir from a training session
# We make sure that the directory actually exists before proceeding
if not os.path.exists(path_log_dir):
print("Error, the selected log dir does not exist:\t%s" % path_log_dir)
print("Check arguments and/or plot settings in config.py")
exit(1)
main(args)
| 11,151 | 33.85 | 157 |
py
|
airloc
|
airloc-master/config.py
|
"""
Central configration file for the project. Acts as a storage
of global variables and various configuration settings.
"""
import os
import pprint
from easydict import EasyDict as edict
from datetime import datetime
CONFIG = edict()
"""
Evaluation of RL-agent
"""
# These settings control the evaluation runs of the saved agents.
# EVAL_RL_log selects which saved agent should be used. If set to a number n, it picks the n:th
# latest available log. Note: n=1 picks the penultimate available log.
# If set to a specific log name, it tries to load that log.
CONFIG.EVAL_RL_log = None
CONFIG.EVAL_RL_saved_logs = False # If enabled picks the model from those in saved_logs
CONFIG.EVAL_RL_multiply_images = 1
CONFIG.EVAL_save_vis_iter = 10
CONFIG.EVAL_RL_use_val_set = True
"""
RL-agent
"""
######################### This is where the important settings start #########################
# Batch n Stuff
CONFIG.RL_nbr_epochs = 10000
CONFIG.RL_batch_size = 32
CONFIG.RL_multiply_images = 2
CONFIG.RL_max_episode_length = 10
CONFIG.MISC_priv = False
# Architecture
CONFIG.RL_agent_network = 'LSTMAgent' # AiRLoc agent
CONFIG.RL_patch_embedder = 'ShareNet'
CONFIG.RL_freeze_patch_embedder = True
CONFIG.RL_priv_pretrained = True
CONFIG.EE_temporal = True
CONFIG.EE_residual = True
# Optimizer
CONFIG.RL_learning_rate = 1e-4
CONFIG.RL_nbr_eps_update = (CONFIG.RL_batch_size * CONFIG.RL_multiply_images)//1
CONFIG.RL_weight_decay = 0
CONFIG.RL_momentum = 0.90
CONFIG.RL_optimizer = 'adam'
CONFIG.RL_beta1 = 0.9
CONFIG.RL_beta2 = 0.999
#Env setup
CONFIG.RL_agent_allowed_outside = True
CONFIG.RL_normalize_weights = True
CONFIG.RL_eval_deterministic = True
CONFIG.RL_priv_grid_location = False
CONFIG.RL_priv_use_seg = True # Set to True when training sem seg-based RL-agent (but False during inference -- should not use ground truth then!)
"""
RL Rewards
"""
CONFIG.RL_reward_goal = 3
CONFIG.RL_reward_failed = 0
CONFIG.RL_reward_closer = 0
CONFIG.RL_reward_iou_scale = 0
CONFIG.RL_reward_step_outside = 0
CONFIG.RL_reward_distance = False
CONFIG.RL_reward_step = -1
# LSTM Agent settings
CONFIG.RL_LSTM_pos_emb = True
# Pretrained doerch
#CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/without-sem-seg' # without sem-seg
CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/with-sem-seg' # with sem-seg
######################### This is where they end #########################
CONFIG.RL_max_start_goal_dist = 999 # Since CONFIG.MISC_grid_game=True by default --> actual max distance will become min(RL_max_start_goal_dist, grid-size - 1), i.e. 4 in 5x5, 6 in 7x7
CONFIG.RL_min_start_goal_iou = None # Maximum allowed IoU between a start and goal patch (this flag is not used when CONFIG.MISC_grid_game=True, as is default)
CONFIG.RL_done_iou = 0.40 # Since CONFIG.MISC_grid_game=True by default --> the agent is done if and only if it overlaps 100% with the goal. Thus any RL_done_iou \in (0,1] works here.
CONFIG.RL_discount_factor = 0.9
CONFIG.RL_softmax_step_size = 1.1 # Set to 1.1 because 48x48 patches --> 1.1*48 = 52.8, with int(52.8)=52, which implies a grid setup of 48x48 patches with 4 pixel distance in between
CONFIG.RL_entropy = None
CONFIG.RL_entropy_lower = None
# Pretrained segmenter
CONFIG.RL_pretrained_segmentation_net = 'segmentations/logs/sem-seg-model'
CONFIG.RL_predict_seg_mask = False # Set to True during inference if using a sem-seg based RL-agent
"""
Random Search baseline agent
"""
CONFIG.RANDOM_batch_size = 1
CONFIG.RANDOM_using_memory = True # If true, the agent cannot visit the same patch twice
CONFIG.RANDOM_stop_iou = 0.2 # Not used in grid game setup
CONFIG.RANDOM_min_iou_visited = 0.3 # At what IoU should a location be considered already visited (not used in grid game setup)
CONFIG.RANDOM_WARNING_steps = 500 # Warn the user if the agent takes this many steps without finding the goal
"""
Statistics / Logging / Plotting
"""
CONFIG.STATS_dir_base = os.path.dirname(os.path.abspath(__file__))
CONFIG.STATS_log_dir_base = os.path.join(CONFIG.STATS_dir_base, 'logs')
CONFIG.STATS_log_dir = os.path.join(CONFIG.STATS_log_dir_base,
str(datetime.now()).replace(' ', '_')
.replace(':', '-').replace('.', '-'))
"""
Plotting
"""
# The option below lets the user choose which LOG directory to plot information from
# An integer signifies the n:th most recent log. A specific log name tries to find that directory
CONFIG.PLOT_log_dir = 1
# The option below lets the user choose which EVAL directory to plot information from.
# I.e, choose which eval session to plot from given a specific training session
CONFIG.PLOT_eval_dir = None
"""
Miscellaneous
"""
CONFIG.MISC_include_baseline = True
CONFIG.MISC_use_gpu = True
CONFIG.MISC_dataset = 'masa_filt'
CONFIG.MISC_dataset_split_file = None
CONFIG.MISC_grid_game = True
CONFIG.MISC_random_seed = 0
#CONFIG.MISC_rnd_crop = True
CONFIG.MISC_rgb_max = 255
#CONFIG.MISC_im_size = (256, 256)
CONFIG.MISC_step_sz = int(48*CONFIG.RL_softmax_step_size)
CONFIG.MISC_game_size = 5
CONFIG.MISC_im_size = (int(CONFIG.MISC_step_sz*(CONFIG.MISC_game_size-1)+48),
int(CONFIG.MISC_step_sz*(CONFIG.MISC_game_size-1)+48))
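# With the defaults above (48x48 patches, step int(48*1.1)=52, game size 5) this gives a 256x256 image: 52*(5-1)+48 = 256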
CONFIG.MISC_patch_size = (48, 48)
CONFIG.MISC_print_iter = 50
CONFIG.MISC_save_vis_iter = 400 # How often we save a visualisation
CONFIG.MISC_vis_per_batch = 12
CONFIG.MISC_save_model_iter = 5000 # How often should we save the model weights
CONFIG.MISC_project_root_path = os.path.dirname(__file__)
CONFIG.MISC_main_pid = os.getpid()
CONFIG.MISC_dataset_path = "data" # Set accordingly
| 5,563 | 37.109589 | 186 |
py
|
airloc
|
airloc-master/__init__.py
| 0 | 0 | 0 |
py
|
|
airloc
|
airloc-master/networks/rl_agent.py
|
import torch
import torch.nn as nn
import torch.nn.init as init
import math
import random
import sys
import os
import torch.nn.functional as F
from config import CONFIG
from networks.resnets import ResNet18
from torchvision.models.feature_extraction import get_graph_node_names
from torchvision.models.feature_extraction import create_feature_extractor
import json
import inspect
# During dev
#sys.path.append(os.path.abspath('..'))
#execfile(os.path.join(__file__, '../config.py'))
def _weights_init(m):
classname = m.__class__.__name__
if isinstance(m,nn.Linear) or isinstance(m,nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self,lambd):
super(LambdaLayer,self).__init__()
self.lambd = lambd
def forward(self,x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,in_filters,filters,stride=1,option='A'):
super(BasicBlock,self).__init__()
self.conv1 = nn.Conv2d(in_filters,filters,kernel_size=3,stride=stride,padding=1,bias=False)
self.bn1 = nn.BatchNorm2d(filters)
self.conv2 = nn.Conv2d(filters,filters,kernel_size=3,stride=1,padding=1,bias=False)
self.bn2 = nn.BatchNorm2d(filters)
self.shortcut = nn.Sequential()
if stride !=1 or in_filters!=filters:
if option=='A':
self.shortcut = LambdaLayer(lambda x:
F.pad(x[:,:,::2,::2],(0,0,0,0,filters//4,filters//4),"constant",0))
elif option =='B':
self.shortcut=nn.Sequential(
nn.Conv2d(in_filters,self.expansion*filters,kernel_size=1,stride=stride,bias=False),
nn.BatchNorm2d(self.expansion*filters)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class CustomResNet(nn.Module):
def __init__(self,block,num_blocks,num_classes=2):
super(CustomResNet,self).__init__()
self.in_filters = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size = 3, stride = 1, padding = 1,
bias = True)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride = 1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride = 2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride = 2)
# Make pool layer
noise = torch.randn(1, 3, CONFIG.MISC_patch_size[0] , CONFIG.MISC_patch_size[1])
# Run noise through layers to find out size for pool
out = F.relu(self.bn1(self.conv1(noise)))
out = self.layer1(out)
out = self.layer2(out)
shape = self.layer3(out).shape
self.pool_layer = nn.AvgPool2d(shape[3])
self.linear = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, filters, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_filters , filters, stride))
self.in_filters = filters * block.expansion
return nn.Sequential(*layers)
def forward(self,x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.pool_layer(out)
out = torch.flatten(out,1)
# Final linear layer producing num_classes outputs
out = self.linear(out)
return out
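# A minimal construction sketch (hypothetical block counts; any [n1, n2, n3] works):
#
#   model = CustomResNet(BasicBlock, [3, 3, 3], num_classes=2)
#   logits = model(torch.randn(1, 3, *CONFIG.MISC_patch_size))  # -> shape (1, 2)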
| 3,679 | 30.724138 | 111 |
py
|
airloc
|
airloc-master/networks/agent.py
|
import torch
import importlib.util
import torch.nn as nn
import torch.nn.init as init
import math
import random
import sys
import os
import torch.nn.functional as F
from config import CONFIG
from networks.resnets import ResNet18
from torchvision.models.feature_extraction import get_graph_node_names
from torchvision.models.feature_extraction import create_feature_extractor
import json
from utils.utils import calculate_cnn_output_size, cosine_sim_heatmap
import inspect
import time
# During dev
#sys.path.append(os.path.abspath('..'))
#execfile(os.path.join(__file__, '../config.py'))
class Agent(nn.Module):
"Implementation of non-rnn agent"
def __init__(self, unit_size = 256):
super(Agent,self).__init__()
# Define the number of input channels (RGB, plus segmentation channels if enabled)
self.n_chan = 3
if CONFIG.RL_priv_use_seg:
if CONFIG.MISC_dataset in ['dubai']:
self.n_chan = 6
elif CONFIG.MISC_dataset in ['masa_filt']:
self.n_chan = 4
else:
raise(Exception("Define which type of segmentation this dataset has"))
# Size of the embeddings before concatenating
self.unit_size = unit_size
# Input size of the first concatenated layer
common_fc_inpt = 2 * unit_size
# Define the embedder
if 'Doerch' in CONFIG.RL_patch_embedder:
self.patch_emb = self._make_doerch_patch_embedder()
elif 'ShareNet' in CONFIG.RL_patch_embedder: # DEFAULT
self.patch_emb = self._make_doerch_patch_embedder()
else:
raise(Exception("Unknown Embedder:\t%s" % CONFIG.RL_patch_embedder) )
# Define the final fully connected layer
self.fc = nn.Linear(256 , 8) # Logits for the 8 movement directions from the current position
self.softmax = nn.Softmax( dim = 1 )
self.AGENT_TYPE = 'REGULAR'
if CONFIG.RL_LSTM_pos_emb:
self._preallocate_pos_enc( max_len = 29)
# If enabled load a U-net and use it to predict the building segmentation mask
if CONFIG.RL_predict_seg_mask:
# First check if segmentation info is enabled, it shouldn't
if CONFIG.RL_priv_use_seg:
raise(Exception("Prediction of segmentation mask and ground truth segmentation mask cannot be enabled at the same time."))
self.seg_net = self._make_seg_net()
def _make_seg_net(self):
# Check that the specified segmentaion log exists
if not os.path.exists(CONFIG.RL_pretrained_segmentation_net):
raise(Exception("Segmentation log does not exist:\t%s" % (CONFIG.RL_pretrained_segmentation_net)))
networks_file_path = os.path.join(CONFIG.RL_pretrained_segmentation_net,"u_net.py")
spec = importlib.util.spec_from_file_location("u_net", networks_file_path)
segmentation_networks = importlib.util.module_from_spec(spec)
spec.loader.exec_module(segmentation_networks)
with open(os.path.join(CONFIG.RL_pretrained_segmentation_net, "info.json"),'r') as io:
info = json.load(io)
# Determine which network should be loaded for the rl agent
network_type = info['NetType']
if network_type == 'UNet':
seg_net = segmentation_networks.UNet(3,2)
else:
raise(Exception(f"Uknkown segmentation network {network_type}"))
# Load the weights
if not os.path.exists(os.path.join(CONFIG.RL_pretrained_segmentation_net,"final_unet")):
raise(Exception("No U-net found in:\t%s" % CONFIG.RL_pretrained_segmentation_net))
seg_net.load_state_dict(torch.load(os.path.join(CONFIG.RL_pretrained_segmentation_net, "final_unet"), map_location = torch.device('cpu')))
if True:
for param in seg_net.parameters():
param.requires_grad = False
else:
print("WARNING: No freeze selected for seg net. Training might not be possible since argmax is used")
return seg_net
def _has_visited(self,loc,locs):
return 2 in (loc == locs).sum(dim=1)
def _preallocate_pos_enc(self, dropout: float = 0.1, max_len: int = 16):
d_model = self.unit_size
position = torch.arange(max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
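# Note: the standard transformer frequency term above is immediately overridden by a slower-decaying variant on the next line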
div_term = torch.exp(torch.arange(0, d_model, 2) * ( - 0.01 ))
self.pe = torch.zeros( 1,max_len, d_model).to(CONFIG.device)
self.pe[0, :, 0::2] = torch.sin(position * div_term)
self.pe[0, :, 1::2] = torch.cos(position * div_term)
# Also construct two separate embedding with half dmodel
div_term_half = torch.exp(torch.arange(0, d_model // 2, 2) * ( - 0.01 ))
self.pe_half = torch.zeros(1 , max_len , d_model // 2).to(CONFIG.device)
self.pe_half[0, :, 0::2] = torch.sin(position * div_term_half)
self.pe_half[0, :, 1::2] = torch.cos(position * div_term_half)
if False:
grid_size = int( CONFIG.MISC_im_size[0] / CONFIG.MISC_patch_size[0] + 1 )
cosine_sim_pos = [(0,0) , (1,2) , (4,2),(0,2) , (3,1),(0,1),(1,0),(4,4),(4,3),(5,1),(0,5),(2,5)]
os.makedirs(os.path.join(CONFIG.STATS_log_dir, "positional_embeddings"),exist_ok=True)
for pos in cosine_sim_pos:
cosine_sim_heatmap(self.pe_half , pos = pos , grid_size = grid_size )
def embedd_position(self, x, locs , goal_emb_included = False):
""" Embedds position into the sequence. """
# First get position in image (x,y) normalized to (0,1)
xy_pos = locs[:,0:2] / (torch.tensor(CONFIG.MISC_im_size))
# Append x and y position (-1,-1) for goal crop(which is the last)
if goal_emb_included:
xy_pos = torch.cat((xy_pos, torch.tensor([[-1,-1]])) , dim = 0)
# Get position in grid
xy_grid = (locs[:,0:2] / (torch.tensor(CONFIG.MISC_patch_size))).long()
# We want the goal crop to get f(0) add that to grid
if goal_emb_included:
xy_grid = torch.cat(( xy_grid , torch.tensor([[0,0]])) , dim = 0 )
# Half the positional embedding is for x other half for y
pos_embedding = torch.flatten(self.pe_half[0,xy_grid] , start_dim = 1, end_dim = 2)
x_pos_emb = x + pos_embedding
return x_pos_emb
def _construct_patch_embedder(self):
""" Constructs the embedder network. A series of cnn layers. """
# [in_chan, out_chan, kernel, stride, padding]
max_pool = [3,2]
layers = []
modules = []
layers.append([self.n_chan, 16, 3, 1, 0])
layers.append([16, 32, 3, 1, 0])
# Construct layers
for layer in layers:
modules.append(nn.Conv2d(layer[0],layer[1],layer[2],layer[3],layer[4]))
modules.append(nn.ReLU())
modules.append(nn.MaxPool2d(max_pool[0],max_pool[1]))
# Calculate output size from CNN layers
out_size = calculate_cnn_output_size(layers, CONFIG.MISC_patch_size, max_pool)
linear_input_size = int(out_size[0] * out_size[1] * out_size[2])
# Flatten and add final linear layer
modules.append(nn.Flatten(start_dim = 1))
modules.append(nn.Linear(linear_input_size , self.unit_size))
embedder = nn.Sequential(*modules)
return embedder
def _make_doerch_patch_embedder(self):
if not os.path.exists(CONFIG.RL_pretrained_doerch_net):
print("The pretrained doerch net does not exist check the file path")
exit(1)
# Load the json file generated during pretraining
with open(os.path.join(CONFIG.RL_pretrained_doerch_net, "info.json"),'r') as io:
info = json.load(io)
dim = 8
# Determine which network should be loaded
network_type = info['NetType']
if network_type.startswith("Doerch"):
networks_file_path = os.path.join(CONFIG.RL_pretrained_doerch_net, "networks.py")
spec = importlib.util.spec_from_file_location("networks",networks_file_path)
doerch_networks = importlib.util.module_from_spec(spec)
spec.loader.exec_module(doerch_networks)
feature_net = doerch_networks.AJNet(network_type, num_classes = dim)
elif network_type.startswith("ShareNet"):
networks_file_path = os.path.join(CONFIG.RL_pretrained_doerch_net, "share_net.py")
spec = importlib.util.spec_from_file_location("share_net",networks_file_path)
doerch_networks = importlib.util.module_from_spec(spec)
spec.loader.exec_module(doerch_networks)
feature_net = doerch_networks.ShareNet(num_out_classes = dim)
else:
raise(Exception("Unknown encoder network "))
latentSpaceSize = info['LatentSpaceSize']
if CONFIG.RL_priv_pretrained:
feature_net.load_state_dict(torch.load(os.path.join(CONFIG.RL_pretrained_doerch_net,"doerch_embedder"), map_location=torch.device('cpu')))
return feature_net
def forward(self, episode):
step = episode.step
# First calculate embeddings of current crop and goal crop
x_curr = episode.crops[step , : , :,:].unsqueeze(0)
x_goal = episode.crop_goal
if CONFIG.RL_predict_seg_mask:
seg_net_input = torch.cat((x_curr, x_goal) , dim = 0)
seg_mask = self.seg_net(seg_net_input)
# Output is one channel per class, i.e, need to do argmax to get it to one channel for two class problems
# TODO add exception when not using two class seg masks
seg_mask = torch.argmax( seg_mask , dim = 1, keepdim = True)
x_curr = torch.cat((x_curr, seg_mask[0,:].unsqueeze(0)) , dim = 1)
x_goal = torch.cat((x_goal, seg_mask[1,:].unsqueeze(0)) , dim = 1)
if 'Doerch' in CONFIG.RL_patch_embedder or 'ShareNet' in CONFIG.RL_patch_embedder:
output, softmax = self.patch_emb(x_curr,x_goal)
if CONFIG.RL_LSTM_pos_emb:
x_curr_emb = self.embedd_position(output , episode.locs[step, :][None,:])
x_fc = self.fc(x_curr_emb)
return self.softmax(x_fc),softmax
else:
if CONFIG.MISC_priv:
x_o = torch.zeros([1,8]).to(CONFIG.device)
step_sz = int(CONFIG.RL_softmax_step_size*CONFIG.MISC_patch_size[0])
visited_locs = (episode.locs[:episode.step,:2]/step_sz).int()
loc = (episode.locs[episode.step,:2]/step_sz).int()
# Ensure no stepping outside the image
if loc[0] <= 0:
x_o[0,-1] = -1
x_o[0,:2] = -1
if loc[1] <= 0:
x_o[0, 5:] = -1
if loc[0] >= CONFIG.MISC_game_size - 1:
x_o[0, 3:6] = -1
if loc[1] >= CONFIG.MISC_game_size - 1:
x_o[0, 1:4] = -1
# Ensure not stepping in same location if possible
if episode.step == 0:
return self.softmax(softmax +1000000* x_o), None
if self._has_visited(loc - torch.as_tensor([1,0]),visited_locs):
x_o[0,0] = -1
if self._has_visited(loc - torch.as_tensor([1,-1]),visited_locs):
x_o[0,1] = -1
if self._has_visited(loc - torch.as_tensor([0,-1]),visited_locs):
x_o[0,2] = -1
if self._has_visited(loc - torch.as_tensor([-1,-1]),visited_locs):
x_o[0,3] = -1
if self._has_visited(loc - torch.as_tensor([-1,0]),visited_locs):
x_o[0,4] = -1
if self._has_visited(loc - torch.as_tensor([-1,1]),visited_locs):
x_o[0,5] = -1
if self._has_visited(loc - torch.as_tensor([0,1]),visited_locs):
x_o[0,6] = -1
if self._has_visited(loc - torch.as_tensor([1,1]),visited_locs):
x_o[0,7] = -1
if x_o.sum() == -8:
x_o = torch.zeros([1,8]).to(CONFIG.device)
# Ensure no stepping outside the image
# If the vector has been reset
if loc[0] <= 0:
x_o[0,:2] = -1
x_o[0,-1] = -1
if loc[1] <= 0:
x_o[0, 5:] = -1
if loc[0] >= CONFIG.MISC_game_size - 1:
x_o[0, 3:6] = -1
if loc[1] >= CONFIG.MISC_game_size - 1:
x_o[0, 1:4] = -1
return self.softmax(softmax+1000000*x_o), None
return self.softmax(softmax),output
x_goal_emb = self.patch_emb(x_goal)
x_curr_emb = self.patch_emb(x_curr)
# Concat all results
x_fc = torch.cat((x_curr_emb ,x_goal_emb) , dim = 1)
x_fc = self.fc(x_fc)
x_fc = self.softmax(x_fc)
return x_fc
| 13,032 | 40.243671 | 150 |
py
|
airloc
|
airloc-master/networks/RandomAgent.py
|
import torch
import importlib.util
import torch.nn as nn
import torch.nn.init as init
import math
import random
import sys
import os
import torch.nn.functional as F
from config import CONFIG
from networks.resnets import ResNet18
from torchvision.models.feature_extraction import get_graph_node_names
from torchvision.models.feature_extraction import create_feature_extractor
import json
from utils.utils import calculate_cnn_output_size
import inspect
# During dev
#sys.path.append(os.path.abspath('..'))
#execfile(os.path.join(__file__, '../config.py'))
class RandomAgent(nn.Module):
"Implementation of non-rnn agent"
def __init__(self, unit_size = 64):
super(RandomAgent,self).__init__()
self.softmax = nn.Softmax(dim=1)
self.AGENT_TYPE = 'RANDOM'
def _has_visited(self,loc,locs):
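# A location counts as visited when both of its coordinates match some row of locs, i.e. the elementwise comparison sums to 2 along dim=1.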
return 2 in (loc == locs).sum(dim=1)
def forward(self, episode):
# Assign Uniform probability to all classes
x_fc = torch.ones([1,8])
step_sz = int(CONFIG.RL_softmax_step_size*CONFIG.MISC_patch_size[0])
visited_locs = (episode.locs[:episode.step,:2]/step_sz).int()
loc = (episode.locs[episode.step,:2]/step_sz).int()
# Ensure no stepping outside the image
if loc[0] <= 0:
x_fc[0,-1] = 0
x_fc[0,:2] = 0
if loc[1] <= 0:
x_fc[0, 5:] = 0
if loc[0] >= 4:
x_fc[0, 3:6] = 0
if loc[1] >= 4:
x_fc[0, 1:4] = 0
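# Note: the hard-coded bound 4 corresponds to CONFIG.MISC_game_size - 1 on a 5x5 grid (the LSTMAgent uses the config value directly).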
# Ensure not stepping in same location if possible
if episode.step == 0:
return x_fc, None
if self._has_visited(loc - torch.as_tensor([1,0]),visited_locs):
x_fc[0,0] = 0
if self._has_visited(loc - torch.as_tensor([1,-1]),visited_locs):
x_fc[0,1] = 0
if self._has_visited(loc - torch.as_tensor([0,-1]),visited_locs):
x_fc[0,2] = 0
if self._has_visited(loc - torch.as_tensor([-1,-1]),visited_locs):
x_fc[0,3] = 0
if self._has_visited(loc - torch.as_tensor([-1,0]),visited_locs):
x_fc[0,4] = 0
if self._has_visited(loc - torch.as_tensor([-1,1]),visited_locs):
x_fc[0,5] = 0
if self._has_visited(loc - torch.as_tensor([0,1]),visited_locs):
x_fc[0,6] = 0
if self._has_visited(loc - torch.as_tensor([1,1]),visited_locs):
x_fc[0,7] = 0
if x_fc.sum() == 0:
x_fc = torch.ones([1,8])
# Ensure no stepping outside the image
# If the vector has been reset
if loc[0] <= 0:
x_fc[0,:2] = 0
x_fc[0,-1] = 0
if loc[1] <= 0:
x_fc[0, 5:] = 0
if loc[0] >= 4:
x_fc[0, 3:6] = 0
if loc[1] >= 4:
x_fc[0, 1:4] = 0
return x_fc, None
| 2,861 | 30.450549 | 76 |
py
|
airloc
|
airloc-master/networks/rnn_agents.py
|
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torch.nn.functional as F
import os
import importlib.util
import time
import json
import math
from utils.utils import calculate_cnn_output_size, cosine_sim_heatmap
from utils.agent_utils import visualize_cnn_filter, get_outside
import matplotlib
import matplotlib.pyplot as plt
from config import CONFIG
class LSTMAgent(nn.Module):
""" AiRLoc agent """
def __init__(self, unit_size = 256):
super(LSTMAgent, self).__init__()
# Output size of individual units
self.unit_size = unit_size
# If ResNet8 is selected as embedder, this option sets whether to freeze the weights or not
self.freeze_patch_embedder = CONFIG.RL_freeze_patch_embedder
# Determine the number of segmentation info channels to use
if CONFIG.RL_priv_use_seg or CONFIG.RL_predict_seg_mask:
# The number of segmentation channels available depend on the dataset
if CONFIG.MISC_dataset == 'dubai':
self.seg_chan = 3
elif CONFIG.MISC_dataset == 'masa':
self.seg_chan = 1
elif CONFIG.MISC_dataset == 'masa_filt':
self.seg_chan = 1
elif CONFIG.MISC_dataset == 'masa_seven':
self.seg_chan = 1
elif CONFIG.MISC_dataset == 'dubai_seven':
self.seg_chan = 1
elif CONFIG.MISC_dataset == 'images_pre':
self.seg_chan = 1
else:
raise(Exception("Use segmentation information was selected but the dataset has no segmentation info."))
else:
self.seg_chan = 0
# Define the embedder
if CONFIG.RL_patch_embedder == 'Segmenter':
self.patch_emb = self._make_segmenter_patch_embedder()
elif 'Doerch' in CONFIG.RL_patch_embedder:
self.patch_emb = self._make_doerch_patch_embedder()
elif 'ShareNet' in CONFIG.RL_patch_embedder: # DEFAULT
self.patch_emb = self._make_doerch_patch_embedder()
elif CONFIG.RL_patch_embedder is None:
self.patch_emb = None
self.x_curr_emb = torch.zeros(1,1,256).to(CONFIG.device)
else:
raise(Exception("Unknown patch embedder selected in LSTMAgent:\t%s" % CONFIG.RL_patch_embedder))
# If enabled will also send a flattened grid location of the agent to the lstm
if CONFIG.RL_priv_grid_location:
im_H, im_W , patch_H , patch_W = *CONFIG.MISC_im_size , *CONFIG.MISC_patch_size
# Times two because two grids for current and previous positions
self.unit_size += 2 * int(im_H / patch_H) * int(im_W / patch_W)
if self.freeze_patch_embedder:
for param in self.patch_emb.parameters():
param.requires_grad = False
# Define the RNN
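# When CONFIG.EE_temporal is enabled, the patch embedder's 8-dim softmax output is concatenated to the LSTM input at every step (see forward), hence the extra 8 input units.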
self.rnn = nn.LSTM(input_size = self.unit_size + 8 * CONFIG.EE_temporal, hidden_size = self.unit_size, num_layers = 1,
bias = True, batch_first = True, dropout = 0, bidirectional = False)
# If enabled, load a U-net and use it to predict the building segmentation mask
if CONFIG.RL_predict_seg_mask:
# First check if segmentation info is enabled, it shouldn't
if CONFIG.RL_priv_use_seg:
raise(Exception("Prediction of segmentation mask and ground truth segmentation mask cannot be enabled at the same time."))
self.seg_net = self._make_seg_net()
# TODO: Overwrites the line common_fc_input += self.unit_size above
common_fc_input = self.unit_size
# Define the final fully connected layer
self.fc = nn.Linear(common_fc_input , 8) # Directions from current
self.softmax = nn.Softmax( dim = 1 )
# Reset the agent to initialize the hidden states
self.reset()
# Set agent type to be able to reset hidden states
self.AGENT_TYPE = 'RNN'
# If enabled the patch embeddings will be embedded with their absolute position in the image
# The below happens with default config (CONFIG.RL_LSTM_pos_emb = 'half')
if CONFIG.RL_LSTM_pos_emb:
self._preallocate_pos_enc(max_len = 29)
def _has_visited(self,loc,locs):
return 2 in (loc == locs).sum(dim=1)
def _make_seg_net(self):
# Check that the specified segmentation log exists
if not os.path.exists(CONFIG.RL_pretrained_segmentation_net):
raise(Exception("Segmentation log does not exist:\t%s" % (CONFIG.RL_pretrained_segmentation_net)))
networks_file_path = os.path.join(CONFIG.RL_pretrained_segmentation_net,"u_net.py")
spec = importlib.util.spec_from_file_location("u_net", networks_file_path)
segmentation_networks = importlib.util.module_from_spec(spec)
spec.loader.exec_module(segmentation_networks)
with open(os.path.join(CONFIG.RL_pretrained_segmentation_net, "info.json"),'r') as io:
info = json.load(io)
# Determine which network should be loaded for the rl agent
network_type = info['NetType']
if network_type == 'UNet':
seg_net = segmentation_networks.UNet(3,2)
else:
raise(Exception(f"Uknkown segmentation network {network_type}"))
# Load the weights
if not os.path.exists(os.path.join(CONFIG.RL_pretrained_segmentation_net,"final_unet")):
raise(Exception("No U-net found in:\t%s" % CONFIG.RL_pretrained_segmentation_net))
seg_net.load_state_dict(torch.load(os.path.join(CONFIG.RL_pretrained_segmentation_net, "final_unet"), map_location = torch.device('cpu')))
for param in seg_net.parameters():
param.requires_grad = False
return seg_net
def _preallocate_pos_enc(self, dropout: float = 0.1, max_len: int = 16):
d_model = self.unit_size
if CONFIG.MISC_game_size == 7:
position = torch.arange(max_len).unsqueeze(1) * (5 - 2) / (CONFIG.MISC_game_size - 2)
else:
position = torch.arange(max_len).unsqueeze(1)
div_term_factor = -0.01
div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
div_term = torch.exp(torch.arange(0, d_model, 2) * div_term_factor)
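# Note: the transformer-style div_term computed above is discarded; only this fixed-decay version is used (also for pe_half below).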
self.pe = torch.zeros( 1,max_len, d_model).to(CONFIG.device)
self.pe[0, :, 0::2] = torch.sin(position * div_term)
self.pe[0, :, 1::2] = torch.cos(position * div_term)
# Also construct two separate embedding with half dmodel
div_term_half = torch.exp(torch.arange(0, d_model // 2, 2) * div_term_factor)
self.pe_half = torch.zeros(1 , max_len , d_model // 2).to(CONFIG.device)
self.pe_half[0, :, 0::2] = torch.sin(position * div_term_half)
self.pe_half[0, :, 1::2] = torch.cos(position * div_term_half)
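# Standard sinusoidal scheme: sin on even and cos on odd feature dimensions; pe_half has d_model // 2 features so that the x and y grid coordinates can each be encoded with one half and concatenated in embedd_position.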
if False: # Plot of the similarity scores
grid_size = int( CONFIG.MISC_im_size[0] / CONFIG.MISC_patch_size[0] + 1 )
# cosine_sim_pos = [(0,0) , (1,2) , (4,2),(0,2) , (3,1),(0,1),(1,0),(4,4),(4,3),(5,1),(0,5),(2,5)]
cosine_sim_pos = [(0,0),(0,1),(0,2),(0,3),(0,4),(0,5),(0,6),(1,0),(1,1),(1,2),(1,3),(1,4),(1,5),(1,6),(6,0),(6,1),(6,2),(6,3),(6,4),(6,5),(6,6)]
cosine_sim_pos = [(0,0),(0,1),(0,2),(0,3),(0,4),(1,0),(1,1),(1,2),(1,3),(1,4),(4,0),(4,1),(4,2),(4,3),(4,4)]
os.makedirs(os.path.join(CONFIG.STATS_log_dir, "positional_embeddings"),exist_ok = True)
for pos in cosine_sim_pos:
cosine_sim_heatmap(self.pe_half , pos = pos , grid_size = grid_size )
print("DONE")
time.sleep(999)
def embedd_position(self, x, locs , goal_emb_included = False):
""" Embedds position into the sequence. """
# First get position in image (x,y) normalized to (0,1)
xy_pos = locs[:,0:2] / (torch.tensor(CONFIG.MISC_im_size))
# Append x and y position (-1,-1) for the goal crop (which is last)
if goal_emb_included:
xy_pos = torch.cat((xy_pos, torch.tensor([[-1,-1]])) , dim = 0)
# Get position in grid
xy_grid = (locs[:,0:2] / (torch.tensor(CONFIG.MISC_patch_size))).long()
# We want the goal crop to get f(0) add that to grid
if goal_emb_included:
xy_grid = torch.cat(( xy_grid , torch.tensor([[0,0]])) , dim = 0 )
# Half the positional embedding is for x other half for y
pos_embedding = torch.flatten(self.pe_half[0,xy_grid] , start_dim = 1, end_dim = 2)
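# pe_half is indexed with both grid coordinates and flattened, so the first half of the positional embedding encodes the first grid coordinate and the second half the second.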
x_pos_emb = x + pos_embedding
return x_pos_emb
def _make_segmenter_patch_embedder(self):
"""
Retrieves the encoder part of the segmentation network; it requires
only RGB input and yields the latent space learned on a segmentation task.
"""
if not os.path.exists(CONFIG.RL_pretrained_segmentation_net):
print("The segmentation encoder does not exist, check that the path is correct")
exit(1)
# Gets the network file from the log file and load it as a module
networks_file_path = os.path.join(CONFIG.RL_pretrained_segmentation_net,"u_net.py")
spec = importlib.util.spec_from_file_location("u_net", networks_file_path)
segmentation_networks = importlib.util.module_from_spec(spec)
spec.loader.exec_module(segmentation_networks)
# Load the json file generated during the segmentation training
with open(os.path.join(CONFIG.RL_pretrained_segmentation_net, "info.json"),'r') as io:
info = json.load(io)
# Determine which network should be loaded for the rl agent
network_type = info['NetType']
if network_type == 'UNet':
feature_net = segmentation_networks.UNet(3,2)
else:
raise(Exception(f"Ukn segmentation network {network_type}"))
latentSpaceSize = info['LatentSpaceSize']
if CONFIG.RL_priv_pretrained:
feature_net.load_state_dict(torch.load(os.path.join(CONFIG.RL_pretrained_segmentation_net, "final_unet")))
if self.freeze_patch_embedder:
for param in feature_net.parameters():
param.requires_grad = False
# Unroll the network and allow it to get the embedding
modules = list(feature_net.children())
middle = int(len(modules)/2)
modules = modules[:middle]
# Assumes 64,3,3
# modules.append(nn.Conv2d(64,64,3))
modules.append(nn.Flatten(start_dim = 1))
modules.append(nn.Linear(9*64, 128))
modules.append(nn.ReLU())
embedder = nn.Sequential(*modules)
return embedder
def _make_doerch_patch_embedder(self):
if not os.path.exists(CONFIG.RL_pretrained_doerch_net):
print("The pretrained doerch net does not exist check the file path")
exit(1)
# Load the json file generated during pretraining
with open(os.path.join(CONFIG.RL_pretrained_doerch_net, "info.json"),'r') as io:
info = json.load(io)
# Determine which network should be loaded
network_type = info['NetType']
if network_type.startswith("Doerch"):
networks_file_path = os.path.join(CONFIG.RL_pretrained_doerch_net, "networks.py")
spec = importlib.util.spec_from_file_location("networks",networks_file_path)
doerch_networks = importlib.util.module_from_spec(spec)
spec.loader.exec_module(doerch_networks)
feature_net = doerch_networks.AJNet(network_type)
elif network_type.startswith("ShareNet"):
networks_file_path = os.path.join(CONFIG.RL_pretrained_doerch_net, "share_net.py")
spec = importlib.util.spec_from_file_location("share_net",networks_file_path)
doerch_networks = importlib.util.module_from_spec(spec)
spec.loader.exec_module(doerch_networks)
feature_net = doerch_networks.ShareNet()
else:
raise(Exception("Unknown encoder network "))
latentSpaceSize = info['LatentSpaceSize']
if CONFIG.RL_priv_pretrained:
feature_net.load_state_dict(torch.load(os.path.join(CONFIG.RL_pretrained_doerch_net,"doerch_embedder"), map_location=torch.device('cpu')))
if self.freeze_patch_embedder:
for param in feature_net.parameters():
param.requires_grad = False
return feature_net
def reset(self):
""" Resets the hidden states of the RNN network."""
# The size of the hidden states depend on the LSTM network
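# The hidden/cell states persist across forward() calls, so reset() must be called between episodes (the training and eval loops do this when AGENT_TYPE == 'RNN').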
D = 2 if self.rnn.bidirectional else 1
self._hidden = torch.zeros(D * self.rnn.num_layers, 1, self.unit_size).to(CONFIG.device)
self._cell = torch.zeros(D * self.rnn.num_layers , 1,self.unit_size).to(CONFIG.device)
def forward(self, episode):
# For now only one mode, use latest available crop and step once
step = episode.step
x_curr = episode.crops[step , : , :,:].unsqueeze(0)
x_goal = episode.crop_goal
# If enabled predict segmentation mask and concat it to the patch input
if CONFIG.RL_predict_seg_mask:
seg_net_input = torch.cat((x_curr, x_goal) , dim = 0)
seg_mask = self.seg_net(seg_net_input)
# Output is one channel per class, i.e., we need to do argmax to get it down to one channel for two-class problems
# TODO add exception when not using two class seg masks
seg_mask = torch.argmax( seg_mask , dim = 1, keepdim = True)
x_curr = torch.cat((x_curr, seg_mask[0,:].unsqueeze(0)) , dim = 1)
x_goal = torch.cat((x_goal, seg_mask[1,:].unsqueeze(0)) , dim = 1)
# First calculate embeddings of current crop and goal crop
if CONFIG.RL_patch_embedder is None:
x_curr_emb = self.x_curr_emb
x_softmax_emb = None
elif "Doerch" in CONFIG.RL_patch_embedder or "ShareNet" in CONFIG.RL_patch_embedder:
# DEFAULT BRANCH
x_curr_emb , x_softmax_emb = self.patch_emb(x_curr, x_goal)
x_curr_emb = x_curr_emb.unsqueeze(0)
else:
x_curr_emb = self.patch_emb(x_curr).unsqueeze(0)
x_goal_emb = self.patch_emb(x_goal).unsqueeze(0)
x_curr_emb = torch.cat((x_curr_emb,x_goal_emb), dim=2)
x_softmax_emb = None
# embedd position into result from patch_emb
if CONFIG.RL_LSTM_pos_emb:
x_curr_emb = self.embedd_position(x_curr_emb , episode.locs[step, :][None,:])
# If enabled send a flattened version of the grid location of the agent
if CONFIG.RL_priv_grid_location:
x_curr_emb = torch.cat((x_curr_emb ,
torch.flatten(episode.grid_loc[step,:]).unsqueeze(0).unsqueeze(0),
torch.flatten(episode.grid_curr_loc[step,:]).unsqueeze(0).unsqueeze(0)) , dim = 2)
# Append the patch embedders softmax embedding for further guidance
if CONFIG.EE_temporal:
x_curr_emb = torch.cat((x_curr_emb, x_softmax_emb.unsqueeze(0)), dim=2)
# Run embedding of current crop through LSTM network
x_curr_lstm , (self._hidden , self._cell) = self.rnn(x_curr_emb, (self._hidden , self._cell))
# Squeeze away the sequence dimension since it will always be one
x_curr_lstm = x_curr_lstm.squeeze(1)
# If the goal patch is not in the lstm append it after the lstm
x_fc = x_curr_lstm
x_fc = self.fc(x_fc)
if CONFIG.MISC_priv:
x_o = torch.zeros([1,8]).to(CONFIG.device)
step_sz = int(CONFIG.RL_softmax_step_size*CONFIG.MISC_patch_size[0])
visited_locs = (episode.locs[:episode.step,:2]/step_sz).int()
loc = (episode.locs[episode.step,:2]/step_sz).int()
# Ensure no stepping outside the image
if loc[0] <= 0:
x_o[0,-1] = -1
x_o[0,:2] = -1
if loc[1] <= 0:
x_o[0, 5:] = -1
if loc[0] >= CONFIG.MISC_game_size - 1:
x_o[0, 3:6] = -1
if loc[1] >= CONFIG.MISC_game_size - 1:
x_o[0, 1:4] = -1
# Ensure not stepping in same location if possible
if episode.step == 0:
return self.softmax(x_fc +1000000* x_o), None
if self._has_visited(loc - torch.as_tensor([1,0]),visited_locs):
x_o[0,0] = -1
if self._has_visited(loc - torch.as_tensor([1,-1]),visited_locs):
x_o[0,1] = -1
if self._has_visited(loc - torch.as_tensor([0,-1]),visited_locs):
x_o[0,2] = -1
if self._has_visited(loc - torch.as_tensor([-1,-1]),visited_locs):
x_o[0,3] = -1
if self._has_visited(loc - torch.as_tensor([-1,0]),visited_locs):
x_o[0,4] = -1
if self._has_visited(loc - torch.as_tensor([-1,1]),visited_locs):
x_o[0,5] = -1
if self._has_visited(loc - torch.as_tensor([0,1]),visited_locs):
x_o[0,6] = -1
if self._has_visited(loc - torch.as_tensor([1,1]),visited_locs):
x_o[0,7] = -1
if x_o.sum() == -8:
x_o = torch.zeros([1,8]).to(CONFIG.device)
# Ensure no stepping outside the image
# If the vector has been reset
if loc[0] <= 0:
x_o[0,:2] = -1
x_o[0,-1] = -1
if loc[1] <= 0:
x_o[0, 5:] = -1
if loc[0] >= CONFIG.MISC_game_size - 1:
x_o[0, 3:6] = -1
if loc[1] >= CONFIG.MISC_game_size - 1:
x_o[0, 1:4] = -1
return self.softmax(x_fc+1000000*x_o), None
if not CONFIG.RL_agent_allowed_outside:
outside = get_outside(episode).to(CONFIG.device)
x_fc = x_fc -10000* outside
if CONFIG.EE_residual:
x_fc += x_softmax_emb
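# Residual connection: the pretrained embedder's 8-way direction logits are added to the agent's logits before the softmax, effectively biasing the policy towards the pretrained direction predictor.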
x_fc = self.softmax(x_fc)
return x_fc, x_softmax_emb
| 18,173 | 43.004843 | 156 |
py
|
airloc
|
airloc-master/networks/__init__.py
| 0 | 0 | 0 |
py
|
|
airloc
|
airloc-master/networks/deterministic_agent.py
|
import torch
import importlib.util
import torch.nn as nn
import torch.nn.init as init
import math
import random
import sys
import os
import torch.nn.functional as F
from config import CONFIG
from networks.resnets import ResNet18
from torchvision.models.feature_extraction import get_graph_node_names
from torchvision.models.feature_extraction import create_feature_extractor
import json
from utils.utils import calculate_cnn_output_size
import inspect
# During dev
#sys.path.append(os.path.abspath('..'))
#execfile(os.path.join(__file__, '../config.py'))
class DeterministicAgent(nn.Module):
"Implementation of non-rnn agent"
def __init__(self, mode='spiral'):
super(DeterministicAgent,self).__init__()
self.mode = mode
self.softmax = nn.Softmax(dim=1)
self.AGENT_TYPE = 'DETERMINISTIC'
self.dir = -2
self.locked_in = False
def _has_visited(self,loc,locs):
if (loc<0).any() or (loc>=CONFIG.MISC_im_size[0]).any():
return True
return 2 in (loc == locs).sum(dim=1)
def _get_move(self,action):
if action < 0: action += 8
if action > 7: action -= 8
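# Actions 0-7 map to the 8 neighbouring grid cells (one per compass direction) in a fixed clockwise order; the hard-coded 48 px offset presumably matches the patch size used by this agent.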
if action == 0:
return torch.as_tensor([-48,0])
if action == 1:
return torch.as_tensor([-48,48])
if action == 2:
return torch.as_tensor([0,48])
if action == 3:
return torch.as_tensor([48,48])
if action == 4:
return torch.as_tensor([48,0])
if action == 5:
return torch.as_tensor([48,-48])
if action == 6:
return torch.as_tensor([0,-48])
if action == 7:
return torch.as_tensor([-48,-48])
def _forward_spiral(self,episode):
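# Roughly: prefer turning by self.dir (+-2, i.e. a 90 degree turn), otherwise continue straight, otherwise turn the other way (flipping self.dir); as a last resort reverse direction and stay 'locked in' until an unvisited cell is reachable.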
# Assign zero probability to all classes
x_fc = torch.zeros([1,8])
# Get the current and all visited locations from the episode storage
visited_locs = episode.locs[:episode.step,:2]
loc = episode.locs[episode.step,:2]
# Ensure not stepping in same location if possible
if episode.step == 0:
for action in [0,2,4,6]:
next_loc = loc + self._get_move(action)
if not self._has_visited(next_loc,torch.as_tensor([[-1,-1]])):
if self._has_visited(next_loc + self._get_move(action+self.dir),torch.as_tensor([[-1,-1]])):
if self.dir == -2: self.dir=2
else: self.dir=-2
x_fc[0,action] = 1
return x_fc
last_action = episode.actions[episode.step - 1,:].argmax()
if self.locked_in and self._has_visited(loc + self._get_move(last_action),visited_locs):
x_fc[0,last_action] = 1
return x_fc
self.locked_in = False
if not self._has_visited(loc + self._get_move(last_action+self.dir),visited_locs):
a = last_action + self.dir
if a>7: a-=8
x_fc[0,a] = 1
return x_fc
elif not self._has_visited(loc + self._get_move(last_action),visited_locs):
x_fc[0,last_action] = 1
return x_fc
elif not self._has_visited(loc + self._get_move(last_action-self.dir),visited_locs):
a = last_action - self.dir
if a>7: a-=8
x_fc[0,a] = 1
if self.dir == -2: self.dir=2
else: self.dir=-2
return x_fc
else:
x_fc[0,last_action-4] = 1
self.locked_in = True
if self.dir == -2: self.dir=2
else: self.dir=-2
return x_fc
def _forward_rows(self, episode):
pass
def forward(self, episode):
if self.mode == 'spiral':
return self._forward_spiral(episode),None
elif self.mode == 'rows':
return self._forward_rows(episode),None
else:
raise(Exception(f"Unknown mode: \t {self.mode} selected for the DeterministicAgent"))
| 4,074 | 27.496503 | 113 |
py
|
airloc
|
airloc-master/networks/resnets.py
|
import torch
import torch.nn as nn
import math
import random
import sys
import os
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
# During dev
#sys.path.append(os.path.abspath('..'))
#execfile(os.path.join(__file__, '../config.py'))
from config import CONFIG
class ResNet18(nn.Module):
def __init__(self, channels_in = 3, output_dim = 512 , use_pretrained_net = False):
super(ResNet18 , self).__init__()
# Construct a ResNet18 submodule
self.resnet = models.resnet18( pretrained = use_pretrained_net)
# Overwrite initial layer to match our specifications
self.resnet.conv1 = nn.Conv2d( in_channels = channels_in , out_channels = 64 ,
kernel_size = 7 , stride = 2 , padding = 3 , bias = False)
self.resnet.fc = nn.Linear(512 , output_dim )
def forward(self, x):
x_res = self.resnet.forward(x)
return x_res
| 983 | 19.5 | 87 |
py
|
airloc
|
airloc-master/training/train_agent.py
|
import traceback
import pdb
from datetime import datetime
import random
import signal
import time
import numpy as np
import json
import os
import sys
# matplotlib is used for debugging image inputs to networks
import matplotlib
import matplotlib.pyplot as plt
from copy import deepcopy
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import math
from config import CONFIG
from utils.utils import load_normalize_data, visualize_trajectory,\
get_deterministic_crops, _setup_fixed_games,\
visualize_batch, setupLogDir
# Import agent utils
from utils.agent_utils import take_step, get_action, run_eval_trajectory, update_net
from networks.agent import Agent
from networks.RandomAgent import RandomAgent
from networks.rnn_agents import LSTMAgent
from utils.stat_collector import StatCollector
from torch.utils.tensorboard import SummaryWriter
from utils.training_utils import BatchStorage , EpisodeStorage
# Creates logging directory for this run
setupLogDir(CONFIG.STATS_log_dir)
def signalInterruptHandler(*args):
"""
Signal handler for the interrupt signal. Asks whether to exit and optionally saves the current network weights.
"""
response = ''
if CONFIG.MISC_main_pid != os.getpid():
return
while (response != "y" and response != "n"):
response = input("Program interrupted, do you want to exit? (y/n)\t")
if response == "n":
print("Continuing training!")
return
elif response == "y":
# Aborting training save information and exit
info['FinishedOnIter'] = tot_itr
with open(os.path.join(CONFIG.STATS_log_dir , "info.json") , 'w') as info_file:
json.dump(info , info_file ,indent = 4) # Save final model
response = ''
while (response != "y" and response != "n"):
response = input("Do you want to save network weights? (y/n)\t")
if response == "y":
print("Saving network weights!")
torch.save(agent_net.state_dict() , os.path.join(CONFIG.STATS_log_dir , "final_model"))
elif response == "n":
print("Not saving network weights!")
# Needs to terminate from main thread. There is a check for this variable in initialize episode
CONFIG.TERMINATE = True
def signalSIGUSR1Handler(*args):
"""
When the program receives SIGUSR1, print the log directory.
"""
if CONFIG.MISC_main_pid == os.getpid():
print("\nLog directory:\t%s\n" % CONFIG.STATS_log_dir)
info = dict([
("AgentType" ,CONFIG.RL_agent_network),
("PatchEmbedder" , CONFIG.RL_patch_embedder),
("Completed" , False),
('Metrics' , [
'Steps',
'FinalDistanceOnlyFailure',
'IoU',
'ValSteps',
'ValFinalDistanceOnlyFailure',
'ValIoU',
'StepMSE',
'StepMax',
'CumulativeRewardToGo',
'HasConverged',
'StepRatioOnlySuccess',
'Entropy',
'Loss',
'Difficulty',
'ActionsTaken',
'ValDifficulty',
'ValHasConverged',
'ValStepRatioOnlySuccess',
'ValCumulativeRewardToGo',
'FullValSteps',
'FullValIoU',
'FullValDistance',
'FullValHasConverged',
'ValActionsTaken',
'SeparatedSteps',
'SeparatedIoU',
'SeparatedCumulativeRewardToGo',
'SeparatedHasConverged',
'ValSeparatedSteps',
'ValSeparatedIoU',
'ValSeparatedCumulativeRewardToGo',
'ValSeparatedHasConverged',
]),
("StartedTraining" , str(datetime.now())),
("FinishedTraining" , 0),
("Dataset",CONFIG.MISC_dataset),
("MultiplyImages" , CONFIG.RL_multiply_images),
("NbrOfTrainableParameters" , 0),
("AgentClass" , "RL"),
("FinishedOnIter" , 0),
("FullValIters", []), # At which iterations is the model evaluated on full validation
])
# Set random seed
random.seed(CONFIG.MISC_random_seed)
np.random.seed(CONFIG.MISC_random_seed)
torch.manual_seed(CONFIG.MISC_random_seed)
# load the dataset
trainloader,valloader = load_normalize_data(download = False, batch_size = CONFIG.RL_batch_size , multiply_images = CONFIG.RL_multiply_images)
valloader_iterator = iter(valloader)
im_H, im_W = CONFIG.MISC_im_size
p_H,p_W = CONFIG.MISC_patch_size
max_len_batch = CONFIG.RL_max_episode_length*CONFIG.RL_batch_size * CONFIG.RL_multiply_images
ep_len = CONFIG.RL_max_episode_length
device = torch.device("cuda:0" if CONFIG.MISC_use_gpu and torch.cuda.is_available() else "cpu")
# Make device globaly available
CONFIG.device = device
# Setup Agent
if CONFIG.RL_agent_network == 'LSTMAgent':
agent_net = LSTMAgent()
elif CONFIG.RL_agent_network == 'Agent':
agent_net = Agent()
elif CONFIG.RL_agent_network == 'RandomAgent':
agent_net = RandomAgent()
else:
raise "Unknown RL agent selected."
agent_net = agent_net.to(device)
agent_parameters = filter(lambda p: p.requires_grad, agent_net.parameters())
params = sum([np.prod(p.size()) for p in agent_parameters])
# Add number of parameters to the info json
info['NbrOfTrainableParameters'] = int(params)
# Setup loss
criterion = nn.MSELoss()
if CONFIG.RL_optimizer == 'sgd':
optimizer = optim.SGD(agent_net.parameters() , lr = CONFIG.RL_learning_rate ,
weight_decay = CONFIG.RL_weight_decay ,
momentum = CONFIG.RL_momentum)
elif CONFIG.RL_optimizer == 'adam':
optimizer = optim.Adam(agent_net.parameters() , lr = CONFIG.RL_learning_rate ,
weight_decay = CONFIG.RL_weight_decay ,
betas = (CONFIG.RL_beta1 , CONFIG.RL_beta2) )
# Write info dictionary to log directory
with open(os.path.join(CONFIG.STATS_log_dir , "info.json") , 'w') as info_file:
json.dump(info , info_file ,indent = 4)
# Setup StatCollector
metrics = info['Metrics']
exclude_prints = [
'ActionsTaken' , 'ValActionsTaken', 'ValPropTime', 'ValStepTime', 'StepTime', 'SeparatedSteps', 'SeparatedIoU',
'SeparatedCumulativeRewardToGo', 'SeparatedHasConverged', 'ValSeparatedSteps', 'ValSeparatedIoU',
'ValSeparatedCumulativeRewardToGo', 'ValSeparatedHasConverged','CumulativeRewardToGo', 'ValCumulativeRewardToGo',
'StepRatio','ValStepRatio', 'HasConverged' , 'ValHasConverged','ValCorrectActions','CorrectActions'
] # Does not print these statistics
tot_nbr_iter = CONFIG.RL_nbr_epochs * len(trainloader)
tot_itr = 0
sc = StatCollector(CONFIG.STATS_metrics_dir, tot_nbr_iter , print_iter = CONFIG.MISC_print_iter, exclude_prints = exclude_prints)
# Add all metrics to StatCollector
for metric in metrics:
sc.register(metric , {'type':'avg' ,'freq':'step'})
# Open statistics for dataset to find unnormalizing transform
if CONFIG.MISC_dataset.startswith('custom_'):
stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
else:
stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
with open(stat_path) as json_file:
stats = json.load(json_file)
dataset_means = torch.tensor(stats['means'][:3])
dataset_stds = torch.tensor(stats['stds'][:3])
unNormImage = transforms.Normalize( ( - dataset_means / dataset_stds).tolist() , (1.0 / dataset_stds).tolist() )
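# Normalize(-mean/std, 1/std) inverts Normalize(mean, std) (x_norm * std + mean); only used when visualizing trajectories.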
# Storage objects for all actions, weights, rewards etcetera
batch = BatchStorage(sc)
episode = EpisodeStorage()
val_batch = BatchStorage(sc)
val_episode = EpisodeStorage()
if CONFIG.MISC_include_baseline:
base_batch = BatchStorage(sc)
base_episode = EpisodeStorage()
base_agent = RandomAgent()
# If enabled there will be an entropy bonus and a linear annealing of this bonus
entropy_bonus = CONFIG.RL_entropy
if CONFIG.RL_entropy_lower is not None and CONFIG.RL_entropy is not None:
# Exponential (geometric) annealing of the entropy bonus
entropy_anneal_k_exp = math.exp( math.log( CONFIG.RL_entropy_lower / entropy_bonus) / tot_nbr_iter)
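# Multiplying the bonus by this factor every iteration gives a geometric decay: since k = exp(ln(lower/initial) / T), the bonus reaches RL_entropy_lower after T = tot_nbr_iter iterations.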
# Increase recursion limit for debugging
sys.setrecursionlimit(2000)
# Initialize all locations to None to avoid crashing later
loc_goal = None
loc_start = None
start_coord = None
goal_coord = None
rep_counter = 91
# Print out info regarding this training run
print("Starting training at:\t%s" % info['StartedTraining'])
print("Agent Network:\t%s" % CONFIG.RL_agent_network)
print("Patch Embedder:\t%s" % CONFIG.RL_patch_embedder)
print("Dataset:\t%s" % CONFIG.MISC_dataset)
print("Trainloader length:\t%d" % len(trainloader) )
print("Valloader length:\t%d" % len(valloader) )
# Attach signal handler for Interrupt signal
signal.signal(signal.SIGINT , signalInterruptHandler)
signal.signal(signal.SIGUSR1 , signalSIGUSR1Handler)
CONFIG.TERMINATE = False
try:
for epoch in range(CONFIG.RL_nbr_epochs):
# Passes through the dataset in batches
for batch_counter,batch_data in enumerate(trainloader):
batch_images , (start_crops_ , goal_crops_) = batch_data
batch.initialize(batch_size = len(batch_images))
# Each image/episode is handled separately
for (episode_counter, episode_data) in enumerate(batch_images):
full_image = episode_data[None,:].to(device)
loc_start , loc_goal = None, None
# Initializes all training tensors and sets start and goal patch
episode.initialize(image = full_image, loc_goal = loc_goal, loc_start = loc_start)
done = False
# Run one episode of training
while not done:
# Get an action from the agent, given current trajectory
action, softmax_embedding = get_action(agent_net , episode)
# Update environment according to action
loc_next, reward, done = take_step(action , episode, softmax_embedding)
# Get the visible crop at current position
crop_current,loc_current = get_deterministic_crops( full_image, coords = loc_next[0])
# Update the episode
episode.update(action, reward, loc_current , crop_current)
# Finish the episode, count rewards to go, iou etcetera
episode.finish()
# RNN networks need to reset their hidden states
if agent_net.AGENT_TYPE == 'RNN':
agent_net.reset()
# Debugging: Visualize training trajectory
#visualize_trajectory(episode, save_name= 'train_vis_%d_%d' %(tot_itr,episode_counter),transform = unNormImage)
# Add result from episode to batch
batch.append_episode(episode)
batch.prepare_for_loss()
if torch.isnan(batch.weights[:,0]).any(): print("Weights contain NaN")
#batch.sc.s('GradientSize').collect(agent_net.patch_emb.common_fc_2.weight.grad.abs().mean().item())
prev_net = deepcopy(agent_net)
t = time.perf_counter()
update_net(batch, agent_net, optimizer, entropy_bonus)
batch.proptime = time.perf_counter() - t
batch.store()
se = 0
max_step = 0.
tot_params = 0
for params1,params2 in zip(agent_net.parameters(),prev_net.parameters()):
temp_se = (params1-params2).square().sum().item()
se += temp_se
max_step = np.maximum(max_step, (params1-params2).abs().max().item())
if temp_se > 0:
tot_params += torch.numel(params1)
se = np.sqrt(se/tot_params)
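# se is the RMS change of the parameters in this update (counting only tensors that actually changed) and max_step the largest absolute change; both are logged purely as optimization diagnostics.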
# Old loss update method now we send in the optimizer to the
# network
batch.sc.s('StepMSE').collect(se)
batch.sc.s('StepMax').collect(max_step)
# Run the current model on one batch from the valloader
with torch.no_grad():
try:
images , (start_coords_, goal_coords_) = next(valloader_iterator)
except StopIteration:
valloader_iterator = iter(valloader)
images , (start_coords_, goal_coords_) = next(valloader_iterator)
val_batch.initialize(batch_size=len(images))
if tot_itr % CONFIG.MISC_save_vis_iter == 0 and CONFIG.MISC_include_baseline:
base_batch.initialize(batch_size=len(images))
for (counter_val , episode_data) in enumerate(images):
episode_image = episode_data[None , :].to(device)
start_coord , goal_coord = None, None
# RNN networks need to reset their hidden states
if agent_net.AGENT_TYPE == 'RNN':
agent_net.reset()
val_episode = run_eval_trajectory( episode_image, val_episode, agent_net, loc_start = start_coord , loc_goal= goal_coord)
# RNN networks need to reset their hidden states
if agent_net.AGENT_TYPE == 'RNN':
agent_net.reset()
if tot_itr % CONFIG.MISC_save_vis_iter == 0 and CONFIG.MISC_include_baseline:
loc_start = val_episode.loc_start
loc_goal = val_episode.loc_goal
base_episode = run_eval_trajectory( episode_image,
base_episode,
agent_net, loc_start = loc_start , loc_goal= loc_goal, deterministic=False)
# RNN networks need to reset their hidden states
if agent_net.AGENT_TYPE == 'RNN':
agent_net.reset()
base_batch.append_episode(base_episode)
# RNN networks need to reset their hidden states
if agent_net.AGENT_TYPE == 'RNN':
agent_net.reset()
val_batch.append_episode(val_episode)
if tot_itr % CONFIG.MISC_save_vis_iter == 0:
visualize_batch(val_batch, PATH = CONFIG.STATS_vis_dir, transform = unNormImage, save_name = 'val_batch_' + str(tot_itr))
if CONFIG.MISC_include_baseline:
visualize_batch(base_batch, PATH = CONFIG.STATS_vis_dir, transform = unNormImage, save_name = 'val_batch_' + str(tot_itr), prefix='random')
# Save result
val_batch.store(mode = 'Val')
if CONFIG.RL_entropy_lower is not None and CONFIG.RL_entropy is not None:
entropy_bonus *= entropy_anneal_k_exp
if tot_itr % CONFIG.MISC_print_iter == 0 or tot_itr == tot_nbr_iter:
print("Iter: %d / %d" % (tot_itr , tot_nbr_iter))
batch.sc.print()
batch.sc.save()
# Increment total iteration counter by one
tot_itr += 1
if tot_itr % CONFIG.MISC_save_model_iter == 0:
torch.save(agent_net.state_dict(), os.path.join(CONFIG.STATS_log_dir, "model_%d" % tot_itr))
# BATCH COMPLETE
except Exception as e:
info['Exception'] = str(e)
info['BackTrace'] = traceback.format_exc()
info['FinishedTraining'] = str(datetime.now())
info['FinishedOnIter'] = tot_itr
with open(os.path.join(CONFIG.STATS_log_dir , "info.json") , 'w') as info_file:
json.dump(info , info_file ,indent = 4)
print("\nAn exception occurred in the main loop!\n" + "\n"+ "#"*60 + "\n")
print(info['BackTrace'])
print("#"*60)
# Enter pdb to investigate
pdb.set_trace()
print("Training finished!")
info['Completed'] = True
info["FinishedTraining"] = str(datetime.now())
info['FinishedOnIter'] = tot_itr
# Write completed status to info.json
with open(os.path.join(CONFIG.STATS_log_dir , "info.json") , 'w') as info_file:
json.dump(info , info_file ,indent = 4) # Save final model
torch.save(agent_net.state_dict() , os.path.join(CONFIG.STATS_log_dir , "final_model"))
| 16,125 | 37.122931 | 163 |
py
|
airloc
|
airloc-master/training/__init__.py
| 0 | 0 | 0 |
py
|
|
airloc
|
airloc-master/training/run_deterministic.py
|
from datetime import datetime
import random
import time
import numpy as np
import json
from shutil import copyfile
import os
import sys
# matplotlib is used for debugging image inputs to networks
import matplotlib
import matplotlib.pyplot as plt
#matplotlib.use('TkAgg')
from copy import deepcopy
import torch
from torch.distributions.multivariate_normal import MultivariateNormal
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import sys
import math
from config import CONFIG
from utils.utils import load_normalize_data, visualize_trajectory, get_random_crops,\
get_deterministic_crops , get_crop_distance , compute_iou,\
check_outside, _setup_fixed_games, visualize_batch
# Import agent utils
from utils.agent_utils import get_reward , compute_loss , \
take_step , check_if_done , \
rewards_to_go , get_policy ,\
get_action , run_eval_trajectory
from doerchnet.utils import sample_doerch_crops
from networks.rl_agent import AJRLAgent, PatchResNetAgent
from networks.agent import Agent
from networks.RandomAgent import RandomAgent
from networks.pretrained_resnet_agent import PretrainedResNetAgent
from networks.rnn_agents import LSTMAgent
from networks.deterministic_agent import DeterministicAgent
from utils.stat_collector import StatCollector
from torch.utils.tensorboard import SummaryWriter
from utils.training_utils import BatchStorage , EpisodeStorage
#Create a directory to save the info
if not os.path.exists(CONFIG.STATS_log_dir_base):
os.makedirs(CONFIG.STATS_log_dir_base)
os.makedirs(CONFIG.STATS_log_dir, exist_ok = False)
# Save visualizations in separate directory
vis_dir = os.path.join(CONFIG.STATS_log_dir, "visualizations")
os.makedirs(vis_dir)
scripts_dir = os.path.join(CONFIG.STATS_log_dir, "saved_scripts")
os.makedirs(scripts_dir)
# Save this training file
copyfile("training/train_agent.py" , os.path.join(scripts_dir, "train_agent.py"))
# Save config file
copyfile("config.py" , os.path.join(scripts_dir , "config.py"))
# Save network
copyfile("networks/early_rl_agents.py" , os.path.join(scripts_dir , "early_rl_agents.py"))
copyfile("networks/resnets.py" , os.path.join(scripts_dir, "resnets.py"))
copyfile("networks/rnn_agents.py", os.path.join(scripts_dir, "rnn_agents.py"))
# Save Utils files
copyfile("utils/utils.py", os.path.join(scripts_dir, "utils.py"))
copyfile("utils/training_utils.py" , os.path.join(scripts_dir , "training_utils.py"))
copyfile("utils/agent_utils.py" , os.path.join(scripts_dir, "agent_utils.py"))
# TODO - Add pretrained log dir
# Create folder for saving intermediate models
os.makedirs(os.path.join(CONFIG.STATS_log_dir, "models"))
metrics_dir = os.path.join(CONFIG.STATS_log_dir, "metrics")
os.makedirs(metrics_dir)
info = dict([
("AgentType" ,CONFIG.RL_agent_network),
("PatchEmbedder" , CONFIG.RL_patch_embedder),
("Completed" , False),
('Metrics' , [
'Steps',
'FinalDistanceOnlyFailure',
'IoU',
'ValSteps',
'ValFinalDistanceOnlyFailure',
'ValIoU',
# 'GradientSize',
#'GradientSize',
'CumulativeRewardToGo',
'HasConverged',
'StepRatioOnlySuccess',
#'StepTime',
#'PropTime',
#'CorrectActions',
'ActionsTaken',
'ValHasConverged',
'ValStepRatioOnlySuccess',
'ValCumulativeRewardToGo',
#'ValStepTime',
#'ValPropTime',
'FullValSteps',
'FullValIoU',
'FullValDistance',
'FullValHasConverged',
'ValActionsTaken',
#'ValCorrectActions',
'SeparatedHasConverged',
'SeparatedCumulativeRewardToGo',
'SeparatedSteps',
'SeparatedIoU',
'ValSeparatedHasConverged',
'ValSeparatedCumulativeRewardToGo',
'ValSeparatedSteps',
'ValSeparatedIoU',
]),
("StartedTraining" , str(datetime.now())),
("FinishedTraining" , 0),
("Dataset",CONFIG.MISC_dataset),
("MultiplyImages" , CONFIG.RL_multiply_images),
("NbrOfTrainableParameters" , 0),
("AgentClass" , "RL"),
("FullValIters", []) # At which iterations is the model evaluated on full validation
])
# Set random seed
random.seed(CONFIG.MISC_random_seed)
np.random.seed(CONFIG.MISC_random_seed)
torch.manual_seed(CONFIG.MISC_random_seed)
# load the dataset
trainloader,valloader = load_normalize_data(download = False, batch_size = CONFIG.RL_batch_size , multiply_images = CONFIG.RL_multiply_images)
valloader_iterator = iter(valloader)
im_H, im_W = CONFIG.MISC_im_size
p_H,p_W = CONFIG.MISC_patch_size
max_len_batch = CONFIG.RL_max_episode_length*CONFIG.RL_batch_size * CONFIG.RL_multiply_images
ep_len = CONFIG.RL_max_episode_length
device = torch.device("cuda:0" if CONFIG.MISC_use_gpu and torch.cuda.is_available() else "cpu")
# Make device globaly available
CONFIG.device = device
# Setup Agent
if CONFIG.RL_agent_network == 'RandomAgent':
agent_net = RandomAgent()
elif CONFIG.RL_agent_network == 'SpiralAgent':
agent_net = DeterministicAgent(mode = 'spiral')
else:
raise "Unknown RL agent selected."
agent_net = agent_net.to(device)
agent_parameters = filter(lambda p: p.requires_grad, agent_net.parameters())
params = sum([np.prod(p.size()) for p in agent_parameters])
# Add number of parameters to the info json
info['NbrOfTrainableParameters'] = int(params)
# Write info dictionary to log directory
with open(os.path.join(CONFIG.STATS_log_dir , "info.json") , 'w') as info_file:
json.dump(info , info_file ,indent = 4)
# Setup StatCollector
metrics = info['Metrics']
exclude_prints = ['ActionsTaken' , 'ValActionsTaken', 'ValPropTime', 'ValStepTime', 'StepTime',
'CumulativeRewardToGo', 'ValCumulativeRewardToGo','StepRatio','ValStepRatio',
'HasConverged' , 'ValHasConverged','ValCorrectActions','CorrectActions'] # Does not print these statistics
tot_nbr_iter = CONFIG.RL_nbr_epochs * len(trainloader)
tot_itr = 0
sc = StatCollector(metrics_dir, tot_nbr_iter , print_iter = CONFIG.MISC_print_iter, exclude_prints = exclude_prints)
# Add all metrics to StatCollector
for metric in metrics:
sc.register(metric , {'type':'avg' ,'freq':'step'})
# Open statistics for dataset to find unnormalizing transform
if CONFIG.MISC_dataset.startswith('custom_'):
stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
else:
stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
with open(stat_path) as json_file:
stats = json.load(json_file)
dataset_means = torch.tensor(stats['means'][:3])
dataset_stds = torch.tensor(stats['stds'][:3])
unNormImage = transforms.Normalize( ( - dataset_means / dataset_stds).tolist() , (1.0 / dataset_stds).tolist() )
# Storage objects for all actions, weights, rewards etcetera
batch = BatchStorage(sc)
episode = EpisodeStorage()
val_batch = BatchStorage(sc)
val_episode = EpisodeStorage()
# If enabled there will be an entropy bonus and a linear annealing of this bonus
entropy_bonus = CONFIG.RL_entropy
if CONFIG.RL_entropy_lower is not None and CONFIG.RL_entropy is not None:
# Exponential (geometric) annealing of the entropy bonus (the commented-out line below is a linear variant)
#entropy_anneal_k = (entropy_bonus - CONFIG.RL_entropy_lower ) / tot_nbr_iter
entropy_anneal_k_exp = math.exp( math.log( CONFIG.RL_entropy_lower / entropy_bonus) / tot_nbr_iter)
# Increase recursion limit for debugging
sys.setrecursionlimit(2000)
# Initialize all locations to None to avoid crashing later
loc_goal = None
loc_start = None
start_coord = None
goal_coord = None
rep_counter = 91
# Print out info regarding this training run
print("Starting training at:\t%s" % info['StartedTraining'])
print("Agent Network:\t%s" % CONFIG.RL_agent_network)
print("Patch Embedder:\t%s" % CONFIG.RL_patch_embedder)
print("Dataset:\t%s" % CONFIG.MISC_dataset)
try:
for epoch in range(CONFIG.RL_nbr_epochs):
# Passes through the dataset in batches
for batch_counter,batch_data in enumerate(trainloader):
batch_images , (start_crops_ , goal_crops_) = batch_data
batch.initialize(batch_size = len(batch_images))
# Each image/episode is handled separately
for (episode_counter, episode_data) in enumerate(batch_images):
full_image = episode_data[None,:].to(device)
loc_start , loc_goal = None, None
# Initializes all training tensors and sets start and goal patch
episode.initialize(image = full_image, loc_goal = loc_goal, loc_start = loc_start)
done = False
# Run one episode of training
while not done:
# Get an action from the agent, given current trajectory
action, softmax_embedding = get_action(agent_net , episode)
# Update environment according to action
loc_next, reward, done = take_step(action , episode, softmax_embedding)
# Get the visible crop at current position
crop_current,loc_current = get_deterministic_crops( full_image, coords = loc_next[0])
# Update the episode
episode.update(action, reward, loc_current , crop_current)
# Finish the episode, count rewards to go, iou etcetera
episode.finish()
# RNN networks need to reset their hidden states
if agent_net.AGENT_TYPE == 'RNN':
agent_net.reset()
# Add result from episode to batch
batch.append_episode(episode)
batch.prepare_for_loss()
batch.store()
# Run the current model on one batch from the valloader
with torch.no_grad():
try:
images , (start_coords_, goal_coords_) = next(valloader_iterator)
except StopIteration:
valloader_iterator = iter(valloader)
images , (start_coords_, goal_coords_) = next(valloader_iterator)
val_batch.initialize(batch_size=len(images))
for (counter_val , episode_data) in enumerate(images):
episode_image = episode_data[None , :].to(device)
start_coord , goal_coord = None, None
val_episode = run_eval_trajectory( episode_image, val_episode, agent_net, loc_start = start_coord , loc_goal= goal_coord)
val_batch.append_episode(val_episode)
if tot_itr % CONFIG.MISC_save_vis_iter == 0:
visualize_batch(val_batch, PATH = vis_dir, transform = unNormImage, save_name = 'val_batch_' + str(tot_itr))
# Save result
val_batch.store(mode = 'Val')
if CONFIG.RL_entropy_lower is not None and CONFIG.RL_entropy is not None:
entropy_bonus *= entropy_anneal_k_exp
if tot_itr % CONFIG.MISC_print_iter == 0 or tot_itr == tot_nbr_iter:
print("Iter: %d / %d" % (tot_itr , tot_nbr_iter))
batch.sc.print()
batch.sc.save()
# Increment total iteration counter by one
tot_itr += 1
# BATCH COMPLETE
except KeyboardInterrupt:
print("\nInterrupted")
while True:
i = input("\n Save model? (y/n)")
if i == "y":
print("Saving Model")
torch.save(agent_net.state_dict() , os.path.join(CONFIG.STATS_log_dir , "final_model"))
sys.exit(1)
elif i == "n":
print("Not Saving Model")
sys.exit(1)
print("No valid input")
print("Training finished!")
info['Completed'] = True
info["FinishedTraining"] = str(datetime.now())
# Write completed status to info.json
with open(os.path.join(CONFIG.STATS_log_dir , "info.json") , 'w') as info_file:
json.dump(info , info_file ,indent = 4) # Save final model
torch.save(agent_net.state_dict() , os.path.join(CONFIG.STATS_log_dir , "final_model"))
| 12,201 | 33.179272 | 142 |
py
|
airloc
|
airloc-master/eval/eval_agent.py
|
from datetime import datetime
import random
import time
import numpy as np
import json
import os
import sys
import importlib
# matplotlib is used for debugging image inputs to networks
import matplotlib
import matplotlib.pyplot as plt
#matplotlib.use('TkAgg')
import torch
import torchvision.transforms as transforms
import argparse
from config import CONFIG
from utils.utils import load_normalize_data, visualize_batch, find_latest_log
# Import agent utils
from utils.agent_utils import run_eval_trajectory
from networks.agent import Agent
from networks.RandomAgent import RandomAgent
from networks.rnn_agents import LSTMAgent
from utils.stat_collector import StatCollector
from torch.utils.tensorboard import SummaryWriter
from utils.training_utils import BatchStorage, EpisodeStorage
from config import CONFIG
def replace_config(loaded_config):
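# Overwrite the current CONFIG with the training-time config saved in the log, but keep evaluation-specific keys (EVAL*), the device, the main PID, the dataset settings and the allowed-outside flag from the present run.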
for key in loaded_config.keys():
if 'EVAL' not in key and 'device' not in key and 'MISC_main_pid' not in key and not 'dataset' in key and not 'allowed_outside' in key:
CONFIG[key] = loaded_config[key]
def main(args ):
if args.seed:
seed = args.seed
else:
seed = 0
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# Get the log in the correct position in the log dir
log_base = "logs"
if args.log_dir:
log_dir = args.log_dir
else:
log_dir = find_latest_log(log_base , args.n)
eval_metrics_path = os.path.join(log_base,log_dir,'metrics_eval')
os.makedirs(eval_metrics_path,exist_ok=True)
# Import the CONFIG file from the log
scripts_dir = os.path.join(log_base,log_dir)
networks_file_path = os.path.join(scripts_dir, "config.py")
spec = importlib.util.spec_from_file_location("config",networks_file_path)
config_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(config_module)
loaded_config = config_module.CONFIG
replace_config(loaded_config)
CONFIG.STATS_dir_base = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..')
CONFIG.STATS_log_dir_base = os.path.join(CONFIG.STATS_dir_base, log_base)
CONFIG.STATS_log_dir = os.path.join(CONFIG.STATS_log_dir_base,log_dir)
CONFIG.MISC_vis_per_batch = 1
CONFIG.MISC_vis_iter = 1
CONFIG.TERMINATE = False
vis_dir = os.path.join(log_base,log_dir, "visualizations")
# The info dictionary for what to store
info = dict([
("AgentType" ,CONFIG.RL_agent_network),
("PatchEmbedder" , CONFIG.RL_patch_embedder),
("Completed" , False),
('Metrics' , [
'EpisodeTime',
'StocSteps',
'DetSteps',
'StocIoU',
'DetIoU',
'StocStepRatioOnlySuccess',
'DetStepRatioOnlySuccess',
'StocFinalDistanceOnlyFailure',
'DetFinalDistanceOnlyFailure',
'StocCumulativeRewardToGo',
'DetCumulativeRewardToGo',
'StocHasConverged',
'DetHasConverged',
'StocDifficulty',
'DetDifficulty',
'StocActionsTaken',
'DetActionsTaken',
'StocSeparatedSteps',
'DetSeparatedSteps',
'StocSeparatedIoU',
'DetSeparatedIoU',
'StocSeparatedCumulativeRewardToGo',
'DetSeparatedCumulativeRewardToGo',
'StocSeparatedHasConverged',
'DetSeparatedHasConverged',
'StocGoalLoc',
'DetGoalLoc',
'StocActionProbs',
'DetActionProbs',
]),
("StartedEvalAt" , str(datetime.now())),
("FinishedTraining" , 0),
("Dataset",CONFIG.MISC_dataset),
("MultiplyImages" , CONFIG.RL_multiply_images),
("NbrOfTrainableParameters" , 0),
("AgentClass" , "RL"),
("FullValIters", []) # At which iterations is the model evaluated on full validation
])
_,valloader = load_normalize_data(download = False, batch_size = 1 ,
multiply_images = 1,split=args.split,
use_eval_split = args.eval_split
)
device = torch.device("cuda:0" if CONFIG.MISC_use_gpu and torch.cuda.is_available() else "cpu")
# Make device globaly available
CONFIG.device = device
# Setup Agent
if CONFIG.RL_agent_network == 'LSTMAgent':
agent_net = LSTMAgent()
elif CONFIG.RL_agent_network == 'Agent':
agent_net = Agent()
elif CONFIG.RL_agent_network == 'RandomAgent':
agent_net = RandomAgent()
else:
raise "Unknown RL agent selected."
agent_net.load_state_dict(torch.load(os.path.join(log_base,log_dir,'final_model')), strict=False)
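# strict=False tolerates checkpoints whose key sets differ slightly from the current model definition (e.g. missing or extra frozen submodules).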
agent_net.eval()
agent_net = agent_net.to(device)
metrics = info['Metrics']
exclude_prints = [
'StocFinalDistanceOnlyFailure',
'StocCumulativeRewardToGo',
'StocStepRatioOnlySuccess',
'StocActionsTaken',
'StocSeparatedSteps',
'StocSeparatedIoU',
'StocSeparatedCumulativeRewardToGo',
'StocSeparatedHasConverged',
'DetFinalDistanceOnlyFailure',
'DetStepRatioOnlySuccess',
'DetCumulativeRewardToGo',
'DetActionsTaken',
'DetSeparatedSteps',
'DetSeparatedIoU',
'DetSeparatedCumulativeRewardToGo',
'DetSeparatedHasConverged',
'StocGoalLoc',
'DetGoalLoc',
'StocActionProbs',
'DetActionProbs',
] # Does not print these statistics
num_passes = 1
tot_nbr_iter = num_passes* len(valloader)
tot_itr = 0
sc = StatCollector(eval_metrics_path, tot_nbr_iter , print_iter = CONFIG.MISC_print_iter, exclude_prints = exclude_prints)
# Add all metrics to StatCollector
for metric in metrics:
sc.register(metric , {'type':'avg' ,'freq':'step'})
# Open statistics for dataset to find unnormalizing transform
if CONFIG.MISC_dataset.startswith('custom_'):
stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
else:
stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
with open(stat_path) as json_file:
stats = json.load(json_file)
dataset_means = torch.tensor(stats['means'][:3])
dataset_stds = torch.tensor(stats['stds'][:3])
unNormImage = transforms.Normalize( ( - dataset_means / dataset_stds).tolist() , (1.0 / dataset_stds).tolist() )
# Storage objects for all actions, weights, rewards etcetera
stoc_batch = BatchStorage(sc)
stoc_episode = EpisodeStorage()
det_batch = BatchStorage(sc)
det_episode = EpisodeStorage()
# Print out info regarding this training run
print("Starting eval at:\t%s" % info['StartedEvalAt'])
print("Agent Network:\t%s" % CONFIG.RL_agent_network)
print("Patch Embedder:\t%s" % CONFIG.RL_patch_embedder)
print("Dataset:\t%s" % CONFIG.MISC_dataset)
misc_info = []
with torch.no_grad():
for epoch in range(num_passes):
# Passes through the dataset in batches
for batch_counter,batch_data in enumerate(valloader):
# Get the images from the batch
batch_images , (start_crops_ , goal_crops_) = batch_data
# Initialize the utils
stoc_batch.initialize(batch_size = len(batch_images))
det_batch.initialize(batch_size = len(batch_images))
# Loop over the batch of images
for (episode_counter, episode_data) in enumerate(batch_images):
episode_image = episode_data[None, :].to(device)
start_coord, goal_coord = start_crops_[0, :], goal_crops_[0, :]
# TODO: ENABLE NOT RUNNING STOC TO SAVE TIME !!
#stoc_episode = run_eval_trajectory(episode_image, stoc_episode, agent_net, loc_start=start_coord, loc_goal=goal_coord, deterministic=False)
if agent_net.AGENT_TYPE == 'RNN':
agent_net.reset()
#t1 = time.process_time()
det_episode = run_eval_trajectory(episode_image, det_episode, agent_net, loc_start=start_coord, loc_goal=goal_coord, deterministic=True)
if True:
misc_info.append(np.concatenate([det_episode.actions.cpu().detach().numpy(),
np.squeeze(det_episode.misc, axis=1),
det_episode.weights.cpu().detach().numpy()[:, np.newaxis],
det_episode.dists.cpu().detach().numpy()[:, np.newaxis]], axis=1))
#t2 = t1 - time.process_time()
if agent_net.AGENT_TYPE == 'RNN':
agent_net.reset()
#stoc_batch.append_episode(stoc_episode)
det_batch.append_episode(det_episode)
#stoc_batch.store(mode = 'Stoc',eval=True)
det_batch.store(mode = 'Det',eval=True)
#det_batch.sc.s('EpisodeTime').collect(t2)
if tot_itr % CONFIG.EVAL_save_vis_iter == 0 :
visualize_batch(det_batch, PATH = vis_dir, transform = unNormImage, save_name = 'eval_' + str(tot_itr), prefix='Det')
#visualize_batch(stoc_batch, PATH = vis_dir, transform = unNormImage, save_name = 'eval_' + str(tot_itr), prefix='Stoc')
if tot_itr % CONFIG.MISC_print_iter == 0 or tot_itr == tot_nbr_iter:
print("Iter: %d / %d" % (tot_itr , tot_nbr_iter))
det_batch.sc.print()
det_batch.sc.save()
# Increment total iteration counter
tot_itr += 1
print("Iter: %d / %d" % (tot_itr , tot_nbr_iter))
det_batch.sc.save()
stat_path = os.path.join(CONFIG.STATS_log_dir,'final_stats.txt')
with open(stat_path, 'a') as file:
print(f"Restults from {CONFIG.MISC_dataset} using {args.split}-set", file=file)
print(f"Restults from {CONFIG.MISC_dataset} using {args.split}-set")
det_batch.sc.print()
det_batch.sc.exclude_prints = None
det_batch.sc.print(path=stat_path)
np.save('misc_info', misc_info)
print("Evaluation completed!")
if __name__ == '__main__':
# Command-line interface: select which log to evaluate, the random seed and the data split
parser = argparse.ArgumentParser()
log_args = parser.add_mutually_exclusive_group()
log_args.add_argument( "-n", type = int, help = "Select log number",default = 0)
log_args.add_argument("--log_dir", type = str, help = "Select log name", default=None)
parser.add_argument("--no_cuda", action='store_true', help = "Disable cuda", default=False)
parser.add_argument("--seed", type = int, help = "Set seed", default=None)
parser.add_argument("--split", type = str, help = "Set split", default='val')
parser.add_argument("--eval_split", type = str, help = "Set split", default='basic')
args = parser.parse_args()
main(args)
| 11,306 | 40.417582 | 160 |
py
|
airloc
|
airloc-master/eval/__init__.py
| 0 | 0 | 0 |
py
|
|
airloc
|
airloc-master/logs/2022-06-23_08-29-00-213310_residual_nosemseg_seed0/config.py
|
"""
Central configuration file for the project. Acts as a storage
of global variables and various configuration settings.
TODO TODO TODO: Remove unnecessary fields below; this is copied from an earlier
project
"""
import os
import pprint
from easydict import EasyDict as edict
from datetime import datetime
import socket # To resolve host name and set path appropriately
# TODO: If we're to use rllib, then this may need to be uncommented.
#from rllib.utils import rllib_get_config
CONFIG = edict()
"""
Evaluation RL agent
"""
# These settings control the evaluation runs of the saved agents.
# EVAL_RL_log is which saved agent should be used. If a number n, it picks the n:th,
# latest log availabled. Note. n=1 picks the penultimate available log
# If set to a specific log it tries to load that log
CONFIG.EVAL_RL_log = None
CONFIG.EVAL_RL_saved_logs = False # If enabled picks the model from those in saved_logs
CONFIG.EVAL_RL_multiply_images = 1
CONFIG.EVAL_save_vis_iter = 10
CONFIG.EVAL_RL_use_val_set = True
"""
RL-agent
"""
######################### This is where the important settings start #########################
# Batch n Stuff
CONFIG.RL_nbr_epochs = 10000
CONFIG.RL_batch_size = 32
CONFIG.RL_multiply_images = 2
#CONFIG.RL_max_episode_length = 10
CONFIG.RL_repeat_game = False
CONFIG.MISC_priv = False
# Architecture
CONFIG.RL_agent_network = 'LSTMAgent'
CONFIG.RL_patch_embedder = 'ShareNet'
CONFIG.RL_mask_embedder = 'Regular' # Not active
CONFIG.RL_freeze_patch_embedder = True
CONFIG.RL_priv_pretrained = True
# Optimizer
CONFIG.RL_learning_rate = 1e-4
CONFIG.RL_nbr_eps_update = (CONFIG.RL_batch_size * CONFIG.RL_multiply_images)//1
CONFIG.RL_weight_decay = 0
CONFIG.RL_momentum = 0.90
CONFIG.RL_optimizer = 'adam'
CONFIG.RL_beta1 = 0.9
CONFIG.RL_beta2 = 0.999
#Env setup
CONFIG.RL_agent_allowed_outside = True
CONFIG.RL_normalize_weights = True
CONFIG.RL_normalize_method = 'grid' # When grid game enabled, dist becomes grid_distance
CONFIG.RL_eval_deterministic = True
CONFIG.RL_priv_grid_location = False
CONFIG.RL_priv_use_seg = False
# Continuous
CONFIG.RL_dist_var = 2162.25
CONFIG.RL_anneal_lwr = 132.56 # The lower limit for the annealing of the variance; if None, no annealing
"""
RL Rewards
"""
CONFIG.RL_reward_goal = 3
CONFIG.RL_reward_difficulty = 0
CONFIG.RL_reward_failed = 0
CONFIG.RL_reward_closer = 0
CONFIG.RL_reward_iou_scale = 0
CONFIG.RL_reward_step_outside = 0
CONFIG.RL_reward_distance = False
CONFIG.RL_reward_step = -1
CONFIG.RL_reward_exploit_not_adjacent = 0#-0.4
CONFIG.RL_reward_exploit_adjacent2goal = 0#1
# LSTM Agent settings
CONFIG.RL_LSTM_pos_emb = True
CONFIG.LSTM_global_pos = True
# Pretrained doerch
#CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/without-sem-seg' # without sem-seg
#CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/with-sem-seg' # with sem-seg
#########################This is where they end #########################
CONFIG.RL_max_start_goal_dist = 8
CONFIG.RL_min_start_goal_iou = None
CONFIG.RL_done_iou = 0.40
CONFIG.RL_discount_factor = 0.9
CONFIG.RL_softmax_agent = True
CONFIG.RL_softmax_step_size = 1.1 # A step size of 1 corresponds to non-overlapping patches
CONFIG.RL_entropy = None
CONFIG.RL_entropy_lower = None
CONFIG.RL_froozen_embedder_iter = None
# Agent MISCs
CONFIG.RL_freeze_mask_embedder = False
# Control the difficulty of the sampled games
CONFIG.RL_init_diff = None # [0.8, 0.15, 0.05 , 0.0]
CONFIG.RL_final_diff = None # [0.25 ,0.25, 0.25 , 0.25]
CONFIG.RL_anneal_diff_iter = 10000
# Priv info to agent
CONFIG.RL_agent_mask = False # Agent centric mask
CONFIG.RL_priv_full_image = False
CONFIG.RL_priv_visit_mask = False # Masksize equal to image size
CONFIG.RL_small_mask = 48*2
CONFIG.extended_mask = False
# Transformer Agent Settings
CONFIG.RL_TRANSFORMER_pos_emb = 'half'
# Pretrained segmenter
CONFIG.RL_pretrained_segmentation_net = 'segmentations/logs/sem-seg-model'
CONFIG.RL_predict_seg_mask = False
# Doerch 80 dim
# CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/2022-04-25_13-54-35-226221'
# Used to load the pretrained resnet
CONFIG.RL_pretrained_cifar_net = 'cifar/logs/2022-02-23_16-27-52-943928'
CONFIG.RL_freeze_pretrained_cifar = False
"""
EE-Agent settings
"""
CONFIG.EE_residual = True
CONFIG.EE_hierarch = False
CONFIG.EE_exploit_priv = True
#CONFIG.EE_start_hierarch_it = 10000
"""
One-step supervised agent
"""
CONFIG.ONESTEP_optimizer = 'adam' # 'sgd' or 'adam'
CONFIG.ONESTEP_nbr_epochs = 1200
CONFIG.ONESTEP_learning_rate = 1e-4
CONFIG.ONESTEP_batch_size = 16
CONFIG.ONESTEP_momentum = 0.9
CONFIG.ONESTEP_beta1 = 0.5
CONFIG.ONESTEP_beta2 = 0.999
CONFIG.ONESTEP_weight_decay = 0.0
CONFIG.ONESTEP_use_pen_fun = False
# Networks: SimplestNet,SimplestNet_with_targets,DoerschNet,DoerschNetWithPriv
CONFIG.ONESTEP_network = 'SimplestBranchedNet' # 'DoerschNet' or 'SimplestNet'
CONFIG.ONESTEP_max_start_goal_dist = 100
CONFIG.ONESTEP_min_start_goal_iou = 0.0
# CONFIG.ONESTEP_augment_training_data = True
CONFIG.ONESTEP_pretrain_weights = ""
# Enable privileged information
CONFIG.ONESTEP_priv_full_img = True
CONFIG.ONESTEP_priv_target_distance_fc = False # Adds target distance to final fc layer
CONFIG.ONESTEP_priv_target_distance_ch = False # Adds target distance as a channel in input
CONFIG.ONESTEP_priv_target_direction_fc = True
"""
Exhaustive Search Agent
"""
CONFIG.EXAUST_batch_size=1
CONFIG.EXAUST_stop_iou=1
CONFIG.EXAUST_max_start_goal_dist = None
CONFIG.EXAUST_min_start_goal_iou = None
"""
Random Search baseline agent.
"""
CONFIG.RANDOM_batch_size = 1
CONFIG.RANDOM_using_memory = True #If true the agent cannot visit the same patch twice
CONFIG.RANDOM_max_start_goal_dist = None
CONFIG.RANDOM_min_start_goal_iou = None
CONFIG.RANDOM_stop_iou = 0.2
CONFIG.RANDOM_min_iou_visited = 0.3 # At what IoU should a location be considered already visited
CONFIG.RANDOM_WARNING_steps = 500 # Warn user if agent takes this many steps without finding the goal
"""
Statistics / Logging / Plotting
"""
CONFIG.STATS_dir_base = os.path.dirname(os.path.abspath(__file__))
CONFIG.STATS_log_dir_base = os.path.join(CONFIG.STATS_dir_base, 'logs')
CONFIG.STATS_log_dir = os.path.join(CONFIG.STATS_log_dir_base,
str(datetime.now()).replace(' ', '_')
.replace(':', '-').replace('.', '-'))
CONFIG.STATS_tensorboard_dir = os.path.join(CONFIG.STATS_log_dir, 'tb')
# CONFIG.STATS_save_model_batch = 250
"""
Plotting
"""
CONFIG.PLOT_plot_vis = True # TODO - not yet implemented
CONFIG.PLOT_use_saved_logs = True # This option selects whether to use "saved_logs" or "logs"
CONFIG.PLOT_training = True # plot training data (or eval data)
# The option below lets the user choose which LOG directory to plot information from
# As before, an integer signifies the n:th most recent log
# A specific log name tries to find that directory
CONFIG.PLOT_log_dir = 1
# The option below lets the user choose which EVAL directory to plot information from.
# I.e, choose which eval session to plot from given a specific training session
CONFIG.PLOT_eval_dir = None
"""
Miscellaneous
"""
CONFIG.MISC_include_baseline = True
CONFIG.MISC_use_gpu = False#True
CONFIG.MISC_dataset = 'masa_filt'
CONFIG.MISC_dataset_split_file = None
CONFIG.MISC_grid_game = True
CONFIG.MISC_random_seed = 0
#CONFIG.MISC_rnd_crop = True
CONFIG.MISC_rgb_max = 255
#CONFIG.MISC_im_size = (256, 256)
CONFIG.MISC_step_sz = int(48*CONFIG.RL_softmax_step_size)
CONFIG.MISC_game_size = 5
CONFIG.MISC_im_size = (int(CONFIG.MISC_step_sz*(CONFIG.MISC_game_size-1)+48),
int(CONFIG.MISC_step_sz*(CONFIG.MISC_game_size-1)+48))
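# Illustrative arithmetic for the values above: with RL_softmax_step_size = 1.1 the step
# size becomes int(48*1.1) = 52 pixels, so a 5x5 grid game yields an image size of
# 52*4 + 48 = 256 pixels per side.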
CONFIG.MISC_patch_size = (48, 48)
CONFIG.MISC_print_iter = 50
CONFIG.MISC_save_vis_iter = 400 # How often we save a visualization
CONFIG.MISC_vis_per_batch = 12
CONFIG.MISC_send_to_board = False # Should we send the logging to tensorboard
CONFIG.MISC_use_subset_of_data = None # If set to None uses entire dataset. Otherwise only uses a subset of the images
CONFIG.MISC_use_fixed_patch = None # Use fixed image patches and not change them
CONFIG.MISC_data_aug = True
CONFIG.MISC_save_model_iter = 5000 # How often should we save the model weights
CONFIG.MISC_same_train_eval_set = False
CONFIG.MISC_project_root_path = os.path.dirname(__file__)
CONFIG.MISC_nbr_training_models = None
CONFIG.MISC_main_pid = os.getpid()
CONFIG.MISC_dataset_path = ""
hostname = socket.gethostname()
if hostname in ["john-UX430UA", "anton-Aspire-R5-471T", "dgxrise"]:
CONFIG.MISC_dataset_path = "../../datasets/"
elif hostname == "rise-gpu0":
CONFIG.MISC_dataset_path = "/home/datasets_thesis_aj/"
elif len(CONFIG.MISC_dataset_path) == 0:
print("Unknown computer, set dataset path manually in config.py")
| 8,709 | 32.117871 | 119 |
py
|
airloc
|
airloc-master/logs/2022-09-13_14-07-42-505667_pre_disaster/config.py
|
"""
Central configuration file for the project. Acts as a storage
of global variables and various configuration settings.
TODO TODO TODO: Remove unnecessary fields below; this is copied from an earlier
project
"""
import os
import pprint
from easydict import EasyDict as edict
from datetime import datetime
CONFIG = edict()
"""
Evaluation of RL-agent
"""
# These settings control the evaluation runs of the saved agents.
# EVAL_RL_log selects which saved agent should be used. If a number n, it picks the n:th
# latest available log (note: n=1 picks the penultimate available log)
# If set to a specific log it tries to load that log
CONFIG.EVAL_RL_log = None
CONFIG.EVAL_RL_saved_logs = False # If enabled picks the model from those in saved_logs
CONFIG.EVAL_RL_multiply_images = 1
CONFIG.EVAL_save_vis_iter = 10
CONFIG.EVAL_RL_use_val_set = True
"""
RL-agent
"""
######################### This is where the important settings start #########################
# Batch n Stuff
CONFIG.RL_nbr_epochs = 10000
CONFIG.RL_batch_size = 32
CONFIG.RL_multiply_images = 2
#CONFIG.RL_max_episode_length = 10
CONFIG.MISC_priv = False
# Architecture
CONFIG.RL_agent_network = 'LSTMAgent' # AiRLoc agent
CONFIG.RL_patch_embedder = 'ShareNet'
CONFIG.RL_freeze_patch_embedder = True
CONFIG.RL_priv_pretrained = True
CONFIG.EE_temporal = True
CONFIG.EE_residual = True
# Optimizer
CONFIG.RL_learning_rate = 1e-4
CONFIG.RL_nbr_eps_update = (CONFIG.RL_batch_size * CONFIG.RL_multiply_images)//1
CONFIG.RL_weight_decay = 0
CONFIG.RL_momentum = 0.90
CONFIG.RL_optimizer = 'adam'
CONFIG.RL_beta1 = 0.9
CONFIG.RL_beta2 = 0.999
#Env setup
CONFIG.RL_agent_allowed_outside = True
CONFIG.RL_normalize_weights = True
CONFIG.RL_eval_deterministic = True
CONFIG.RL_priv_grid_location = False
CONFIG.RL_priv_use_seg = False # Set to True when training sem seg-based RL-agent (but False during inference -- should not use ground truth then!)
"""
RL Rewards
"""
CONFIG.RL_reward_goal = 3
CONFIG.RL_reward_failed = 0
CONFIG.RL_reward_closer = 0
CONFIG.RL_reward_iou_scale = 0
CONFIG.RL_reward_step_outside = 0
CONFIG.RL_reward_distance = False
CONFIG.RL_reward_step = -1
# LSTM Agent settings
CONFIG.RL_LSTM_pos_emb = True
# Pretrained doerch
#CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/2022-09-13_08-43-26-264957' # without sem-seg, michael
#CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/without-sem-seg' # without sem-seg
#CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/with-sem-seg' # with sem-seg
######################### This is where they end #########################
CONFIG.RL_max_start_goal_dist = 8
CONFIG.RL_min_start_goal_iou = None
CONFIG.RL_done_iou = 0.40
CONFIG.RL_discount_factor = 0.9
CONFIG.RL_softmax_step_size = 1.1 # A step size of 1 corresponds to non-overlapping patches
CONFIG.RL_entropy = None
CONFIG.RL_entropy_lower = None
# Pretrained segmenter
CONFIG.RL_pretrained_segmentation_net = 'segmentations/logs/sem-seg-model'
CONFIG.RL_predict_seg_mask = False # Set to True during inference if using a sem-seg based RL-agent
"""
Random Search baseline agent
"""
CONFIG.RANDOM_batch_size = 1
CONFIG.RANDOM_using_memory = True # If true, the agent cannot visit the same patch twice
CONFIG.RANDOM_stop_iou = 0.2 # Not used in grid game setup
CONFIG.RANDOM_min_iou_visited = 0.3 # At what IoU should a location be considered already visited (not used in grid game setup)
CONFIG.RANDOM_WARNING_steps = 500 # Warn user if agent takes this many steps without finding the goal
"""
Statistics / Logging / Plotting
"""
CONFIG.STATS_dir_base = os.path.dirname(os.path.abspath(__file__))
CONFIG.STATS_log_dir_base = os.path.join(CONFIG.STATS_dir_base, 'logs')
CONFIG.STATS_log_dir = os.path.join(CONFIG.STATS_log_dir_base,
str(datetime.now()).replace(' ', '_')
.replace(':', '-').replace('.', '-'))
"""
Plotting
"""
# The option below lets the user choose which LOG directory to plot information from
# An integer signifies the n:th most recent log. A specific log name tries to find that directory
CONFIG.PLOT_log_dir = 1
# The option below lets the user choose which EVAL directory to plot information from.
# I.e, choose which eval session to plot from given a specific training session
CONFIG.PLOT_eval_dir = None
"""
Miscellaneous
"""
CONFIG.MISC_include_baseline = True
#CONFIG.MISC_use_gpu = False#True
#CONFIG.MISC_dataset = 'images_post'#'images_pre'
CONFIG.MISC_dataset_split_file = None
CONFIG.MISC_grid_game = True
CONFIG.MISC_random_seed = 0
#CONFIG.MISC_rnd_crop = True
CONFIG.MISC_rgb_max = 255
#CONFIG.MISC_im_size = (256, 256)
CONFIG.MISC_step_sz = int(48*CONFIG.RL_softmax_step_size)
CONFIG.MISC_game_size = 5
CONFIG.MISC_im_size = (int(CONFIG.MISC_step_sz*(CONFIG.MISC_game_size-1)+48),
int(CONFIG.MISC_step_sz*(CONFIG.MISC_game_size-1)+48))
CONFIG.MISC_patch_size = (48, 48)
CONFIG.MISC_print_iter = 50
CONFIG.MISC_save_vis_iter = 400 # How often we save a visualization
CONFIG.MISC_vis_per_batch = 12
CONFIG.MISC_save_model_iter = 5000 # How often should we save the model weights
CONFIG.MISC_project_root_path = os.path.dirname(__file__)
CONFIG.MISC_main_pid = os.getpid()
CONFIG.MISC_dataset_path = "data" # Set accordingly
| 5,241 | 33.946667 | 148 |
py
|
airloc
|
airloc-master/logs/2022-06-23_08-30-12-761837_residual_semseg/config.py
|
"""
Central configuration file for the project. Acts as a storage
of global variables and various configuration settings.
TODO TODO TODO: Remove unnecessary fields below; this is copied from an earlier
project
"""
import os
import pprint
from easydict import EasyDict as edict
from datetime import datetime
import socket # To resolve host name and set path appropriately
# TODO: If we're to use rllib, then this may need to be uncommented.
#from rllib.utils import rllib_get_config
CONFIG = edict()
"""
Evaluation RL agent
"""
# These settings control the evaluation runs of the saved agents.
# EVAL_RL_log selects which saved agent should be used. If a number n, it picks the n:th
# latest available log (note: n=1 picks the penultimate available log)
# If set to a specific log it tries to load that log
CONFIG.EVAL_RL_log = None
CONFIG.EVAL_RL_saved_logs = False # If enabled picks the model from those in saved_logs
CONFIG.EVAL_RL_multiply_images = 1
CONFIG.EVAL_save_vis_iter = 10
CONFIG.EVAL_RL_use_val_set = True
"""
RL-agent
"""
######################### This is where the important settings start #########################
# Batch n Stuff
CONFIG.RL_nbr_epochs = 10000
CONFIG.RL_batch_size = 32
CONFIG.RL_multiply_images = 2
CONFIG.RL_max_episode_length = 10
CONFIG.RL_repeat_game = False
CONFIG.MISC_priv = False
# Architecture
CONFIG.RL_agent_network = 'LSTMAgent'
CONFIG.RL_patch_embedder = 'ShareNet'
CONFIG.RL_mask_embedder = 'Regular' # Not active
CONFIG.RL_freeze_patch_embedder = True
CONFIG.RL_priv_pretrained = True
# Optimizer
CONFIG.RL_learning_rate = 1e-4
CONFIG.RL_nbr_eps_update = (CONFIG.RL_batch_size * CONFIG.RL_multiply_images)//1
CONFIG.RL_weight_decay = 0
CONFIG.RL_momentum = 0.90
CONFIG.RL_optimizer = 'adam'
CONFIG.RL_beta1 = 0.9
CONFIG.RL_beta2 = 0.999
#Env setup
CONFIG.RL_agent_allowed_outside = True
CONFIG.RL_normalize_weights = True
CONFIG.RL_normalize_method = 'grid' # When grid game enabled, dist becomes grid_distance
CONFIG.RL_eval_deterministic = True
CONFIG.RL_priv_grid_location = False
CONFIG.RL_priv_use_seg = False#True
# Continuous
CONFIG.RL_dist_var = 2162.25
CONFIG.RL_anneal_lwr = 132.56 # The lower limit for the annealing of the variance; if None, no annealing
"""
RL Rewards
"""
CONFIG.RL_reward_goal = 3
CONFIG.RL_reward_difficulty = 0
CONFIG.RL_reward_failed = 0
CONFIG.RL_reward_closer = 0
CONFIG.RL_reward_iou_scale = 0
CONFIG.RL_reward_step_outside = 0
CONFIG.RL_reward_distance = False
CONFIG.RL_reward_step = -1
CONFIG.RL_reward_exploit_not_adjacent = 0#-0.4
CONFIG.RL_reward_exploit_adjacent2goal = 0#1
# LSTM Agent settings
CONFIG.RL_LSTM_pos_emb = True
CONFIG.LSTM_global_pos = True
# Pretrained doerch
#CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/without-sem-seg' # without sem-seg
CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/with-sem-seg' # with sem-seg
#########################This is where they end #########################
CONFIG.RL_max_start_goal_dist = 8
CONFIG.RL_min_start_goal_iou = None
CONFIG.RL_done_iou = 0.40
CONFIG.RL_discount_factor = 0.9
CONFIG.RL_softmax_agent = True
CONFIG.RL_softmax_step_size = 1.1 # A step size of 1 corresponds to non-overlapping patches
CONFIG.RL_entropy = None
CONFIG.RL_entropy_lower = None
CONFIG.RL_froozen_embedder_iter = None
# Agent MISCs
CONFIG.RL_freeze_mask_embedder = False
# Control the difficulty of the sampled games
CONFIG.RL_init_diff = None # [0.8, 0.15, 0.05 , 0.0]
CONFIG.RL_final_diff = None # [0.25 ,0.25, 0.25 , 0.25]
CONFIG.RL_anneal_diff_iter = 10000
# Priv info to agent
CONFIG.RL_agent_mask = False # Agent centric mask
CONFIG.RL_priv_full_image = False
CONFIG.RL_priv_visit_mask = False # Masksize equal to image size
CONFIG.RL_small_mask = 48*2
CONFIG.extended_mask = False
# Transformer Agent Settings
CONFIG.RL_TRANSFORMER_pos_emb = 'half'
# Pretrained segmenter
CONFIG.RL_pretrained_segmentation_net = 'segmentations/logs/sem-seg-model'
CONFIG.RL_predict_seg_mask = True#False
# Doerch 80 dim
# CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/2022-04-25_13-54-35-226221'
# Used to load the pretrained resnet
CONFIG.RL_pretrained_cifar_net = 'cifar/logs/2022-02-23_16-27-52-943928'
CONFIG.RL_freeze_pretrained_cifar = False
"""
EE-Agent settings
"""
CONFIG.EE_residual = True
CONFIG.EE_hierarch = False
CONFIG.EE_exploit_priv = True
#CONFIG.EE_start_hierarch_it = 10000
"""
One-step supervised agent
"""
CONFIG.ONESTEP_optimizer = 'adam' # 'sgd' or 'adam'
CONFIG.ONESTEP_nbr_epochs = 1200
CONFIG.ONESTEP_learning_rate = 1e-4
CONFIG.ONESTEP_batch_size = 16
CONFIG.ONESTEP_momentum = 0.9
CONFIG.ONESTEP_beta1 = 0.5
CONFIG.ONESTEP_beta2 = 0.999
CONFIG.ONESTEP_weight_decay = 0.0
CONFIG.ONESTEP_use_pen_fun = False
# Networks: SimplestNet,SimplestNet_with_targets,DoerschNet,DoerschNetWithPriv
CONFIG.ONESTEP_network = 'SimplestBranchedNet' # 'DoerschNet' or 'SimplestNet'
CONFIG.ONESTEP_max_start_goal_dist = 100
CONFIG.ONESTEP_min_start_goal_iou = 0.0
# CONFIG.ONESTEP_augment_training_data = True
CONFIG.ONESTEP_pretrain_weights = ""
# Enable privileged information
CONFIG.ONESTEP_priv_full_img = True
CONFIG.ONESTEP_priv_target_distance_fc = False # Adds target distance to final fc layer
CONFIG.ONESTEP_priv_target_distance_ch = False # Adds target distance as a channel in input
CONFIG.ONESTEP_priv_target_direction_fc = True
"""
Exhaustive Search Agent
"""
CONFIG.EXAUST_batch_size=1
CONFIG.EXAUST_stop_iou=1
CONFIG.EXAUST_max_start_goal_dist = None
CONFIG.EXAUST_min_start_goal_iou = None
"""
Random Search baseline agent.
"""
CONFIG.RANDOM_batch_size = 1
CONFIG.RANDOM_using_memory = True #If true the agent cannot visit the same patch twice
CONFIG.RANDOM_max_start_goal_dist = None
CONFIG.RANDOM_min_start_goal_iou = None
CONFIG.RANDOM_stop_iou = 0.2
CONFIG.RANDOM_min_iou_visited = 0.3 # At what IoU should a location be considered already visited
CONFIG.RANDOM_WARNING_steps = 500 # Warn user if agent takes this many steps without finding the goal
"""
Statistics / Logging / Plotting
"""
CONFIG.STATS_dir_base = os.path.dirname(os.path.abspath(__file__))
CONFIG.STATS_log_dir_base = os.path.join(CONFIG.STATS_dir_base, 'logs')
CONFIG.STATS_log_dir = os.path.join(CONFIG.STATS_log_dir_base,
str(datetime.now()).replace(' ', '_')
.replace(':', '-').replace('.', '-'))
CONFIG.STATS_tensorboard_dir = os.path.join(CONFIG.STATS_log_dir, 'tb')
# CONFIG.STATS_save_model_batch = 250
"""
Plotting
"""
CONFIG.PLOT_plot_vis = True # TODO - not yet implemented
CONFIG.PLOT_use_saved_logs = True # This option selects whether to use "saved_logs" or "logs"
CONFIG.PLOT_training = True # plot training data (or eval data)
# The option below lets the user choose which LOG directory to plot information from
# As before, an integer signifies the n:th most recent log
# A specific log name tries to find that directory
CONFIG.PLOT_log_dir = 1
# The option below lets the user choose which EVAL directory to plot information from.
# I.e, choose which eval session to plot from given a specific training session
CONFIG.PLOT_eval_dir = None
"""
Miscellaneous
"""
CONFIG.MISC_include_baseline = True
CONFIG.MISC_use_gpu = False#True
CONFIG.MISC_dataset = 'masa_filt'
CONFIG.MISC_dataset_split_file = None
CONFIG.MISC_grid_game = True
CONFIG.MISC_random_seed = 0
#CONFIG.MISC_rnd_crop = True
CONFIG.MISC_rgb_max = 255
#CONFIG.MISC_im_size = (256, 256)
CONFIG.MISC_step_sz = int(48*CONFIG.RL_softmax_step_size)
CONFIG.MISC_game_size = 5
CONFIG.MISC_im_size = (int(CONFIG.MISC_step_sz*(CONFIG.MISC_game_size-1)+48),
int(CONFIG.MISC_step_sz*(CONFIG.MISC_game_size-1)+48))
CONFIG.MISC_patch_size = (48, 48)
CONFIG.MISC_print_iter = 50
CONFIG.MISC_save_vis_iter = 400 # How often we save a visualization
CONFIG.MISC_vis_per_batch = 12
CONFIG.MISC_send_to_board = False # Should we send the logging to tensorboard
CONFIG.MISC_use_subset_of_data = None # If set to None uses entire dataset. Otherwise only uses a subset of the images
CONFIG.MISC_use_fixed_patch = None # Use fixed image patches and not change them
CONFIG.MISC_data_aug = True
CONFIG.MISC_save_model_iter = 5000 # How often should we save the model weights
CONFIG.MISC_same_train_eval_set = False
CONFIG.MISC_project_root_path = os.path.dirname(__file__)
CONFIG.MISC_nbr_training_models = None
CONFIG.MISC_main_pid = os.getpid()
CONFIG.MISC_dataset_path = ""
hostname = socket.gethostname()
if hostname in ["john-UX430UA", "anton-Aspire-R5-471T", "dgxrise"]:
CONFIG.MISC_dataset_path = "../../datasets/"
elif hostname == "rise-gpu0":
CONFIG.MISC_dataset_path = "/home/datasets_thesis_aj/"
elif len(CONFIG.MISC_dataset_path) == 0:
print("Unknown computer, set dataset path manually in config.py")
| 8,716 | 32.270992 | 119 |
py
|
airloc
|
airloc-master/doerchnet/plot_results.py
|
#!/bin/env python3
import os
import time
import sys
import math
from dateutil.parser import parse
import numpy as np
from config import CONFIG
import json
import argparse
from utils.utils import find_latest_log
MA_SMOOTH = 0.01
START_ITER_PLOT = 30
# Compute a custom exponential moving average of the raw values:
# mas_custom[t] = (1 - ma_smooth) * mas_custom[t-1] + ma_smooth * values[t],
# initialised with the precomputed means for the first 30 points
def _custom_ma(data, ma_smooth=MA_SMOOTH):
for idx, val in enumerate(data['values']):
if idx < 30:
data['mas_custom'][idx] = data['means'][idx]
else:
data['mas_custom'][idx] = (1 - ma_smooth) * data['mas_custom'][idx - 1] + ma_smooth * data['values'][idx]
# Function for plotting each subplot
def _plot(datas, ax, title='plot', xlabel='x', ylabel='y', start_it=0, max_x=None, max_y=None, min_y = None,
show_draw='show' , legends = []):
legend_entries = []
for (i, data) in enumerate(datas):
# If the current data is full val print all values
if legends[i] == 'FullVal':
# Full val values are very sparse, no mean stuff and no filtering by start
x = data['times']
y = data['values']
format = 'x'
else:
start_it = START_ITER_PLOT
x = data['times'][start_it:] #Just iterations
y = data['mas_custom'][start_it:] #Some special mean value
format = '-'
p = ax.plot(x, y, format)
if isinstance( p , list):
for (i, pi) in enumerate(p):
pi.set_label("%d" % i)
else:
if len(legends) > i:
p.set_label(legends[i])
if len(legends) > 0:
ax.legend()
ax.grid(False)
# Calculate the axis in plot
if min_y is None:
min_y = np.min(y[start_it:])
if max_x is None:
max_x = x[-1]
if max_y is None:
max_y = np.max(y[start_it:])
# Setup argparse
parser = argparse.ArgumentParser()
# Choose log dir either based on name or on number n
log_selection = parser.add_mutually_exclusive_group()
log_selection.add_argument("--log-dir" , "-l" , type = str , help = "Select log dir based on name")
log_selection.add_argument("-n", type = int , help = 'Select the n:th latest log dir. 0 -> latest',default = 0)
parser.add_argument("--saved-logs", "-s", action="store_true", default = False, help = "Select log dir from the 'saved_logs' folder.")
parser.add_argument("--show" , action="store_true", default = False, help = "Show the plot on the screen instead of saving it.")
args = parser.parse_args()
## Script part
# Load and set correct settings for matplotlib based on wether to show the plot or just save it
if args.show:
import tkinter
import matplotlib
from matplotlib import pyplot as plt
matplotlib.use('TkAgg')
else:
import matplotlib
from matplotlib import pyplot as plt
# First determine whether to use "saved_logs" or "logs"
if args.saved_logs:
log_base = "saved_logs"
else:
log_base = "doerchnet/logs"
log_base = os.path.join(CONFIG.MISC_project_root_path , log_base)
# Next determine which log dir should be used
if args.log_dir is not None:
# Select dir named PLOT_log_dir
log_dir = args.log_dir
else:
# select the n:th latest log dir
log_dir = find_latest_log(log_base , args.n)
# If log_dir is None, there were not that many logs
if log_dir is None:
print("There are not that many training results in %s" % log_base)
exit(1)
path_log_dir = os.path.join(log_base , log_dir)
# We have now identified a log dir from a training session
# We make sure that the directory actually exists before proceeding
if not os.path.exists(path_log_dir):
print("Error, the selected log dir does not exist:\t%s" % path_log_dir)
print("Check arguments and/or plot settings in config.py")
exit(1)
# Open json containing information about the training session
try:
with open(os.path.join(path_log_dir , "info.json") , 'r') as json_file:
training_info = json.load(json_file)
except:
print("\nERROR: Unable to open info json.\n")
exit(1)
# Now check whether to print training data or evaluation data
if True:
# Plot training data
# TODO - Put training data in sub folder. like "training_stats"
data_type = 'Training'
path_log_data_dir = path_log_dir
# Since we will be plotting training info. The info json will be the same as the training_info json
info = training_info
else:
# Plot evaluation data
# First need to determine which eval run to plot from
# This works the same way as when we choose which log dir to use
path_eval_dir_base = os.path.join(path_log_dir , "eval_logs")
if type(CONFIG.PLOT_eval_dir) == str:
eval_dir = CONFIG.PLOT_eval_dir
elif type(CONFIG.PLOT_eval_dir) == int:
# select the n:th latest log dir
eval_dir = find_latest_log(path_eval_dir_base , CONFIG.PLOT_eval_dir)
else:
# Select the latest available log dir
eval_dir = find_latest_log(path_eval_dir_base , 0)
if eval_dir is None:
print("There are not that many eval results in %s" % path_log_dir)
exit(1)
# We now have path_log_data_dir which contains all metrics
path_log_data_dir = os.path.join(path_eval_dir_base, eval_dir)
data_type = 'Eval'
# Load information about this eval run from the info file TODO change back to info.json
with open(os.path.join(path_log_data_dir,"info.json"), 'r') as json_file:
info = json.load(json_file)
# The correct directory containing the data we want to plot is now in 'path_log_data_dir'
metrics = info['Metrics']
# Before plotting, print information about the retrieved data
print('')
print("Training session:\t%s" % log_dir)
print("Log directory:\t%s" % log_base)
# Filtered
filterdMetrics = list(filter(lambda s: not s.startswith('Val') and not s.startswith('FullVal'),metrics ))
# Calculate dimensions of subplots
n_cols = math.ceil(math.sqrt(len(filterdMetrics)))
n_rows = math.ceil(len(filterdMetrics) / n_cols)
#n_rows = 2
# Plot all metrics for the selected run in same figure.
fig , axes = plt.subplots(n_rows, n_cols, sharex = False, figsize = (12,8))
for (i, axis_inds) in enumerate(np.ndindex(axes.shape)):
ix , iy = axis_inds
if i >= len(filterdMetrics):
axes[ix,iy].axis('off')
continue
metric = filterdMetrics[i]
# Read data from log path
log_path = os.path.join(path_log_data_dir, metric + '.npz')
try:
data = np.load(log_path)
except:
print("\nERROR: Unable to load data for metric:\t%s\n" % metric)
exit(1)
data = {'means': data['means'], 'mas': data['mas'],
'values': data['values'], 'times': data['times'],
'mas_custom': np.zeros_like(data['mas'])}
_custom_ma(data)
legends = ['Train']
plotData = [data]
# Check if there is val data available
if 'Val' + metric in metrics:
valData = np.load(os.path.join(path_log_data_dir , 'Val' + metric + '.npz'))
valData = {'means': valData['means'], 'mas': valData['mas'],
'values': valData['values'], 'times': valData['times'],
'mas_custom': np.zeros_like(valData['mas'])}
_custom_ma(valData)
legends.append('Val')
plotData.append(valData)
# Now check loaded data to make sure there are enough data points
if data['mas_custom'].shape[0] <= START_ITER_PLOT:
print("\nERROR: Too few data points saved for plotting.\n")
exit(1)
_plot(plotData,axes[ix,iy], show_draw='show' , legends =legends)
# Set title according to the json data file
axes[ix ,iy].set_title(metric)
# Set title of entire window
fig.canvas.manager.set_window_title("%s data from :\t%s" %( data_type, log_dir))
# set padding between plots
fig.tight_layout(pad = 3.0)
if args.show:
plt.show()
else:
# Find filepath
filename = os.path.join(path_log_dir, "Training_Statistics_%s_.png" % ( log_dir))
plt.savefig(filename)
print("\nPlot saved as:\t%s\n" % os.path.basename(filename))
| 8,056 | 31.099602 | 134 |
py
|
airloc
|
airloc-master/doerchnet/share_net.py
|
import torch
import torch.nn as nn
from config import CONFIG
class ShareNet(nn.Module):
def __init__(self , num_out_classes = 8):
super(ShareNet, self).__init__()
# Check whether to include segmentation channel
if CONFIG.RL_priv_use_seg or CONFIG.RL_predict_seg_mask:
self.seg_chan = 1
else:
self.seg_chan = 0
self.n_chan = 3 + self.seg_chan
self.latent_space_dim = 256
# Start assembling the network.
# For now non-siamese setup
self.start_double_conv1 = DoubleConvBlock(self.n_chan)
self.goal_double_conv1 = DoubleConvBlock(self.n_chan)
intermediate_n_chans = self.start_double_conv1.out_chan
self.start_double_conv2 = DoubleConvBlock( intermediate_n_chans * 2)
self.goal_double_conv2 = DoubleConvBlock( intermediate_n_chans * 2)
self.flat = nn.Flatten(start_dim = 1)
self.common_layer_dim = 2 * self.start_double_conv2.out_chan * ((CONFIG.MISC_patch_size[0] // ( 2**4) )**2)
# Common linear layer
self.fc_int = nn.Linear( self.common_layer_dim , self.latent_space_dim)
self.fc_out = nn.Linear( self.latent_space_dim , num_out_classes)
self.act = nn.ReLU()
def forward(self, start , goal):
start_1 = self.start_double_conv1(start)
goal_1 = self.goal_double_conv1(goal)
start_goal = torch.cat((start_1, goal_1 ) , dim = 1)
goal_start = torch.cat((goal_1, start_1 ) , dim = 1)
start_2 = self.start_double_conv2(start_goal)
goal_2 = self.goal_double_conv2(goal_start)
emb = torch.cat((start_2, goal_2), dim = 1)
emb = self.flat(emb)
# First linear layer to latent space
emb = self.fc_int(emb)
emb = self.act(emb)
out = self.fc_out(emb)
return emb, out
class DoubleConvBlock(nn.Module):
def __init__(self, in_channels, kernel_size = 3):
super(DoubleConvBlock, self).__init__()
self.block1 = ConvBlock(in_channels, 2 * in_channels , kernel_size)
self.block2 = ConvBlock(2 * in_channels, 3 * in_channels , kernel_size)
self.out_chan = 3 * in_channels
def forward(self, x):
x = self.block1(x)
x = self.block2(x)
return x
class ConvBlock(nn.Module):
def __init__(self , in_channels, out_channels , kernel_size = 3):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(in_channels , out_channels , kernel_size, padding = 'same')
self.act = nn.ReLU()
self.pool = nn.MaxPool2d(2,2)
def forward(self, x):
x = self.conv(x)
x = self.act(x)
x = self.pool(x)
return x
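# Minimal shape-check sketch (illustrative, not part of the original pipeline): assuming
# CONFIG.MISC_patch_size == (48, 48) and no segmentation channel (n_chan == 3), two random
# patches produce a 256-dim embedding and 8 output logits:
# net = ShareNet(num_out_classes = 8)
# x = torch.randn(2, net.n_chan, 48, 48)
# emb, out = net(x, x) # emb.shape == (2, 256), out.shape == (2, 8)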
| 2,785 | 24.796296 | 115 |
py
|
airloc
|
airloc-master/doerchnet/utils.py
|
import torch
import numpy as np
import torchvision.transforms as transforms
import os
import gc
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import urllib
import matplotlib
import random
from torch.nn.functional import one_hot
from config import CONFIG
from utils.utils import get_deterministic_crops
def sample_doerch_crops(images):
"""
Sample (start, goal) crop pairs from a batch of images, where the goal patch lies
one grid step away from the start patch, and return the corresponding one-hot
action labels together with the crop locations.
"""
# NEW IDEA! Only sample n/2 pairs of crops and for the next n/2 pairs pick the opposite
# correct action as in the previous.
# When the variable below is enabled this new approach is used; False gives the old one
NEW_SAMPLING_APPROACH = False
# When enabled, exactly the same number of goal locations in each of the different positions
EQUAL_GOAL_LOC_DISTRIBUTION = False
if EQUAL_GOAL_LOC_DISTRIBUTION and NEW_SAMPLING_APPROACH:
raise(Exception("EQUAL_GOAL_LOC_DISTRIBUTION does not work with NEW_SAMPLING_APPROACH"))
# define some useful constants
h, w = CONFIG.MISC_patch_size
im_h, im_w = CONFIG.MISC_im_size
actions_one_hot = torch.zeros((images.shape[0],8))
n_chan = images.shape[1]
n_imgs = images.shape[0]
N = images.shape[0]
# initialize memory for the crops size = (batch, n_chan, h_p, w_p)
# keep the number of channels at a constant
crops_goal = torch.zeros(size=(n_imgs, n_chan, h, w))
crops_start = torch.zeros(size=(n_imgs, n_chan, h, w))
loc_crops_goal = torch.zeros(size = ( n_imgs , 4))
loc_crops_start = torch.zeros(size = (n_imgs , 4))
if EQUAL_GOAL_LOC_DISTRIBUTION:
for i in range(0, 8):
actions_one_hot[(N//8 * (i)):(N//8) *(i+1), i] = 1
# Randomize if any left
for i in range(N//8 * 8, N):
actions_one_hot[i , random.randint(0,7)] = 1
if NEW_SAMPLING_APPROACH:
N = images.shape[0]
for i in range(N // 2 + N % 2): # If odd sample one more
# image is divided into a static uniform grid. patches are sampled from this grid
upper_h , upper_w = int(im_h / h) - 1 , int(im_w / w) - 1
lower_h , lower_w = ( 0 , 0)
grid_loc = np.floor(np.random.uniform( low = [lower_h ,lower_w] , high = [upper_h , upper_w]))
goal_loc, action = sample_goal(grid_loc,upper_h,upper_w)
actions_one_hot[i,action] = 1
locs_start = np.concatenate(((grid_loc + 1/2) * np.array(CONFIG.MISC_patch_size) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
locs_goal = np.concatenate(((goal_loc + 1/2) * np.array(CONFIG.MISC_patch_size) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
loc_crops_start[i, :] = torch.tensor(np.array(locs_start, dtype = int))
loc_crops_goal[i, :] = torch.tensor(np.array(locs_goal, dtype = int))
crops_start[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=locs_start[0], left=locs_start[1], height=locs_start[2], width=locs_start[3])
crops_goal[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=locs_goal[0], left=locs_goal[1], height=locs_goal[2], width=locs_goal[3])
# Now we have sampled half the pairs we need. For the next half, sample new start crops
# but use the inverse of the previous correct action
for i in range(N//2 + N % 2 , N):
upper_h , upper_w = int(im_h / h) - 1 , int(im_w / w) - 1
lower_h , lower_w = ( 0 , 0)
grid_loc = np.floor(np.random.uniform( low = [lower_h ,lower_w] , high = [upper_h , upper_w]))
# The following line is only difference
goal_loc, action = opposite_goal(grid_loc,torch.argmax(actions_one_hot[i - N//2, :]).item())
actions_one_hot[i,action] = 1
locs_start = np.concatenate(((grid_loc + 1/2) * np.array(CONFIG.MISC_patch_size) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
locs_goal = np.concatenate(((goal_loc + 1/2) * np.array(CONFIG.MISC_patch_size) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
loc_crops_start[i, :] = torch.tensor(np.array(locs_start, dtype = int))
loc_crops_goal[i, :] = torch.tensor(np.array(locs_goal, dtype = int))
crops_start[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=locs_start[0], left=locs_start[1], height=locs_start[2], width=locs_start[3])
crops_goal[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=locs_goal[0], left=locs_goal[1], height=locs_goal[2], width=locs_goal[3])
else:
# The old approach, all samples are uniform
for i in range(images.shape[0]):
# image is divided into a static uniform grid. patches are sampled from this grid
upper_h , upper_w = int(im_h / h) - 2 , int(im_w / w) - 2
lower_h , lower_w = ( 1 , 1)
grid_loc = np.floor(np.random.uniform( low = [lower_h ,lower_w] , high = [upper_h , upper_w]))
if EQUAL_GOAL_LOC_DISTRIBUTION:
goal_loc = map_grid_action_to_goal(grid_loc, torch.argmax(actions_one_hot[i, :]))
else:
goal_loc, action = sample_goal(grid_loc,upper_h,upper_w)
actions_one_hot[i,action] = 1
locs_start = np.concatenate(((grid_loc + 1/2) * np.array(CONFIG.MISC_patch_size) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
locs_goal = np.concatenate(((goal_loc + 1/2) * np.array(CONFIG.MISC_patch_size) , np.array(CONFIG.MISC_patch_size)) ).astype('int64')
loc_crops_start[i, :] = torch.tensor(np.array(locs_start, dtype = int))
loc_crops_goal[i, :] = torch.tensor(np.array(locs_goal, dtype = int))
crops_start[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=locs_start[0], left=locs_start[1], height=locs_start[2], width=locs_start[3])
crops_goal[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=locs_goal[0], left=locs_goal[1], height=locs_goal[2], width=locs_goal[3])
return crops_start,crops_goal,actions_one_hot ,loc_crops_start , loc_crops_goal
def get_label(loc_start,loc_goal, dim = 8):
"""
Given start and goal locations outputs the label for the doerchnet to predict.
Dimensions:
dim = 8: Eight possible locations close to start patch
dim = 25: Absolute prediction of location of goal patch
dim = 80: Relative prediction of location of goal patch compared to start patch.
"""
H, W = CONFIG.MISC_patch_size
step_sz = int(CONFIG.RL_softmax_step_size*H)
if dim == 8:
diff = (loc_goal[:,:2]- loc_start[:,:2] )/step_sz
diff += 1
inds = diff[:,0]*3 + diff[:,1]
actions = torch.zeros_like(inds)
for i,inds in enumerate(inds):
if inds == 0: actions[i] = 7
elif inds == 1: actions[i] = 0
elif inds == 2: actions[i] = 1
elif inds == 3: actions[i] = 6
elif inds == 4: raise(Exception("Same start and goal loc"))
elif inds == 5: actions[i] = 2
elif inds == 6: actions[i] = 5
elif inds == 7: actions[i] = 4
elif inds == 8: actions[i] = 3
actions = one_hot(actions.long(), dim).float()
elif dim == 25:
diff = loc_goal[:,:2]/step_sz
actions = diff[:,0]*5 + diff[:,1]
actions = one_hot(actions.long(), dim).float()
elif dim == 80:
# 0 - 80 from top left to lower right
# TODO Why not signed?
move = ((loc_goal[:,:2] - loc_start[:,:2]) / step_sz )
actions = torch.zeros((move.shape[0] , 9,9 ), dtype = torch.long) # NOTE - Current position still here
actions[torch.arange(move.shape[0]) , (4 + move[:,0]).long() , (4 + move[:,1]).long()] = 1
# Reshape to one hot encoding
actions = torch.flatten(actions , start_dim = 1)
# Check if any start and goal is at same position
if (actions[:, 4 * 9 + 4] == 1).any():
raise(Exception("Same start and goal location in get_label"))
else:
# Remove current position from label space
actions = torch.cat((actions[:,0:40] , actions[:,41:]), dim = 1).float()
else:
raise(Exception("UNexpected dimension in 'get_label':\t%d" % dim))
return actions
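# Worked example for dim = 8 (assuming CONFIG.RL_softmax_step_size = 1.1 and 48x48 patches,
# i.e. step_sz = 52): a goal one grid step to the right of the start gives
# loc_goal - loc_start = (0, 52), diff = (0, 1) -> (1, 2) after the +1 shift, so
# inds = 1*3 + 2 = 5, which maps to action 2 ("step right" in map_grid_action_to_goal below).
# For dim = 80 the signed (row, col) offset is one-hot encoded in a 9x9 window centred on the
# start patch, with the centre (no movement) removed.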
def opposite_goal(grid_loc, prev_action):
""" Select the opposing location."""
# 8 possible directions. Add four and take modulo 8 to find the opposing side
action_idx = (prev_action + 4) % 8
goal_loc = map_grid_action_to_goal(grid_loc, action_idx)
return goal_loc, action_idx
def map_grid_action_to_goal(grid_loc , action):
step = CONFIG.RL_softmax_step_size
goal_loc = grid_loc.copy()
if action == 0: goal_loc += [-step,0]
elif action == 1: goal_loc += [-step,step]
elif action == 2: goal_loc += [0,step]
elif action == 3: goal_loc += [step,step]
elif action == 4: goal_loc += [step,0]
elif action == 5: goal_loc += [step,-step]
elif action == 6: goal_loc += [0,-step]
elif action == 7: goal_loc += [-step,-step]
return goal_loc
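# From the offsets above, the eight actions are laid out clockwise around the current cell:
# 0 = up, 1 = up-right, 2 = right, 3 = down-right, 4 = down, 5 = down-left, 6 = left,
# 7 = up-left, so (action + 4) % 8 in opposite_goal picks the diametrically opposite direction.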
def sample_goal(grid_loc,upper_h,upper_w):
probs = np.ones((8))
# ensure no stepping outside the image
# TODO/Note: the limit is dependent on upper_h and upper_w
if grid_loc[0] <= 0:
probs[-1] = 0
probs[0:2] = 0
raise(Exception("Setting action probabilites to 0 in sample_goal"))
if grid_loc[1] <= 0:
probs[ 5:] = 0
raise(Exception("Setting action probabilites to 0 in sample_goal"))
if grid_loc[0] >= upper_h:
probs[ 3:6] = 0
raise(Exception("Setting action probabilites to 0 in sample_goal"))
if grid_loc[1] >= upper_w:
probs[ 1:4] = 0
raise(Exception("Setting action probabilites to 0 in sample_goal"))
probs = probs/np.sum(probs)
action_idx = np.random.choice(range(8), p=probs)
goal_loc = map_grid_action_to_goal(grid_loc , action_idx)
return goal_loc, action_idx
def visualize_batch_doerch( imgs , locs_start , locs_goal , actions, transform = None , PATH = "vis" , save_name = 'vis', max_imgs = 8):
"""
Visualizes results from an entire batch during DoerchNet pretraining.
"""
n_imgs = min(imgs.shape[0] , max_imgs)
# Create new directory to save this batch visualization in
save_dir = os.path.join(PATH , save_name)
os.makedirs(save_dir)
imgs , locs_start, locs_goal , actions = imgs.cpu() , locs_start.cpu() , locs_goal.cpu() , actions.cpu()
# For each image make visualization
for i in range(n_imgs):
visualize_doerch(imgs[i,:] , locs_start[i,:], locs_goal[i,:], actions[i,:], transform = transform , PATH = save_dir, save_name = "vis_%d" % i)
def visualize_doerch(img , loc_start , loc_goal , action , transform = None, PATH = '.' , save_name = 'vis'):
"""
Visualize a single image with the start, goal and predicted (agent) patches marked,
and plot the start and goal crops separately in a right-hand column.
"""
if transform is not None:
img = transform(img)
patch_size_tensor = torch.tensor(CONFIG.MISC_patch_size).cpu()
action_idx = torch.argmax(action).item()
loc_action = loc_start.detach().clone().cpu()
# Given the action, find the chosen location
if action.shape[0] == 8:
if action_idx == 0: loc_action[0:2] += torch.tensor([-1.1,0] ) * patch_size_tensor
elif action_idx == 1: loc_action[0:2] += torch.tensor([-1.1,1.1] ) * patch_size_tensor
elif action_idx == 2: loc_action[0:2] += torch.tensor([0,1.1]) * patch_size_tensor
elif action_idx == 3: loc_action[0:2] += torch.tensor([1.1,1.1] ) * patch_size_tensor
elif action_idx == 4: loc_action[0:2] += torch.tensor([1.1,0]) * patch_size_tensor
elif action_idx == 5: loc_action[0:2] += torch.tensor([1.1,-1.1] ) * patch_size_tensor
elif action_idx == 6: loc_action[0:2] += torch.tensor([0,-1.1] )* patch_size_tensor
elif action_idx == 7: loc_action[0:2] += torch.tensor([-1.1,-1.1] ) * patch_size_tensor
elif action.shape[0] == 25:
x,y = divmod(action_idx,5)
loc_action[0:2] = torch.tensor([x,y] ) * int(CONFIG.RL_softmax_step_size*CONFIG.MISC_patch_size[0])
elif action.shape[0] == 80:
if action_idx >= 40: action_idx += 1
x,y = divmod(action_idx,9)
x -= 4
y -= 4
loc_action[0:2] += torch.tensor([x,y] ) * int(CONFIG.RL_softmax_step_size*CONFIG.MISC_patch_size[0])
else:
raise(Exception("Unknown action space"))
# make sure integer value
loc_action = loc_action.long()
fig = plt.figure(figsize = (10, 6))
subfigs = fig.subfigures(1, 2)
ax = subfigs[0].subplots( )
ax.imshow(np.transpose(img, (1, 2, 0)))
rect_start = patches.Rectangle(xy=(loc_start[1], loc_start[0]), width=loc_start[3],
height=loc_start[2], linewidth=2, edgecolor='b', facecolor='none')
rect_goal = patches.Rectangle(xy=(loc_goal[1], loc_goal[0]), width=loc_goal[3],
height=loc_goal[2], linewidth=2, edgecolor='g', facecolor='none')
rec_action = patches.Rectangle(xy=(loc_action[1], loc_action[0]), width=loc_action[3],
height=loc_action[2], linewidth=1, edgecolor='y', facecolor='none')
# Add rectangles
ax.add_patch(rect_start)
ax.add_patch(rect_goal)
ax.add_patch(rec_action)
offset = CONFIG.MISC_patch_size[0] // 4
# TODO - Add text ???
ax.text(loc_start[1] + offset, loc_start[0] + offset + 5, f"Start", fontsize=18, color='w',rotation = 315,rotation_mode = 'anchor')
ax.text(loc_goal[1] + offset, loc_goal[0] + offset + 0 , f"Target", fontsize=18, color='w',rotation = 315,rotation_mode = 'anchor')
ax.text(loc_action[1] + offset, loc_action[0] + offset + 5, f"Agent", fontsize=18, color='w',rotation = 315,rotation_mode = 'anchor')
# Plot start and goal patch to the right
right_axes = subfigs[1].subplots(nrows = 2 , ncols = 1)
# get start and goal crops
start_crop , _ = get_deterministic_crops(img.unsqueeze(0) , loc_start)
goal_crop , _ = get_deterministic_crops(img.unsqueeze(0) , loc_goal)
right_axes[0].imshow(start_crop.squeeze(0).permute(1,2,0))
right_axes[1].imshow(goal_crop.squeeze(0).permute(1,2,0))
right_axes[0].set_title("Start Crop")
right_axes[1].set_title("Goal Crop")
# Save figure
fig.savefig(os.path.join(PATH, save_name + '.png'))
# Close and clear up
plt.cla()
#plt.clf()
plt.close('all')
gc.collect()
def calculate_precision( outputs , labels):
"""
Calculate accuracy (precision) of the model for the current batch over corners, boundaries and the middle area.
"""
corners = [0,4,20,24]
boundaries = [1,2,3,5,10,15,9,14,19,21,22,23]
middle = [6,7,8,11,12,13,16,17,18]
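# For reference, the dim = 25 label indices form a 5x5 grid in row-major order:
# 0 1 2 3 4
# 5 6 7 8 9
# 10 11 12 13 14
# 15 16 17 18 19
# 20 21 22 23 24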
# Calculate corner precision
corners_ind = labels[:,corners].any(dim = 1)
prec_corner = ( torch.argmax(outputs[corners_ind , :] , dim = 1) == torch.argmax(labels[corners_ind,:], dim = 1)).float().mean()
# Calculate boundary precision
boundaries_ind = labels[:,boundaries].any(dim = 1)
prec_boundaries = ( torch.argmax(outputs[boundaries_ind , :] , dim = 1) == torch.argmax(labels[boundaries_ind,:], dim = 1)).float().mean()
# Calculate middle precision
middle_ind = labels[:,middle].any(dim = 1)
prec_middle = ( torch.argmax(outputs[middle_ind , :] , dim = 1) == torch.argmax(labels[middle_ind,:], dim = 1)).float().mean()
return prec_corner.item() , prec_boundaries.item() , prec_middle.item()
if __name__ == '__main__':
# DEBUGGING get_label
H = 48
step_sz = int(CONFIG.RL_softmax_step_size*H)
loc1 = torch.tensor([[0,0]])
loc2 = torch.tensor([[0,1]]) * step_sz
loc3 = torch.tensor([[1,1]]) * step_sz
loc4 = torch.tensor([[0,3]]) * step_sz
comb1 = torch.cat((loc1,loc2))
comb2 = torch.cat((loc1,loc1))
comb3 = torch.cat((loc2 , loc3))
label1 = get_label(loc1, loc2, dim = 80)
label2 = get_label(loc3, loc1, dim = 80)
label3 = get_label(loc1, loc3, dim = 80)
label4 = get_label(comb1 , comb3, dim = 80)
label5 = get_label(comb2 , comb3 , dim = 80)
label6 = get_label(comb1, comb3 , dim = 80)
assert(label1[0,4*9 + 4] == 1)
assert(label2[0,3*9 + 3] == 1)
assert(label3[0,5*9 + 4] == 1)
pass
label7 = get_label(comb1, comb2)
| 16,618 | 40.967172 | 172 |
py
|
airloc
|
airloc-master/doerchnet/networks.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import CONFIG
class AJNet(nn.Module):
def __init__(self,net_type,mode = 'train', unit_size = 128,num_classes =8 , both_branches = True):
super(AJNet,self).__init__()
# Allow for pretraining network with ground truth segmentation mask
if CONFIG.RL_priv_use_seg:
if CONFIG.MISC_dataset == 'masa_filt':
self.n_chan = 4
else:
raise(Exception("Unkown dataset with segmentation info"))
else:
# Regular RGB inputs
self.n_chan = 3
self.mode = mode
self.both_branches = both_branches
# Choose the embedder for the Doerch net
# We start by assuming that we are using fixed weights
if net_type == 'Doerch':
self.start_enc = Encoder(unit_size , input_channels = self.n_chan)
self.goal_enc = self.start_enc
elif net_type == 'Doerch2':
self.start_enc = Encoder2(unit_size , input_channels = self.n_chan)
self.goal_enc = self.start_enc
else:
print(f"Unknown embedder {net_type} for this task")
exit(1)
if both_branches:
first_common_size = 2 * unit_size
else:
first_common_size = unit_size
self.relu = nn.ReLU()
self.common_fc_1 = nn.Linear(first_common_size, 2*unit_size)
self.common_fc_2 = nn.Linear(2*unit_size,num_classes)
def forward(self,start,goal, only_classify = False):
# Always embed the goal patch; the start branch is only needed when both branches are enabled
goal_emb = self.goal_enc(goal)
if self.both_branches:
start_emb = self.start_enc(start)
common_emb = torch.cat([start_emb, goal_emb], dim = 1)
else:
common_emb = goal_emb
common_emb = self.relu(common_emb)
common_emb = self.common_fc_1(common_emb)
softmax_emb = self.common_fc_2(self.relu(common_emb))
softmax_emb = F.softmax(softmax_emb, dim = 1 )
# Sometimes we only want the classification
if only_classify:
return softmax_emb
else:
return common_emb , softmax_emb
class Encoder(nn.Module):
def __init__(self, unit_size = 64, input_channels = 3):
super(Encoder,self).__init__()
self.modules = []
# Conv block 1
self.modules += [nn.Conv2d(input_channels,16,3,1,1)]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 2
self.modules += [nn.Conv2d(16,32,3,1,1)]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 3
self.modules += [nn.Conv2d(32,64,3,1,1)]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 4
self.modules += [nn.Conv2d(64,64,3,1,1)]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 5
self.modules += [nn.Conv2d(64,64,3,1,1)]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Fc layer to map to correct unit size
self.modules += [nn.Flatten()]
self.modules += [nn.Linear(64,unit_size)]
self.net = nn.Sequential(*self.modules)
def forward(self,patch):
return self.net(patch)
class Encoder2(nn.Module):
def __init__(self, unit_size = 64, input_channels = 3):
super(Encoder2,self).__init__()
self.modules = []
# Conv block 1
self.modules += [nn.Conv2d(input_channels,8,3,1,padding = 'same')]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 2
self.modules += [nn.Conv2d(8,16,5,1,padding = 'same')]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 3
self.modules += [nn.Conv2d(16,32,5,1,padding = 'same')]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 4
self.modules += [nn.Conv2d(32,64,5,1,padding = 'same')]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Out comes shape 3x3x64
# Fc layer to map to correct unit size
self.modules += [nn.Flatten()]
self.modules += [nn.Linear(9*64,unit_size)]
self.net = nn.Sequential(*self.modules)
def forward(self,patch):
return self.net(patch)
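# Minimal shape-check sketch (illustrative, not part of the original pipeline): assuming
# CONFIG.RL_priv_use_seg is False and 48x48 RGB patches, the default AJNet('Doerch2')
# maps a (start, goal) pair to a 256-dim common embedding and an 8-way softmax:
# net = AJNet('Doerch2')
# x = torch.randn(2, 3, 48, 48)
# common, probs = net(x, x) # common.shape == (2, 256), probs.shape == (2, 8)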
| 4,630 | 27.763975 | 103 |
py
|
airloc
|
airloc-master/doerchnet/train.py
|
import os
import torch
import random
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.nn as nn
import json
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from shutil import copyfile
from utils.utils import get_random_crops, get_deterministic_crops, load_normalize_data
from doerchnet.utils import sample_doerch_crops , visualize_doerch, get_label, visualize_batch_doerch, calculate_precision
from doerchnet.networks import AJNet
from doerchnet.share_net import ShareNet
import torch.optim as optim
from utils.stat_collector import StatCollector
from config import CONFIG
try:
# Set all hyperparameters here to not clutter the config file
net_type = 'ShareNet'
seed = 0
batch_size = 64
epochs = 100000
multiply_images = 4
# 8 dim -> local estimation of neighbouring patches
# 25 dim -> global guess on where target is
# 80 dim -> Global relative to start
dim = 8
max_dist = 1
optimizer_type = 'adam'
learning_rate = 1e-4
lower_learning_rate_factor = 0.33
momentum = 0.95
beta1 = 0.9
beta2 = 0.999
print_iter = 100
vis_iter = 1000
# Set seeds
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
info = dict([
("NetType" , net_type),
("Completed" , False),
("Metrics", [
"Loss",
"ValLoss",
"Accuracy",
"ValAccuracy",
"ActionsTaken",
"ValActionsTaken",
"CorrectActions",
"ValCorrectActions",
"AccuracyCorners",
"AccuracyBoundaries",
"AccuracyMiddle",
"ValAccuracyCorners",
"ValAccuracyBoundaries",
"ValAccuracyMiddle",
]),
("LogDir" , None),
("Blocks" , [2 ,2 , 2]),
("NbrParameters" , 0),
("LatentSpaceSize" , 0)
])
if dim not in [25 , 80]:
# Remove accuracy region metrics
for m in ["AccuracyCorners" , "AccuracyBoundaries" , "AccuracyMiddle"]:
info['Metrics'].remove(m)
info['Metrics'].remove("Val" + m)
metrics = info['Metrics']
# Function used to update the learning rate
def update_learning_rate(optimizer , learning_rate):
for params in optimizer.param_groups:
params['lr'] = learning_rate
# Find device
device = torch.device("cuda:0" if CONFIG.MISC_use_gpu and torch.cuda.is_available() else "cpu")
CONFIG.device = device
# Load data
trainloader,valloader = load_normalize_data(download = False, batch_size = batch_size , multiply_images = multiply_images)
print(f"Trainloader: {len(trainloader)}")
print(f"Valloader: {len(valloader)}")
# Save information about dataloaders
info['Dataset'] = CONFIG.MISC_dataset
info['ValLoaderLength'] = len(valloader)
info['TrainLoaderLength'] = len(trainloader)
valiter = iter(valloader)
tot_itr = 0
tot_nbr_itr = epochs * len(trainloader)
if net_type == 'ShareNet':
net = ShareNet(num_out_classes = dim)
else:
net = AJNet(net_type, num_classes=dim, both_branches = True)
# Count number of paramters
net_parameters = filter(lambda p: p.requires_grad, net.parameters())
info['NbrParameters'] = int(sum([np.prod(p.size()) for p in net_parameters]))
# Record latentspace size in info file
noise = torch.randn(1, net.n_chan , CONFIG.MISC_patch_size[0] , CONFIG.MISC_patch_size[1]).cpu()
latent , _ = net(noise,noise)
latentSpaceSize = int(np.prod(list(latent.shape)))
info['LatentSpaceSize'] = latentSpaceSize
info['InputSpaceSize'] = int(np.prod(list(noise.shape)))
net = net.to(device)
# Set optimizer
if optimizer_type == 'sgd':
optimizer = optim.SGD(net.parameters() , lr = learning_rate, momentum = momentum)
elif optimizer_type == 'adam':
optimizer = optim.Adam(net.parameters() , lr = learning_rate, betas =(beta1 , beta2))
else:
raise(Exception("Unknown optimizer type:\t%s" % optimizer_type))
# Select loss function
criterion = nn.CrossEntropyLoss()
# Setup stat collector
log_dir_name = os.path.basename(CONFIG.STATS_log_dir)
CONFIG.STATS_DORCH_log_dir = os.path.join(CONFIG.MISC_project_root_path, "doerchnet", "logs", log_dir_name)
os.makedirs(CONFIG.STATS_DORCH_log_dir)
exclude_prints = [
"ActionsTaken",
"ValActionsTaken",
"CorrectActions",
"ValCorrectActions",
] # Does not print these statistics
CONFIG.STATS_DOERCH_vis_dir = os.path.join(CONFIG.STATS_DORCH_log_dir, "visualizations")
os.makedirs(CONFIG.STATS_DOERCH_vis_dir)
sc = StatCollector(CONFIG.STATS_DORCH_log_dir, tot_nbr_itr, print_iter = print_iter, exclude_prints = exclude_prints)
for metric in metrics:
sc.register(metric , {'type':'avg','freq':'step'})
# Enter log dir and write to file
info['LogDir'] = CONFIG.STATS_DORCH_log_dir
with open(os.path.join(CONFIG.STATS_DORCH_log_dir, "info.json") , 'w') as io:
json.dump(info, io , indent = 4)
# Save all files
copyfile("doerchnet/train.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "train.py"))
copyfile("config.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "config.py"))
copyfile("doerchnet/networks.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "networks.py"))
copyfile("doerchnet/share_net.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "share_net.py"))
if CONFIG.MISC_dataset.startswith('custom_'):
stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
else:
stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
with open(stat_path) as json_file:
stats = json.load(json_file)
patch_dims = (1,3,CONFIG.MISC_patch_size[0],CONFIG.MISC_patch_size[1])
dataset_means = torch.tensor(stats['means'][:3])
dataset_stds = torch.tensor(stats['stds'][:3])
unNormImage = transforms.Normalize( ( - dataset_means / dataset_stds).tolist() , (1.0 / dataset_stds).tolist() )
# Optionally decrease the learning rate every few epochs (disabled further down)
current_learning_rate = learning_rate
action_dist = torch.zeros(dim)
action_taken = torch.zeros(dim)
def update_learning_rate(optimizer , learning_rate):
for params in optimizer.param_groups:
params['lr'] = learning_rate
print("NetType:\t%s" % net_type)
print("Dataset:\t%s" % CONFIG.MISC_dataset)
for epoch in range(epochs):
for (i,data) in enumerate(trainloader):
batch_images , (start_crops_ , goal_crops_) = data
batch_images = batch_images
# Get random crops
crops_start, locs_start = get_random_crops(batch_images)
crops_goal, locs_goal = get_random_crops(batch_images, locs_start, max_dist = max_dist)
actions = get_label(locs_start, locs_goal, dim = dim)
crops_start = crops_start.to(device)
crops_goal = crops_goal.to(device)
action_dist += actions.sum(dim=0)
temp_action_dist = action_dist / action_dist.sum()
actions = actions.to(device)
_ , outputs = net(crops_start,crops_goal)
loss = criterion(outputs, actions ).to(device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# here do visualization
if tot_itr % vis_iter == 0:
pass
#visualize_doerch(batch_images, locs_start, locs_goal ,outputs, unNormImage, save_name = 'train_vis_%d' % tot_itr, PATH = CONFIG.STATS_DORCH_log_dir)
sc.s('Loss').collect(loss.item())
# Calculate precision of model at corners, boundaries and middle part
if dim in [25 , 80]:
prec_corner, prec_boundary , prec_middle = calculate_precision(outputs , actions)
sc.s('AccuracyCorners').collect(prec_corner)
sc.s('AccuracyBoundaries').collect(prec_boundary)
sc.s('AccuracyMiddle').collect(prec_middle)
# Calculate the accuracy
acc = (torch.argmax(actions , dim = 1, keepdim = True) == torch.argmax(outputs , dim = 1, keepdim = True)).float().mean()
# print(actions.argmax(dim=1,keepdim=True))
sc.s('Accuracy').collect(acc.item())
sc.s('ActionsTaken').collect(F.one_hot(outputs.argmax(dim = 1, keepdim = False ), num_classes=dim).float().mean(dim = 0).cpu().numpy())
sc.s('CorrectActions').collect(actions.mean(dim = 0).cpu().numpy())
with torch.no_grad():
# Run one batch on the valloader aswell
try:
val_images, (start_crops_ , goal_crops_) = next(valiter)
except:
valiter = iter(valloader)
val_images, ( start_crops_ , goal_crops_) = next(valiter)
val_images = val_images.to(device)
# Get random crops
val_crops_start, val_locs_start = get_random_crops(val_images)
val_crops_goal, val_locs_goal = get_random_crops(val_images, val_locs_start, max_dist = max_dist)
val_actions = get_label(val_locs_start, val_locs_goal, dim = dim)
val_crops_start = val_crops_start.to(device)
val_crops_goal = val_crops_goal.to(device)
val_actions = val_actions.to(device)
_ , val_outputs = net(val_crops_start,val_crops_goal)
val_loss = criterion(val_outputs, val_actions ).to(device)
# Logging
sc.s('ValLoss').collect(val_loss.item())
# Calculate precision of model at corners, boundaries and middle part
if dim in [25 , 80]:
prec_corner, prec_boundary , prec_middle = calculate_precision(val_outputs , val_actions)
sc.s('ValAccuracyCorners').collect(prec_corner)
sc.s('ValAccuracyBoundaries').collect(prec_boundary)
sc.s('ValAccuracyMiddle').collect(prec_middle)
# Calculate the accuracy
val_acc = (torch.argmax(val_actions , dim = 1, keepdim = True) == torch.argmax(val_outputs , dim = 1, keepdim = True)).float().mean()
sc.s('ValAccuracy').collect(val_acc.item())
sc.s('ValActionsTaken').collect(F.one_hot(val_outputs.argmax(dim = 1, keepdim = False), num_classes=dim).float().mean(dim = 0).cpu().numpy())
sc.s('ValCorrectActions').collect(val_actions.mean(dim = 0).cpu().numpy())
# here do visualization
if tot_itr % vis_iter == 0:
                # If segmentation information is enabled, remove it and only visualize the RGB images
if CONFIG.RL_priv_use_seg:
val_images = val_images[:,0:3,:]
visualize_batch_doerch(val_images, val_locs_start , val_locs_goal , val_outputs, unNormImage,PATH = CONFIG.STATS_DOERCH_vis_dir, save_name = "val_%d" % tot_itr)
if tot_itr % print_iter == 0 or tot_itr == tot_nbr_itr - 1:
print("Iteration:\t%d / %d" % ( tot_itr, tot_nbr_itr))
sc.print()
#print(action_dist)
sc.save()
tot_itr += 1
if tot_itr % 5000 == 0:
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_DORCH_log_dir, "doerch_embedder"))
# Lower the learning rate NOTE not active
if (epoch + 1) % 10 == 0 and False:
current_learning_rate *= lower_learning_rate_factor
update_learning_rate(optimizer, current_learning_rate)
info['Completed'] = True
with open(os.path.join(CONFIG.STATS_DORCH_log_dir, "info.json") , 'w') as io:
json.dump(info , io , indent = 4)
except:
# TODO - Use signal handlers instead so that we can propagate the exceptions
#raise
while True:
        i = input("save the model? [y/n] ")
if i=='y':
# Save the encoder and the decoder
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_DORCH_log_dir, "doerch_embedder"))
print("model saved")
exit(1)
elif i == 'n':
print("Not saving")
exit(1)
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_DORCH_log_dir, "doerch_embedder"))
| 12,721 | 36.528024 | 176 |
py
|
airloc
|
airloc-master/doerchnet/logs/with-sem-seg/share_net.py
|
import torch
import torch.nn as nn
from config import CONFIG
class ShareNet(nn.Module):
def __init__(self , num_out_classes = 8):
super(ShareNet, self).__init__()
        # Check whether to include segmentation channel
if CONFIG.RL_priv_use_seg or CONFIG.RL_predict_seg_mask:
self.seg_chan = 1
else:
self.seg_chan = 0
self.n_chan = 3 + self.seg_chan
self.latent_space_dim = 256
# Start assembling the network.
# For now non-siamese setup
self.start_double_conv1 = DoubleConvBlock(self.n_chan)
self.goal_double_conv1 = DoubleConvBlock(self.n_chan)
intermediate_n_chans = self.start_double_conv1.out_chan
self.start_double_conv2 = DoubleConvBlock( intermediate_n_chans * 2)
self.goal_double_conv2 = DoubleConvBlock( intermediate_n_chans * 2)
self.flat = nn.Flatten(start_dim = 1)
self.common_layer_dim = 2 * self.start_double_conv2.out_chan * ((CONFIG.MISC_patch_size[0] // ( 2**4) )**2)
# Common linear layer
self.fc_int = nn.Linear( self.common_layer_dim , self.latent_space_dim)
self.fc_out = nn.Linear( self.latent_space_dim , num_out_classes)
self.act = nn.ReLU()
def forward(self, start , goal):
start_1 = self.start_double_conv1(start)
goal_1 = self.goal_double_conv1(goal)
start_goal = torch.cat((start_1, goal_1 ) , dim = 1)
goal_start = torch.cat((goal_1, start_1 ) , dim = 1)
start_2 = self.start_double_conv2(start_goal)
goal_2 = self.goal_double_conv2(goal_start)
emb = torch.cat((start_2, goal_2), dim = 1)
emb = self.flat(emb)
# First linear layer to latent space
emb = self.fc_int(emb)
emb = self.act(emb)
out = self.fc_out(emb)
return emb, out
class DoubleConvBlock(nn.Module):
def __init__(self, in_channels, kernel_size = 3):
super(DoubleConvBlock, self).__init__()
self.block1 = ConvBlock(in_channels, 2 * in_channels , kernel_size)
self.block2 = ConvBlock(2 * in_channels, 3 * in_channels , kernel_size)
self.out_chan = 3 * in_channels
def forward(self, x):
x = self.block1(x)
x = self.block2(x)
return x
class ConvBlock(nn.Module):
def __init__(self , in_channels, out_channels , kernel_size = 3):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(in_channels , out_channels , kernel_size, padding = 'same')
self.act = nn.ReLU()
self.pool = nn.MaxPool2d(2,2)
def forward(self, x):
x = self.conv(x)
x = self.act(x)
x = self.pool(x)
return x
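# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original training
# code). It assumes the repository's config.py is importable and that
# CONFIG.MISC_patch_size is (48, 48), so each branch downsamples by 2**4 as the
# common_layer_dim computation above expects.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    net = ShareNet(num_out_classes = 8)
    start = torch.randn(2, net.n_chan, CONFIG.MISC_patch_size[0], CONFIG.MISC_patch_size[1])
    goal = torch.randn(2, net.n_chan, CONFIG.MISC_patch_size[0], CONFIG.MISC_patch_size[1])
    emb, out = net(start, goal)
    # emb holds the 256-dimensional latent embedding, out the num_out_classes logits
    print(emb.shape, out.shape)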
| 2,784 | 25.028037 | 115 |
py
|
airloc
|
airloc-master/doerchnet/logs/without-sem-seg/share_net.py
|
import torch
import torch.nn as nn
from config import CONFIG
class ShareNet(nn.Module):
def __init__(self , num_out_classes = 8):
super(ShareNet, self).__init__()
        # Check whether to include segmentation channel
if CONFIG.RL_priv_use_seg:
self.seg_chan = 1
else:
self.seg_chan = 0
self.n_chan = 3 + self.seg_chan
self.latent_space_dim = 256
# Start assembling the network.
# For now non-siamese setup
self.start_double_conv1 = DoubleConvBlock(self.n_chan)
self.goal_double_conv1 = DoubleConvBlock(self.n_chan)
intermediate_n_chans = self.start_double_conv1.out_chan
self.start_double_conv2 = DoubleConvBlock( intermediate_n_chans * 2)
self.goal_double_conv2 = DoubleConvBlock( intermediate_n_chans * 2)
self.flat = nn.Flatten(start_dim = 1)
self.common_layer_dim = 2 * self.start_double_conv2.out_chan * ((CONFIG.MISC_patch_size[0] // ( 2**4) )**2)
# Common linear layer
self.fc_int = nn.Linear( self.common_layer_dim , self.latent_space_dim)
self.fc_out = nn.Linear( self.latent_space_dim , num_out_classes)
self.act = nn.ReLU()
def forward(self, start , goal):
start_1 = self.start_double_conv1(start)
goal_1 = self.goal_double_conv1(goal)
start_goal = torch.cat((start_1, goal_1 ) , dim = 1)
goal_start = torch.cat((goal_1, start_1 ) , dim = 1)
start_2 = self.start_double_conv2(start_goal)
goal_2 = self.goal_double_conv2(goal_start)
emb = torch.cat((start_2, goal_2), dim = 1)
emb = self.flat(emb)
# First linear layer to latent space
emb = self.fc_int(emb)
emb = self.act(emb)
out = self.fc_out(emb)
return emb, out
class DoubleConvBlock(nn.Module):
def __init__(self, in_channels, kernel_size = 3):
super(DoubleConvBlock, self).__init__()
self.block1 = ConvBlock(in_channels, 2 * in_channels , kernel_size)
self.block2 = ConvBlock(2 * in_channels, 3 * in_channels , kernel_size)
self.out_chan = 3 * in_channels
def forward(self, x):
x = self.block1(x)
x = self.block2(x)
return x
class ConvBlock(nn.Module):
def __init__(self , in_channels, out_channels , kernel_size = 3):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(in_channels , out_channels , kernel_size, padding = 'same')
self.act = nn.ReLU()
self.pool = nn.MaxPool2d(2,2)
def forward(self, x):
x = self.conv(x)
x = self.act(x)
x = self.pool(x)
return x
| 2,755 | 24.518519 | 115 |
py
|
airloc
|
airloc-master/doerchnet/logs/without-sem-seg-pre-michael/share_net.py
|
import torch
import torch.nn as nn
from config import CONFIG
class ShareNet(nn.Module):
def __init__(self , num_out_classes = 8):
super(ShareNet, self).__init__()
        # Check whether to include segmentation channel
if CONFIG.RL_priv_use_seg or CONFIG.RL_predict_seg_mask:
self.seg_chan = 1
else:
self.seg_chan = 0
self.n_chan = 3 + self.seg_chan
self.latent_space_dim = 256
# Start assembling the network.
# For now non-siamese setup
self.start_double_conv1 = DoubleConvBlock(self.n_chan)
self.goal_double_conv1 = DoubleConvBlock(self.n_chan)
intermediate_n_chans = self.start_double_conv1.out_chan
self.start_double_conv2 = DoubleConvBlock( intermediate_n_chans * 2)
self.goal_double_conv2 = DoubleConvBlock( intermediate_n_chans * 2)
self.flat = nn.Flatten(start_dim = 1)
self.common_layer_dim = 2 * self.start_double_conv2.out_chan * ((CONFIG.MISC_patch_size[0] // ( 2**4) )**2)
# Common linear layer
self.fc_int = nn.Linear( self.common_layer_dim , self.latent_space_dim)
self.fc_out = nn.Linear( self.latent_space_dim , num_out_classes)
self.act = nn.ReLU()
def forward(self, start , goal):
start_1 = self.start_double_conv1(start)
goal_1 = self.goal_double_conv1(goal)
start_goal = torch.cat((start_1, goal_1 ) , dim = 1)
goal_start = torch.cat((goal_1, start_1 ) , dim = 1)
start_2 = self.start_double_conv2(start_goal)
goal_2 = self.goal_double_conv2(goal_start)
emb = torch.cat((start_2, goal_2), dim = 1)
emb = self.flat(emb)
# First linear layer to latent space
emb = self.fc_int(emb)
emb = self.act(emb)
out = self.fc_out(emb)
return emb, out
class DoubleConvBlock(nn.Module):
def __init__(self, in_channels, kernel_size = 3):
super(DoubleConvBlock, self).__init__()
self.block1 = ConvBlock(in_channels, 2 * in_channels , kernel_size)
self.block2 = ConvBlock(2 * in_channels, 3 * in_channels , kernel_size)
self.out_chan = 3 * in_channels
def forward(self, x):
x = self.block1(x)
x = self.block2(x)
return x
class ConvBlock(nn.Module):
def __init__(self , in_channels, out_channels , kernel_size = 3):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(in_channels , out_channels , kernel_size, padding = 'same')
self.act = nn.ReLU()
self.pool = nn.MaxPool2d(2,2)
def forward(self, x):
x = self.conv(x)
x = self.act(x)
x = self.pool(x)
return x
| 2,785 | 24.796296 | 115 |
py
|
airloc
|
airloc-master/doerchnet/logs/without-sem-seg-pre-michael/networks.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import CONFIG
class AJNet(nn.Module):
def __init__(self,net_type,mode = 'train', unit_size = 128,num_classes =8 , both_branches = True):
super(AJNet,self).__init__()
# Allow for pretraining network with ground truth segmentation mask
if CONFIG.RL_priv_use_seg:
if CONFIG.MISC_dataset == 'masa_filt':
self.n_chan = 4
else:
raise(Exception("Unkown dataset with segmentation info"))
else:
# Regular RGB inputs
self.n_chan = 3
self.mode = mode
self.both_branches = both_branches
# Choose the embedder for the Doerch net
# We start by assuming that we are using fixed weights
if net_type == 'Doerch':
self.start_enc = Encoder(unit_size , input_channels = self.n_chan)
self.goal_enc = self.start_enc
elif net_type == 'Doerch2':
self.start_enc = Encoder2(unit_size , input_channels = self.n_chan)
self.goal_enc = self.start_enc
else:
print(f"Unknown embedder {net_type} for this task")
exit(1)
if both_branches:
first_common_size = 2 * unit_size
else:
first_common_size = unit_size
self.relu = nn.ReLU()
self.common_fc_1 = nn.Linear(first_common_size, 2*unit_size)
self.common_fc_2 = nn.Linear(2*unit_size,num_classes)
    def forward(self,start,goal, only_classify = False):
        # The goal embedding is always needed; the start branch is only used when both branches are enabled
        goal_emb = self.goal_enc(goal)
        if self.both_branches:
            start_emb = self.start_enc(start)
            common_emb = torch.cat([start_emb, goal_emb], dim = 1)
        else:
            common_emb = goal_emb
common_emb = self.relu(common_emb)
common_emb = self.common_fc_1(common_emb)
softmax_emb = self.common_fc_2(self.relu(common_emb))
softmax_emb = F.softmax(softmax_emb, dim = 1 )
# Sometimes we only want the classification
if only_classify:
return softmax_emb
else:
return common_emb , softmax_emb
class Encoder(nn.Module):
def __init__(self, unit_size = 64, input_channels = 3):
super(Encoder,self).__init__()
self.modules = []
# Conv block 1
self.modules += [nn.Conv2d(input_channels,16,3,1,1)]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 2
self.modules += [nn.Conv2d(16,32,3,1,1)]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 3
self.modules += [nn.Conv2d(32,64,3,1,1)]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 4
self.modules += [nn.Conv2d(64,64,3,1,1)]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 5
self.modules += [nn.Conv2d(64,64,3,1,1)]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Fc layer to map to correct unit size
self.modules += [nn.Flatten()]
self.modules += [nn.Linear(64,unit_size)]
self.net = nn.Sequential(*self.modules)
def forward(self,patch):
return self.net(patch)
class Encoder2(nn.Module):
def __init__(self, unit_size = 64, input_channels = 3):
super(Encoder2,self).__init__()
self.modules = []
# Conv block 1
self.modules += [nn.Conv2d(input_channels,8,3,1,padding = 'same')]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 2
self.modules += [nn.Conv2d(8,16,5,1,padding = 'same')]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 3
self.modules += [nn.Conv2d(16,32,5,1,padding = 'same')]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Conv block 4
self.modules += [nn.Conv2d(32,64,5,1,padding = 'same')]
self.modules += [nn.ReLU()]
self.modules += [nn.MaxPool2d(2,2)]
# Out comes shape 3x3x64
# Fc layer to map to correct unit size
self.modules += [nn.Flatten()]
self.modules += [nn.Linear(9*64,unit_size)]
self.net = nn.Sequential(*self.modules)
def forward(self,patch):
return self.net(patch)
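# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module).
# It assumes config.py is importable, that CONFIG.RL_priv_use_seg is False (plain
# RGB patches) and that the patches are 48x48, matching the "3x3x64" shape comment
# in Encoder2 above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    net = AJNet('Doerch2', unit_size = 128, num_classes = 8, both_branches = True)
    start = torch.randn(2, net.n_chan, 48, 48)
    goal = torch.randn(2, net.n_chan, 48, 48)
    common_emb, softmax_emb = net(start, goal)
    # common_emb is the shared 2 * unit_size embedding, softmax_emb the 8-way action distribution
    print(common_emb.shape, softmax_emb.shape)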
| 4,630 | 27.763975 | 103 |
py
|
airloc
|
airloc-master/doerchnet/logs/without-sem-seg-pre-michael/config.py
|
"""
Central configuration file for the project. Acts as storage
for global variables and various configuration settings.
TODO TODO TODO: Remove unnecessary fields below; this is copied from an earlier
project
"""
import os
import pprint
from easydict import EasyDict as edict
from datetime import datetime
import socket # To resolve host name and set paths appropriately
# TODO: If we're to use rllib, then this may need to be uncommented.
#from rllib.utils import rllib_get_config
CONFIG = edict()
"""
Evaluation of RL-agent
"""
# These settings control the evaluation runs of the saved agents.
# EVAL_RL_log selects which saved agent should be used. If set to an integer n, it picks the
# n:th latest available log (note that n=1 picks the penultimate available log).
# If set to a specific log name it tries to load that log
CONFIG.EVAL_RL_log = None
CONFIG.EVAL_RL_saved_logs = False # If enabled picks the model from those in saved_logs
CONFIG.EVAL_RL_multiply_images = 1
CONFIG.EVAL_save_vis_iter = 10
CONFIG.EVAL_RL_use_val_set = True
"""
RL-agent
"""
######################### This is where the important settings start #########################
# Batch n Stuff
CONFIG.RL_nbr_epochs = 10000
CONFIG.RL_batch_size = 32
CONFIG.RL_multiply_images = 2
CONFIG.RL_max_episode_length = 10
CONFIG.MISC_priv = False
# Architecture
CONFIG.RL_agent_network = 'LSTMAgent' # AiRLoc agent
CONFIG.RL_patch_embedder = 'ShareNet'
CONFIG.RL_freeze_patch_embedder = True
CONFIG.RL_priv_pretrained = True
CONFIG.EE_temporal = True
CONFIG.EE_residual = True
# Optimizer
CONFIG.RL_learning_rate = 1e-4
CONFIG.RL_nbr_eps_update = (CONFIG.RL_batch_size * CONFIG.RL_multiply_images)//1
CONFIG.RL_weight_decay = 0
CONFIG.RL_momentum = 0.90
CONFIG.RL_optimizer = 'adam'
CONFIG.RL_beta1 = 0.9
CONFIG.RL_beta2 = 0.999
#Env setup
CONFIG.RL_agent_allowed_outside = True
CONFIG.RL_normalize_weights = True
CONFIG.RL_eval_deterministic = True
CONFIG.RL_priv_grid_location = False
CONFIG.RL_priv_use_seg = False#True # Set to True when training sem seg-based RL-agent (but False during inference -- should not use ground truth then!)
"""
RL Rewards
"""
CONFIG.RL_reward_goal = 3
CONFIG.RL_reward_failed = 0
CONFIG.RL_reward_closer = 0
CONFIG.RL_reward_iou_scale = 0
CONFIG.RL_reward_step_outside = 0
CONFIG.RL_reward_distance = False
CONFIG.RL_reward_step = -1
# LSTM Agent settings
CONFIG.RL_LSTM_pos_emb = True
# Pretrained doerch
#CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/without-sem-seg' # without sem-seg
#CONFIG.RL_pretrained_doerch_net = 'doerchnet/logs/with-sem-seg' # with sem-seg
######################### This is where they end #########################
CONFIG.RL_max_start_goal_dist = 8
CONFIG.RL_min_start_goal_iou = None
CONFIG.RL_done_iou = 0.40
CONFIG.RL_discount_factor = 0.9
CONFIG.RL_softmax_step_size = 1.1 # When set to 1, one step corresponds to exactly adjacent, non-overlapping patches
CONFIG.RL_entropy = None
CONFIG.RL_entropy_lower = None
# Pretrained segmenter
CONFIG.RL_pretrained_segmentation_net = 'segmentations/logs/sem-seg-model'
CONFIG.RL_predict_seg_mask = False # Set to True during inference if using a sem-seg based RL-agent
"""
Random Search baseline agent
"""
CONFIG.RANDOM_batch_size = 1
CONFIG.RANDOM_using_memory = True # If true, the agent cannot visit the same patch twice
CONFIG.RANDOM_stop_iou = 0.2 # Not used in grid game setup
CONFIG.RANDOM_min_iou_visited = 0.3 # At what IoU should a location be considered already visited (not used in grid game setup)
CONFIG.RANDOM_WARNING_steps = 500 # Warn user if agent takes this many steps without finding the goal
"""
Statistics / Logging / Plotting
"""
CONFIG.STATS_dir_base = os.path.dirname(os.path.abspath(__file__))
CONFIG.STATS_log_dir_base = os.path.join(CONFIG.STATS_dir_base, 'logs')
CONFIG.STATS_log_dir = os.path.join(CONFIG.STATS_log_dir_base,
str(datetime.now()).replace(' ', '_')
.replace(':', '-').replace('.', '-'))
"""
Plotting
"""
# The option below lets the user choose which LOG directory to plot information from
# An integer signifies the n:th most recent log. A specific log name tries to find that directory
CONFIG.PLOT_log_dir = 1
# The option below lets the user choose which EVAL directory to plot information from.
# I.e, choose which eval session to plot from given a specific training session
CONFIG.PLOT_eval_dir = None
"""
Miscellaneous
"""
CONFIG.MISC_include_baseline = True
CONFIG.MISC_use_gpu = False#True
CONFIG.MISC_dataset = 'images_post'#'images_pre'
CONFIG.MISC_dataset_split_file = None
CONFIG.MISC_grid_game = True
CONFIG.MISC_random_seed = 0
#CONFIG.MISC_rnd_crop = True
CONFIG.MISC_rgb_max = 255
#CONFIG.MISC_im_size = (256, 256)
CONFIG.MISC_step_sz = int(48*CONFIG.RL_softmax_step_size)
CONFIG.MISC_game_size = 5
CONFIG.MISC_im_size = (int(CONFIG.MISC_step_sz*(CONFIG.MISC_game_size-1)+48),
int(CONFIG.MISC_step_sz*(CONFIG.MISC_game_size-1)+48))
CONFIG.MISC_patch_size = (48, 48)
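# Worked example of the geometry above (comment added for clarity): with the default values,
# MISC_step_sz = int(48 * 1.1) = 52 pixels and MISC_im_size = (52 * 4 + 48, 52 * 4 + 48) = (256, 256),
# i.e. a 5x5 grid of 48x48 patches with a 4-pixel gap between neighbouring grid positions,
# which matches the commented-out (256, 256) value above.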
CONFIG.MISC_print_iter = 50
CONFIG.MISC_save_vis_iter = 400 # How often we save a visualization
CONFIG.MISC_vis_per_batch = 12
CONFIG.MISC_save_model_iter = 5000 # How often should we save the model weights
CONFIG.MISC_project_root_path = os.path.dirname(__file__)
CONFIG.MISC_main_pid = os.getpid()
CONFIG.MISC_dataset_path = "data" # Set accordingly
| 5,311 | 33.718954 | 153 |
py
|
airloc
|
airloc-master/doerchnet/logs/without-sem-seg-pre-michael/train.py
|
import os
import torch
import random
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.nn as nn
import json
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from shutil import copyfile
from utils.utils import get_random_crops, get_deterministic_crops, load_normalize_data
from doerchnet.utils import sample_doerch_crops , visualize_doerch, get_label, visualize_batch_doerch, calculate_precision
from doerchnet.networks import AJNet
from doerchnet.share_net import ShareNet
import torch.optim as optim
from utils.stat_collector import StatCollector
from config import CONFIG
try:
# Set all hyperparameters here to not clutter the config file
net_type = 'ShareNet'
seed = 0
batch_size = 64
epochs = 100000
multiply_images = 4
    # 8 dim -> local estimation of neighbouring patches
# 25 dim -> global guess on where target is
# 80 dim -> Global relative to start
dim = 8
max_dist = 1
optimizer_type = 'adam'
learning_rate = 1e-4
lower_learning_rate_factor = 0.33
momentum = 0.95
beta1 = 0.9
beta2 = 0.999
print_iter = 100
vis_iter = 1000
# Set seeds
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
info = dict([
("NetType" , net_type),
("Completed" , False),
("Metrics", [
"Loss",
"ValLoss",
"Accuracy",
"ValAccuracy",
"ActionsTaken",
"ValActionsTaken",
"CorrectActions",
"ValCorrectActions",
"AccuracyCorners",
"AccuracyBoundaries",
"AccuracyMiddle",
"ValAccuracyCorners",
"ValAccuracyBoundaries",
"ValAccuracyMiddle",
]),
("LogDir" , None),
("Blocks" , [2 ,2 , 2]),
("NbrParameters" , 0),
("LatentSpaceSize" , 0)
])
if dim not in [25 , 80]:
# Remove accuracy region metrics
for m in ["AccuracyCorners" , "AccuracyBoundaries" , "AccuracyMiddle"]:
info['Metrics'].remove(m)
info['Metrics'].remove("Val" + m)
metrics = info['Metrics']
    # Function used to update the learning rate
    def update_learning_rate(optimizer , learning_rate):
        for params in optimizer.param_groups:
            params['lr'] = learning_rate
# Find device
device = torch.device("cuda:0" if CONFIG.MISC_use_gpu and torch.cuda.is_available() else "cpu")
CONFIG.device = device
# Load data
trainloader,valloader = load_normalize_data(download = False, batch_size = batch_size , multiply_images = multiply_images)
print(f"Trainloader: {len(trainloader)}")
print(f"Valloader: {len(valloader)}")
# Save information about dataloaders
info['Dataset'] = CONFIG.MISC_dataset
info['ValLoaderLength'] = len(valloader)
info['TrainLoaderLength'] = len(trainloader)
valiter = iter(valloader)
tot_itr = 0
tot_nbr_itr = epochs * len(trainloader)
if net_type == 'ShareNet':
net = ShareNet(num_out_classes = dim)
else:
net = AJNet(net_type, num_classes=dim, both_branches = True)
    # Count number of parameters
net_parameters = filter(lambda p: p.requires_grad, net.parameters())
info['NbrParameters'] = int(sum([np.prod(p.size()) for p in net_parameters]))
# Record latentspace size in info file
noise = torch.randn(1, net.n_chan , CONFIG.MISC_patch_size[0] , CONFIG.MISC_patch_size[1]).cpu()
latent , _ = net(noise,noise)
latentSpaceSize = int(np.prod(list(latent.shape)))
info['LatentSpaceSize'] = latentSpaceSize
info['InputSpaceSize'] = int(np.prod(list(noise.shape)))
net = net.to(device)
# Set optimizer
if optimizer_type == 'sgd':
optimizer = optim.SGD(net.parameters() , lr = learning_rate, momentum = momentum)
elif optimizer_type == 'adam':
optimizer = optim.Adam(net.parameters() , lr = learning_rate, betas =(beta1 , beta2))
else:
raise(Exception("Unknown optimizer type:\t%s" % optimizer_type))
# Select loss function
criterion = nn.CrossEntropyLoss()
# Setup stat collector
log_dir_name = os.path.basename(CONFIG.STATS_log_dir)
CONFIG.STATS_DORCH_log_dir = os.path.join(CONFIG.MISC_project_root_path, "doerchnet", "logs", log_dir_name)
os.makedirs(CONFIG.STATS_DORCH_log_dir)
exclude_prints = [
"ActionsTaken",
"ValActionsTaken",
"CorrectActions",
"ValCorrectActions",
] # Does not print these statistics
CONFIG.STATS_DOERCH_vis_dir = os.path.join(CONFIG.STATS_DORCH_log_dir, "visualizations")
os.makedirs(CONFIG.STATS_DOERCH_vis_dir)
sc = StatCollector(CONFIG.STATS_DORCH_log_dir, tot_nbr_itr, print_iter = print_iter, exclude_prints = exclude_prints)
for metric in metrics:
sc.register(metric , {'type':'avg','freq':'step'})
# Enter log dir and write to file
info['LogDir'] = CONFIG.STATS_DORCH_log_dir
with open(os.path.join(CONFIG.STATS_DORCH_log_dir, "info.json") , 'w') as io:
json.dump(info, io , indent = 4)
# Save all files
copyfile("doerchnet/train.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "train.py"))
copyfile("config.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "config.py"))
copyfile("doerchnet/networks.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "networks.py"))
copyfile("doerchnet/share_net.py" , os.path.join(CONFIG.STATS_DORCH_log_dir, "share_net.py"))
if CONFIG.MISC_dataset.startswith('custom_'):
stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
else:
stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
with open(stat_path) as json_file:
stats = json.load(json_file)
patch_dims = (1,3,CONFIG.MISC_patch_size[0],CONFIG.MISC_patch_size[1])
dataset_means = torch.tensor(stats['means'][:3])
dataset_stds = torch.tensor(stats['stds'][:3])
unNormImage = transforms.Normalize( ( - dataset_means / dataset_stds).tolist() , (1.0 / dataset_stds).tolist() )
    # Optionally decrease the learning rate every 10 epochs (currently disabled, see the end of the epoch loop)
current_learning_rate = learning_rate
action_dist = torch.zeros(dim)
action_taken = torch.zeros(dim)
def update_learning_rate(optimizer , learning_rate):
for params in optimizer.param_groups:
params['lr'] = learning_rate
print("NetType:\t%s" % net_type)
print("Dataset:\t%s" % CONFIG.MISC_dataset)
for epoch in range(epochs):
for (i,data) in enumerate(trainloader):
batch_images , (start_crops_ , goal_crops_) = data
batch_images = batch_images
# Get random crops
crops_start, locs_start = get_random_crops(batch_images)
crops_goal, locs_goal = get_random_crops(batch_images, locs_start, max_dist = max_dist)
actions = get_label(locs_start, locs_goal, dim = dim)
crops_start = crops_start.to(device)
crops_goal = crops_goal.to(device)
action_dist += actions.sum(dim=0)
temp_action_dist = action_dist / action_dist.sum()
actions = actions.to(device)
_ , outputs = net(crops_start,crops_goal)
loss = criterion(outputs, actions ).to(device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# here do visualization
if tot_itr % vis_iter == 0:
pass
#visualize_doerch(batch_images, locs_start, locs_goal ,outputs, unNormImage, save_name = 'train_vis_%d' % tot_itr, PATH = CONFIG.STATS_DORCH_log_dir)
sc.s('Loss').collect(loss.item())
# Calculate precision of model at corners, boundaries and middle part
if dim in [25 , 80]:
prec_corner, prec_boundary , prec_middle = calculate_precision(outputs , actions)
sc.s('AccuracyCorners').collect(prec_corner)
sc.s('AccuracyBoundaries').collect(prec_boundary)
sc.s('AccuracyMiddle').collect(prec_middle)
# Calculate the accuracy
acc = (torch.argmax(actions , dim = 1, keepdim = True) == torch.argmax(outputs , dim = 1, keepdim = True)).float().mean()
# print(actions.argmax(dim=1,keepdim=True))
sc.s('Accuracy').collect(acc.item())
sc.s('ActionsTaken').collect(F.one_hot(outputs.argmax(dim = 1, keepdim = False ), num_classes=dim).float().mean(dim = 0).cpu().numpy())
sc.s('CorrectActions').collect(actions.mean(dim = 0).cpu().numpy())
with torch.no_grad():
                # Run one batch on the valloader as well
try:
val_images, (start_crops_ , goal_crops_) = next(valiter)
                except StopIteration:
valiter = iter(valloader)
val_images, ( start_crops_ , goal_crops_) = next(valiter)
val_images = val_images.to(device)
# Get random crops
val_crops_start, val_locs_start = get_random_crops(val_images)
val_crops_goal, val_locs_goal = get_random_crops(val_images, val_locs_start, max_dist = max_dist)
val_actions = get_label(val_locs_start, val_locs_goal, dim = dim)
val_crops_start = val_crops_start.to(device)
val_crops_goal = val_crops_goal.to(device)
val_actions = val_actions.to(device)
_ , val_outputs = net(val_crops_start,val_crops_goal)
val_loss = criterion(val_outputs, val_actions ).to(device)
# Logging
sc.s('ValLoss').collect(val_loss.item())
# Calculate precision of model at corners, boundaries and middle part
if dim in [25 , 80]:
prec_corner, prec_boundary , prec_middle = calculate_precision(val_outputs , val_actions)
sc.s('ValAccuracyCorners').collect(prec_corner)
sc.s('ValAccuracyBoundaries').collect(prec_boundary)
sc.s('ValAccuracyMiddle').collect(prec_middle)
# Calculate the accuracy
val_acc = (torch.argmax(val_actions , dim = 1, keepdim = True) == torch.argmax(val_outputs , dim = 1, keepdim = True)).float().mean()
sc.s('ValAccuracy').collect(val_acc.item())
sc.s('ValActionsTaken').collect(F.one_hot(val_outputs.argmax(dim = 1, keepdim = False), num_classes=dim).float().mean(dim = 0).cpu().numpy())
sc.s('ValCorrectActions').collect(val_actions.mean(dim = 0).cpu().numpy())
# here do visualization
if tot_itr % vis_iter == 0:
                    # If segmentation information is enabled, remove it and only visualize the RGB images
if CONFIG.RL_priv_use_seg:
val_images = val_images[:,0:3,:]
visualize_batch_doerch(val_images, val_locs_start , val_locs_goal , val_outputs, unNormImage,PATH = CONFIG.STATS_DOERCH_vis_dir, save_name = "val_%d" % tot_itr)
if tot_itr % print_iter == 0 or tot_itr == tot_nbr_itr - 1:
print("Iteration:\t%d / %d" % ( tot_itr, tot_nbr_itr))
sc.print()
#print(action_dist)
sc.save()
tot_itr += 1
if tot_itr % 5000 == 0:
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_DORCH_log_dir, "doerch_embedder"))
# Lower the learning rate NOTE not active
if (epoch + 1) % 10 == 0 and False:
current_learning_rate *= lower_learning_rate_factor
update_learning_rate(optimizer, current_learning_rate)
info['Completed'] = True
with open(os.path.join(CONFIG.STATS_DORCH_log_dir, "info.json") , 'w') as io:
json.dump(info , io , indent = 4)
except:
# TODO - Use signal handlers instead so that we can propagate the exceptions
#raise
while True:
            i = input("save the model? [y/n] ")
if i=='y':
# Save the encoder and the decoder
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_DORCH_log_dir, "doerch_embedder"))
print("model saved")
exit(1)
elif i == 'n':
print("Not saving")
exit(1)
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_DORCH_log_dir, "doerch_embedder"))
| 12,708 | 36.600592 | 176 |
py
|
airloc
|
airloc-master/segmentations/plot_results.py
|
#!/bin/env python3
import os
import time
import sys
import math
from dateutil.parser import parse
import numpy as np
from config import CONFIG
import json
import argparse
from utils.utils import find_latest_log
MA_SMOOTH = 0.0025
START_ITER_PLOT = 30
# Computes an exponential moving average of the raw metric values into 'mas_custom'; the first 30 entries simply copy the per-iteration means
def _custom_ma(data, ma_smooth=MA_SMOOTH):
for idx, val in enumerate(data['values']):
if idx < 30:
data['mas_custom'][idx] = data['means'][idx]
else:
data['mas_custom'][idx] = (1 - ma_smooth) * data['mas_custom'][idx - 1] + ma_smooth * data['values'][idx]
# Function for plotting each subplot
def _plot(datas, ax, title='plot', xlabel='x', ylabel='y', start_it=0, max_x=None, max_y=None, min_y = None,
show_draw='show' , legends = []):
legend_entries = []
for (i, data) in enumerate(datas):
# If the current data is full val print all values
if legends[i] == 'FullVal':
            # FullVal values are very sparse, so no smoothing and no filtering by start iteration
x = data['times']
y = data['values']
format = 'x'
else:
start_it = START_ITER_PLOT
x = data['times'][start_it:] #Just iterations
y = data['mas_custom'][start_it:] #Some special mean value
format = '-'
p, = ax.plot(x, y, format)
if len(legends) > i:
p.set_label(legends[i])
if len(legends) > 0:
ax.legend()
ax.grid(False)
# Calculate the axis in plot
if min_y is None:
min_y = np.min(y[start_it:])
if max_x is None:
max_x = x[-1]
if max_y is None:
max_y = np.max(y[start_it:])
# Setup argparse
parser = argparse.ArgumentParser()
# Choose log dir either based on name or on number n
log_selection = parser.add_mutually_exclusive_group()
log_selection.add_argument("--log-dir" , "-l" , type = str , help = "Select log dir based on name")
log_selection.add_argument("-n", type = int , help = 'Select the n:th latest log dir. 0 -> latest',default = 0)
parser.add_argument("--show" , action="store_true", default = False, help = "Show the plot on the screen instead of saving it.")
args = parser.parse_args()
## Script part
# Load and set correct settings for matplotlib based on whether to show the plot or just save it
if args.show:
import tkinter
import matplotlib
from matplotlib import pyplot as plt
matplotlib.use('TkAgg')
else:
import matplotlib
from matplotlib import pyplot as plt
log_base = os.path.join(CONFIG.MISC_project_root_path , "segmentations" , "logs")
# Next determine which log dir should be used
if args.log_dir is not None:
# Select dir named PLOT_log_dir
log_dir = args.log_dir
else:
# select the n:th latest log dir
log_dir = find_latest_log(log_base , args.n)
# If log_dir is None, there were not that many logs
if log_dir is None:
print("There are not that many training results in %s" % log_base)
exit(1)
path_log_dir = os.path.join(log_base , log_dir)
# We have now identified a log dir from a training session
# We make sure that the directory actually exists before proceeding
if not os.path.exists(path_log_dir):
print("Error, the selected log dir does not exist:\t%s" % path_log_dir)
print("Check arguments and/or plot settings in config.py")
exit(1)
# Open json containing information about the training session
try:
with open(os.path.join(path_log_dir , "info.json") , 'r') as json_file:
training_info = json.load(json_file)
except:
print("\nERROR: Unable to open info json.\n")
exit(1)
# Now check whether to print training data or evaluation data
if True:
# Plot training data
# TODO - Put training data in sub folder. like "training_stats"
data_type = 'Training'
path_log_data_dir = path_log_dir
# Since we will be plotting training info. The info json will be the same as the training_info json
info = training_info
else:
# Plot evaluation data
# First need to determine which eval run to plot from
# This works the same way as when we choose which log dir to use
path_eval_dir_base = os.path.join(path_log_dir , "eval_logs")
if type(CONFIG.PLOT_eval_dir) == str:
eval_dir = CONFIG.PLOT_eval_dir
elif type(CONFIG.PLOT_eval_dir) == int:
# select the n:th latest log dir
eval_dir = find_latest_log(path_eval_dir_base , CONFIG.PLOT_eval_dir)
else:
# Select the latest available log dir
eval_dir = find_latest_log(path_eval_dir_base , 0)
if eval_dir is None:
print("There are not that many eval results in %s" % path_log_dir)
exit(1)
# We now have path_log_data_dir which contains all metrics
path_log_data_dir = os.path.join(path_eval_dir_base, eval_dir)
data_type = 'Eval'
# Load information about this eval run from the info file TODO change back to info.json
with open(os.path.join(path_log_data_dir,"info.json"), 'r') as json_file:
info = json.load(json_file)
# The correct directory containing the data we want to plot is now in 'path_log_data_dir'
netType = training_info['NetType']
metrics = info['Metrics']
#startedTrainingAt = training_info['StartedTraining']
nbrOfTrainableParameters = training_info['NbrParameters']
#dataset = training_info['Dataset']
# Before plotting, print information about the retrieved data
print('')
print("Training session:\t%s" % log_dir)
print("Log directory:\t%s" % log_base)
print("NetType:\t%s" % netType)
print("Number of trainable parameters:\t%d" % nbrOfTrainableParameters )
#print("Dataset:\t%s" % dataset)
# Filterd
filterdMetrics = list(filter(lambda s: not s.startswith('Val') and not s.startswith('FullVal'),metrics ))
# Calculate dimensions of subplots
n_cols = math.ceil(math.sqrt(len(filterdMetrics)))
n_rows = math.ceil(len(filterdMetrics) / n_cols)
# Plot all metrics for the selected run in same figure.
fig , axes = plt.subplots(n_rows, n_cols, sharex = False)
# Flatten the axes so they can be indexed with a single counter regardless of the grid shape
axes = np.array(axes).reshape(-1)
ax_counter = 0
for (i, metric) in enumerate(metrics):
#ix , iy = axis_inds
if ( i >= len(metrics) or metric.startswith('Val')):
continue
    # For now there are some FullVal metrics that are not to be plotted
if metric.startswith('FullVal'):
continue
ax = axes[ax_counter]
ax_counter += 1
# Read data from log path
log_path = os.path.join(path_log_data_dir, metric + '.npz')
try:
data = np.load(log_path)
except:
print("\nERROR: Unable to load data for metric:\t%s\n" % metric)
exit(1)
data = {'means': data['means'], 'mas': data['mas'],
'values': data['values'], 'times': data['times'],
'mas_custom': np.zeros_like(data['mas'])}
_custom_ma(data)
legends = ['Train']
plotData = [data]
# Check if there is val data availble
if 'Val' + metric in metrics:
valData = np.load(os.path.join(path_log_data_dir , 'Val' + metric + '.npz'))
valData = {'means': valData['means'], 'mas': valData['mas'],
'values': valData['values'], 'times': valData['times'],
'mas_custom': np.zeros_like(valData['mas'])}
_custom_ma(valData)
legends.append('Val')
plotData.append(valData)
try:
# Check if there is full val available in the data
if 'FullVal' + metric in metrics:
fullValData = np.load(os.path.join(path_log_data_dir , 'FullVal' + metric + '.npz'))
fullValData = {'means': fullValData['means'], 'mas': fullValData['mas'],
'values': fullValData['values'], 'times': fullValData['times'],
'mas_custom': np.zeros_like(fullValData['mas'])}
_custom_ma(fullValData)
fullValData['times'] = np.array(info['FullValIters'])
if len(fullValData['times']) > 0:
legends.append('FullVal')
plotData.append(fullValData)
except:
pass
# Now check loaded data to make sure there are enough data points
if data['mas_custom'].shape[0] <= START_ITER_PLOT:
print("\nERROR: Too few data points saved for plotting.\n")
exit(1)
_plot(plotData,ax, show_draw='show' , legends =legends)
# Set title according to the json data file
ax.set_title(metric)
# Set title of entire window
fig.canvas.manager.set_window_title("%s data from %s:\t%s" %( data_type, netType , log_dir))
if args.show:
plt.show()
else:
# Find filepath
filename = os.path.join(path_log_dir, "Training_Statistics_%s_%s.png" % (netType , log_dir))
plt.savefig(filename)
print("\nPlot saved as:\t%s\n" % os.path.basename(filename))
| 8,700 | 32.988281 | 128 |
py
|
airloc
|
airloc-master/segmentations/unet_model.py
|
""" Full assembly of the parts to form the complete network """
from .unet_parts import *
class UNet(nn.Module):
def __init__(self, n_channels, n_classes, bilinear=False):
super(UNet, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
self.inc = DoubleConv(n_channels, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256, 512)
factor = 2 if bilinear else 1
self.down4 = Down(512, 1024 // factor)
self.up1 = Up(1024, 512 // factor, bilinear)
self.up2 = Up(512, 256 // factor, bilinear)
self.up3 = Up(256, 128 // factor, bilinear)
self.up4 = Up(128, 64, bilinear)
self.outc = OutConv(64, n_classes)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
logits = self.outc(x)
return logits
| 1,130 | 29.567568 | 63 |
py
|
airloc
|
airloc-master/segmentations/u_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import CONFIG
class UNet(nn.Module):
def __init__(self, n_channels, n_classes, bilinear=False):
super(UNet, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
self.inc = DoubleConv(n_channels, 4)
self.down1 = Down(4, 8)
self.down2 = Down(8, 16)
self.down3 = Down(16, 32)
factor = 2 if bilinear else 1
self.down4 = Down(32, 64 // factor)
self.up1 = Up(64, 32 // factor, bilinear)
self.up2 = Up(32, 16 // factor, bilinear)
self.up3 = Up(16, 8 // factor, bilinear)
self.up4 = Up(8, 4, bilinear)
self.outc = OutConv(4, n_classes)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
logits = self.outc(x)
return logits
def get_latent_space_size(self):
noise = torch.randn((1,3, CONFIG.MISC_patch_size[0] , CONFIG.MISC_patch_size[1]))
# Go halfway through network and return latent shape
noise = self.inc(noise)
noise = self.down1(noise)
noise = self.down2(noise)
noise = self.down3(noise)
noise = self.down4(noise)
return list(noise.shape)
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
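# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module).
# It assumes config.py is importable and CONFIG.MISC_patch_size is (48, 48); the
# segmentation train.py instantiates this network as UNet(n_channels = 3, n_classes = 2).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    net = UNet(n_channels = 3, n_classes = 2)
    x = torch.randn(1, 3, CONFIG.MISC_patch_size[0], CONFIG.MISC_patch_size[1])
    logits = net(x)
    # Per-pixel class logits with the same spatial size as the input patch
    print(logits.shape, net.get_latent_space_size())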
| 4,019 | 29.687023 | 122 |
py
|
airloc
|
airloc-master/segmentations/utils.py
|
from config import CONFIG
import math
import json
import os
import time
import zipfile
import gc
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import random_split,Subset
import matplotlib
matplotlib.use('Agg')
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib.colors import to_rgb
import urllib
from dateutil.parser import parse
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import cross_entropy
# Run on init, setup colors for segmentation classes
CLASS_COLORS_LIST = ['#3C1098', '#8429F6', '#6EC1E4', '#FEDD3A', '#E2A929', '#9B9B9B','#000000' ]
CLASS_COLORS_TENSOR = torch.tensor([ to_rgb(c) for c in CLASS_COLORS_LIST])
"""
labels = [int(to_rgb(l)[0]*255) for l in labels]
cls = range(1,len(labels)+1)
labels = dict(zip(labels,cls))
"""
def map_ohe_to_color(seg_mask):
""" Maps a ohe feature map with C channels to a RGB image with specific colors for all classes"""
if len(seg_mask.shape) > 3:
# Get seg mask details
num_images, num_classes, im_H, im_W = seg_mask.shape
#num_classes = torch.max(seg_mask).item() + 1
        # Convert class values to an RGB image, permute to put the channel dim in front of width and height
rgb_images = CLASS_COLORS_TENSOR[torch.argmax(seg_mask , dim = 1)].permute(0, 3, 1 , 2)
else:
num_images, im_H, im_W = seg_mask.shape
rgb_images = CLASS_COLORS_TENSOR[seg_mask].permute(0,3,1,2)
return rgb_images
def visualize_segmentation_est(val_seg , est_seg , vis_name = "visualization" , transform = None, max_images = 5):
val_seg = val_seg[0:max_images].cpu()
est_seg = est_seg[0:max_images].cpu()
max_images = min(max_images, val_seg.shape[0])
    # Due to ndindex being very awkward here we need a little hack
if max_images == 1:
max_images = 2
turn_off_last_row = True
else:
turn_off_last_row = False
fig, axes = plt.subplots(max_images, 2)
# Transform images from channel wise ohe to RGB images
rgb_val_seg = map_ohe_to_color(val_seg)
# Check if still ohe
if len(est_seg.shape) > 3:
est_seg = torch.argmax(est_seg, dim = 1)
rgb_est_seg = map_ohe_to_color(est_seg)
for (i, axis_inds) in enumerate(np.ndindex(axes.shape)):
ix, iy = axis_inds
if turn_off_last_row and ix == 1:
axes[ix,iy].axis('off')
continue
curr_img = rgb_val_seg[ix,:] if iy == 0 else rgb_est_seg[ix,:]
axes[ix,iy].imshow(curr_img.permute(1,2,0))
if (ix,iy) == (0,0):
axes[ix,iy].set_title('Ground Truth')
if (ix,iy) == (0,1):
axes[ix,iy].set_title('Estimate Segmentation')
plt.savefig(os.path.join(CONFIG.STATS_SEG_log_dir , vis_name) + '.png')
plt.cla()
plt.clf()
plt.close('all')
gc.collect()
def format_labels_for_loss(labels):
# If input is single channel, transform to ohe
if labels.shape[1] == 1:
n_classes = int(torch.max(labels).item()) + 1
label_out = torch.zeros((labels.shape[0] , n_classes , labels.shape[2],labels.shape[3]))
for i in range(n_classes):
label_out[:,i,:,:] = ((labels == i) * 1.0).squeeze(1)
label_out = label_out.long()
else:
label_out = labels
return label_out.to(CONFIG.device)
def compute_loss(est_mask , label_mask):
# For now only compute cross_entropy loss
loss = cross_entropy(est_mask , label_mask)
# TODO - Add boundary separation loss
return loss
def mask_image(image_batch, grid_size = 8):
p = 0.5
masked_batch = image_batch.clone().detach()
n_up = int(CONFIG.MISC_im_size[0]/grid_size)
for i_img in range(masked_batch.shape[0]):
        masked = (np.random.uniform(size = (grid_size, grid_size)) > p )
#masked = np.resize(masked,CONFIG.MISC_im_size)
masked = masked.repeat(n_up, axis=0).repeat(n_up, axis=1)
masked_batch[i_img] = masked_batch[i_img] * torch.tensor(masked)
return masked_batch
# DICE Loss
class DiceLoss(nn.Module):
def __init__(self, class_weights = None):
super(DiceLoss, self).__init__()
self.class_weights = class_weights
if isinstance( self.class_weights, list):
self.class_weights = torch.tensor(self.class_weights)
self.class_weights = self.class_weights.to(CONFIG.device)
def forward(self, inputs , targets):
"""
Calculate Dice loss between inputs and targets assumed to be BxCxHxW
"""
if inputs.max() > 1 or inputs.min() < -1:
inputs = torch.sigmoid(inputs)
numerator = 2 * (inputs * targets).flatten( start_dim = 2 ).sum( dim = 2)
denominator = (inputs.sum(dim = (2,3)) + targets.sum(dim = (2,3)))
dice_loss = (numerator + 1.0) / (denominator + 1.0)
if not self.class_weights is None:
dice_loss = self.class_weights * dice_loss
return -dice_loss.mean()
def debug_plot(im):
matplotlib.use('TkAgg')
plt.imshow(im.permute(1,2,0))
plt.show()
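# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module):
# a minimal CPU-only check of DiceLoss on one-hot BxCxHxW tensors. Leaving
# class_weights as None skips the CONFIG.device transfer that train.py normally
# prepares before instantiating the weighted loss.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    criterion = DiceLoss()
    est = torch.rand(4, 2, 48, 48)                   # values already in [0, 1], so no sigmoid is applied
    gt = (torch.rand(4, 2, 48, 48) > 0.5).float()    # binary ground-truth masks
    loss = criterion(est, gt)
    # The loss is the negated soft Dice score, so values closer to -1 mean better overlap
    print(loss.item())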
| 5,241 | 27.802198 | 114 |
py
|
airloc
|
airloc-master/segmentations/unet_parts.py
|
""" Parts of the U-Net model """
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
| 2,602 | 32.371795 | 122 |
py
|
airloc
|
airloc-master/segmentations/networks.py
|
import random
from config import CONFIG
import torch
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from utils.utils import calculate_cnn_output_size
class BakeNet(nn.Module):
def __init__(self, n_out_chan = 2):
super(BakeNet ,self).__init__()
# network output is always in One Hot Encoding format
# Very simple demonstration network
modules = []
modules.append( nn.Conv2d(3 , 16 , 3 , 1 , padding = 1))
modules.append( nn.ReLU())
modules.append( nn.BatchNorm2d( 16 ))
modules.append( nn.Conv2d( 16 , 32 , 3 , 1 , padding = 1))
modules.append( nn.ReLU())
modules.append( nn.BatchNorm2d( 32))
modules.append( nn.Conv2d( 32 , 16 , 3 , 1 , padding = 1))
modules.append( nn.ReLU())
modules.append( nn.BatchNorm2d(16))
modules.append( nn.Conv2d( 16 , n_out_chan , 3, 1 , padding = 1))
modules.append(nn.Softmax(dim = 1))
self.seq = nn.Sequential(*modules)
def forward(self, x):
return self.seq(x)
def get_latent_space_size(self):
        return 0 # No well-defined latent space for this fully convolutional network
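# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module).
# All convolutions use padding = 1 with no pooling, so the per-pixel class
# probabilities keep the spatial size of the input patch.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    net = BakeNet()
    x = torch.randn(1, 3, 48, 48)
    probs = net(x)
    # Softmax over the channel dimension: each pixel's class probabilities sum to one
    print(probs.shape, probs.sum(dim = 1).mean().item())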
| 1,229 | 25.170213 | 73 |
py
|
airloc
|
airloc-master/segmentations/train.py
|
import os
import torch
import random
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.nn as nn
import json
import numpy as np
from torchinfo import summary
from shutil import copyfile
from utils.utils import get_random_crops, get_deterministic_crops, load_normalize_data
from segmentations.utils import compute_loss, format_labels_for_loss, visualize_segmentation_est
from segmentations.utils import DiceLoss
from segmentations.networks import BakeNet
from segmentations.u_net import UNet
import torch.optim as optim
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from utils.stat_collector import StatCollector
from config import CONFIG
# Set all hyperparameters here to not clutter the config file
try:
netType = 'UNet'
seed = 0
batch_size = 64
epochs = 10000
multiply_images = 2
optimizer_type = 'adam'
learning_rate = 1e-4
lower_learning_rate_factor = 0.33
momentum = 0.95
beta1 = 0.9
beta2 = 0.999
print_iter = 100
vis_iter = 125
# Set seeds
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
if not CONFIG.RL_priv_use_seg:
raise(Exception("ERROR: segmentation information not enabled!"))
info = dict([
("NetType" , netType),
("Completed" , False),
("Metrics", [
"Loss",
"ValLoss",
"Accuracy",
"ValAccuracy"
]),
("LogDir" , None),
("Blocks" , [1 ,1 , 1]),
("NbrParameters" , 0),
("LatentSpaceSize" , 0),
("NbrImagesInTrain", 0),
("NbrImagesInVal" , 0)
])
metrics = info['Metrics']
    # Function used to update the learning rate
    def update_learning_rate(optimizer , learning_rate):
        for params in optimizer.param_groups:
            params['lr'] = learning_rate
# Transform all input images
loadTransform = transforms.Compose([
transforms.ToTensor(),
transforms.Resize(CONFIG.MISC_patch_size),
transforms.Normalize((0.5,0.5,0.5) , (0.5,0.5,0.5))
])
# Find device
device = torch.device("cuda:0" if CONFIG.MISC_use_gpu and torch.cuda.is_available() else "cpu")
CONFIG.device = device
# Load data
trainloader,valloader = load_normalize_data(download = False, batch_size = batch_size , multiply_images = multiply_images)
valiter = iter(valloader)
info['NbrImagesInTrain'] = len(trainloader) * batch_size
info['NbrImagesInVal'] = len(valloader) * batch_size
tot_itr = 0
tot_nbr_itr = epochs * len(trainloader)
if netType == 'BakeNet':
net = BakeNet()
elif netType == 'UNet':
net = UNet( n_channels = 3, n_classes = 2)
else:
raise(Exception("Unkown network type"))
    # Count number of parameters
net_parameters = filter(lambda p: p.requires_grad, net.parameters())
info['NbrParameters'] = int(sum([np.prod(p.size()) for p in net_parameters]))
info['LatentSpaceSize'] = net.get_latent_space_size()
# Set optimizer
if optimizer_type == 'sgd':
optimizer = optim.SGD(net.parameters() , lr = learning_rate, momentum = momentum)
elif optimizer_type == 'adam':
optimizer = optim.Adam(net.parameters() , lr = learning_rate, betas =(beta1 , beta2))
else:
raise(Exception("Unknown optimizer type:\t%s" % optimizer_type))
# Select loss function
class_weights = [1,3]
#criterion = nn.MSELoss()
criterion = DiceLoss(class_weights)
# Setup stat collector
log_dir_name = os.path.basename(CONFIG.STATS_log_dir)
CONFIG.STATS_SEG_log_dir = os.path.join(CONFIG.MISC_project_root_path, "segmentations", "logs", log_dir_name)
os.makedirs(CONFIG.STATS_SEG_log_dir)
sc = StatCollector(CONFIG.STATS_SEG_log_dir, tot_nbr_itr, print_iter = print_iter)
for metric in metrics:
sc.register(metric , {'type':'avg','freq':'step'})
# Enter log dir and write to file
info['LogDir'] = CONFIG.STATS_SEG_log_dir
with open(os.path.join(CONFIG.STATS_SEG_log_dir, "info.json") , 'w') as io:
json.dump(info, io , indent = 4)
# Construct noise for network summary
noise = torch.randn(1,3, CONFIG.MISC_patch_size[0] , CONFIG.MISC_patch_size[1])
# Print model summary to separate file
with open(os.path.join(CONFIG.STATS_SEG_log_dir , "network_summary") , 'w') as io:
print(summary(net, input_data = noise, verbose=0), file = io)
    # Save all files
copyfile("segmentations/train.py" , os.path.join(CONFIG.STATS_SEG_log_dir, "train.py"))
copyfile("config.py" , os.path.join(CONFIG.STATS_SEG_log_dir, "config.py"))
copyfile("segmentations/networks.py" , os.path.join(CONFIG.STATS_SEG_log_dir, "networks.py"))
copyfile("segmentations/u_net.py" , os.path.join(CONFIG.STATS_SEG_log_dir, "u_net.py"))
copyfile("segmentations/utils.py" , os.path.join(CONFIG.STATS_SEG_log_dir, "utils.py"))
if CONFIG.MISC_dataset.startswith('custom_'):
stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
else:
stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
with open(stat_path) as json_file:
stats = json.load(json_file)
dataset_means = torch.tensor(stats['means'][:3])
dataset_stds = torch.tensor(stats['stds'][:3])
unNormImage = transforms.Normalize( ( - dataset_means / dataset_stds).tolist() , (1.0 / dataset_stds).tolist() )
    # Optionally decrease the learning rate every 10 epochs (currently disabled, see the end of the epoch loop)
current_learning_rate = learning_rate
def update_learning_rate(optimizer , learning_rate):
for params in optimizer.param_groups:
params['lr'] = learning_rate
net = net.to(device)
for epoch in range(epochs):
for (i,data) in enumerate(trainloader):
batch_images , (start_crops_ , goal_crops_) = data
batch_images = batch_images
# Get random crops
crops , _ = get_random_crops(batch_images)
# The input is the RGB image
inpt = crops[:,0:3,:,:].to(device)
# The ground truth are the segmentation mask
labels = crops[:,3:,:,:].to(device)
# Labels need to be transformed to correct format
labels = format_labels_for_loss(labels)
outputs = net(inpt)
loss = criterion(outputs, labels)
#loss = compute_loss(outputs , labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
sc.s('Loss').collect(loss.item())
            # Calculate and log pixelwise accuracy
acc = (torch.argmax(labels, dim = 1) == torch.argmax(outputs, dim = 1)).float().mean()
sc.s('Accuracy').collect(acc.item())
with torch.no_grad():
                # Run one batch on the valloader as well
try:
val_images, (start_crops_ , goal_crops_) = next(valiter)
                except StopIteration:
valiter = iter(valloader)
val_images, ( start_crops_ , goal_crops_) = next(valiter)
val_images = val_images.to(device)
                # Due to how the data loading is devised, the valloader uses the same multiply_images
                # factor as the trainloader. During validation fixed crops are used, so there is
                # no need to run the same image with the same patch multiple times
val_images = val_images[0:-1:multiply_images ,:]
val_crops , _ = get_deterministic_crops(val_images, coords = start_crops_)
val_inpt = val_crops[:,0:3,:,:].to(device)
                val_labels = val_crops[:,3:,:,:].to(device) # includes the newly added extra channel for image boundaries
val_labels = format_labels_for_loss(val_labels)
val_outs = net(val_inpt).to(device)
val_loss = criterion(val_outs, val_labels)
# Logging
sc.s('ValLoss').collect(val_loss.item())
val_acc = (torch.argmax(val_labels, dim = 1) == torch.argmax(val_outs, dim = 1)).float().mean()
sc.s('ValAccuracy').collect(val_acc.item())
if tot_itr % print_iter == 0 or tot_itr == tot_nbr_itr - 1:
print("Iteration:\t%d / %d" % ( tot_itr, tot_nbr_itr))
sc.print()
sc.save()
# Do visualize
if tot_itr % vis_iter == 0 or tot_itr == tot_nbr_itr - 1:
visualize_segmentation_est(val_labels , val_outs , vis_name = "vis_%d" % tot_itr)
tot_itr += 1
# Lower the learning rate
if (epoch + 1) % 10 == 0 and False:
current_learning_rate *= lower_learning_rate_factor
update_learning_rate(optimizer, current_learning_rate)
info['Completed'] = True
with open(os.path.join(CONFIG.STATS_SEG_log_dir, "info.json") , 'w') as io:
json.dump(info , io , indent = 4)
finally:
while(True):
        user = input("\nSave model [y/n]\n")
        if user == 'y':
# Save the encoder and the decoder
torch.save(net.state_dict() , os.path.join(CONFIG.STATS_SEG_log_dir, "final_unet"))
print("Model Saved")
break
elif user == 'n':
print("Model Not Saved")
break
print("\nTraining Completed!\n")
| 9,545 | 32.261324 | 126 |
py
|
airloc
|
airloc-master/segmentations/logs/sem-seg-model/u_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import CONFIG
class UNet(nn.Module):
def __init__(self, n_channels, n_classes, bilinear=False):
super(UNet, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
self.inc = DoubleConv(n_channels, 4)
self.down1 = Down(4, 8)
self.down2 = Down(8, 16)
self.down3 = Down(16, 32)
factor = 2 if bilinear else 1
self.down4 = Down(32, 64 // factor)
self.up1 = Up(64, 32 // factor, bilinear)
self.up2 = Up(32, 16 // factor, bilinear)
self.up3 = Up(16, 8 // factor, bilinear)
self.up4 = Up(8, 4, bilinear)
self.outc = OutConv(4, n_classes)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
logits = self.outc(x)
return logits
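# Shape sketch (illustrative, not from the original file): each Down block halves
# the resolution, so inputs whose sides are divisible by 16 pass through cleanly, e.g.
# net = UNet(n_channels=3, n_classes=2)
# logits = net(torch.randn(1, 3, 48, 48)) # -> torch.Size([1, 2, 48, 48])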
def get_latent_space_size(self):
noise = torch.randn((1,3, CONFIG.MISC_patch_size[0] , CONFIG.MISC_patch_size[1]))
# Go halfway through network and return latent shape
noise = self.inc(noise)
noise = self.down1(noise)
noise = self.down2(noise)
noise = self.down3(noise)
noise = self.down4(noise)
return list(noise.shape)
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
| 4,019 | 29.687023 | 122 |
py
|
airloc
|
airloc-master/utils/agent_utils.py
|
import math
import os
import time
import zipfile
import warnings
import gc
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from torch.distributions import MultivariateNormal, OneHotCategorical
import matplotlib
import matplotlib.pyplot as plt
from torch.nn import CrossEntropyLoss
from config import CONFIG
from utils.utils import move_crop , get_deterministic_crops , compute_iou ,\
get_random_crops , get_frac_outside , visualize_trajectory , \
get_crop_distance , check_outside , project_into_image
def normalize_batch_weights(batch_weights , batch_dists):
if CONFIG.RL_batch_size != 1 or CONFIG.RL_multiply_images != 1:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
batch_weights = normalize_grid(batch_weights, batch_dists)
return batch_weights
def normalize_grid(batch_weights,batch_dists):
# Due to pytorch lacking nanstd we have to convert to numpy for these computations
batch_dists_np = batch_dists.cpu().detach().numpy()
batch_weights_np = batch_weights.cpu().detach().numpy()
n_groups = 5
step = 1
lwr,upr = 0,step
for i in range(n_groups):
idx = np.all([(batch_dists_np <= upr), (batch_dists_np > lwr)], axis=0)
# Calculate nanstd separately to make sure that it isn't zero anywhere
nanstd = np.nanstd(batch_weights_np[idx] )
if nanstd == 0.0:
nanstd = 1
# Normalize weights for each step of the agent separately
batch_weights_np[idx] = (batch_weights_np[idx] - np.nanmean(batch_weights_np[idx])) / nanstd
# Move to the next set of distances
lwr += step
upr += step
# Handle the largest as one group
idx = batch_dists_np >lwr
# Calculate nanstd separately to make sure that it isn't zero anywhere
nanstd = np.nanstd(batch_weights_np[idx] , axis = 0)
if nanstd == 0.0: nanstd = 1
# Normalize weights for each step of the agent separately
batch_weights_np[idx] = (batch_weights_np[idx] - np.nanmean(batch_weights_np[idx])) / nanstd
# Convert back to tensor and send to device
batch_weights = torch.from_numpy(batch_weights_np).to(CONFIG.device)
return batch_weights
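# Illustrative effect (assumed numbers): if episodes whose start-goal grid distance
# is 1 have raw weights [2.0, 4.0] and episodes at distance 3 have [10.0, 30.0],
# each pair is standardised within its own distance bin to roughly [-1, 1], so
# short and long episodes contribute comparably scaled policy-gradient weights.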
def get_policy(agent_net, episode):
# If softmax agent is enabled the policy is now a distribution over 8 different
# directions in which the agent can move.
# Get the output of the agent
output, softmax_embedding = agent_net(episode)
# Create policy distribution
policy = OneHotCategorical(probs = output)
return policy, softmax_embedding
def get_action(agent_net , episode, deterministic = False):
if deterministic:
action , softmax_embedding = agent_net(episode)
return action, softmax_embedding
else:
policy , softmax_embedding = get_policy(agent_net, episode)
samp = policy.sample()
if not CONFIG.RL_agent_allowed_outside:
outside = get_outside(episode).to(CONFIG.device)
if (samp * outside).sum() == 1 :
samp = policy.sample()
return samp, softmax_embedding
def get_outside(episode):
outside = torch.zeros([1,8])
x,y = episode.locs[episode.step,:2]/CONFIG.MISC_step_sz
if x == 0:
outside[0,7] = 1
outside[0,:2] = 1
if y == 0:
outside[0,5:] = 1
if x == 4:
outside[0,3:6] = 1
if y == 4:
outside[0,1:4] = 1
return outside
def map_action_to_move(action):
""" Maps the action which is a one hot encoded vector to a move in pixels."""
# This will be the move in pixels
c = torch.argmax(action).item()
step_sz = int(CONFIG.RL_softmax_step_size * CONFIG.MISC_patch_size[0])
# Translate selected action to a pixelwise move.
# Remember, increasing the y coordinate means moving down in the image
if c == 0:
dx , dy = 0,-1 # Move up
elif c == 1:
dx , dy = 1 , -1 # Move up right
elif c == 2:
dx , dy = 1 , 0 # Move right
elif c == 3:
dx , dy = 1 , 1 # Move down right
elif c == 4:
dx , dy = 0 , 1 # Move down
elif c == 5:
dx , dy = -1 , 1 # Move down left
elif c == 6:
dx , dy = -1 , 0 # Move left
elif c == 7:
dx , dy = -1 , -1 # Move up left
else:
raise(Exception("Invalid action:\t%d" % c))
move = torch.tensor([dy , dx])
# Now we have direction, multiply with patch size to get correct distance
# Also hyperparameter to control step size
move = step_sz * move
return move
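# Worked example (assuming a 48x48 patch and RL_softmax_step_size = 1.0): the
# one-hot action tensor([[0, 0, 1, 0, 0, 0, 0, 0]]) gives c == 2 ("move right"),
# so (dx, dy) = (1, 0) and the returned move is tensor([0, 48]), i.e. (dy, dx) in pixels.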
def take_step(action , episode, softmax_embedding=None):
# Calculate the new location
action_in = action
# The action is a oneHotEncoding of in which direction the agent should move
# Map the action to a move in (dx,dy) and add to previous position
move = map_action_to_move(action)[None,:]
loc_next = episode.loc_current.clone().detach()
loc_next[0,0:2] += move[0,:]
# Calculate the reward for this action
reward = get_reward(loc_next, episode, action_in)
# Check if the episode has been completed
done = check_if_done(loc_next, episode)
return loc_next, reward, done
def check_if_done(loc_next , episode):
# If overlap with goal is significant we are done
iou = compute_iou(loc_next, episode.loc_goal ).item()
done = iou >= CONFIG.RL_done_iou
# If we have reached the maximum number of steps the episode has ended
return done or (episode.step + 1 >= CONFIG.RL_max_episode_length)
def get_reward(loc_next, episode, action):
# Rewards are partially based on distances
prev_dist = get_crop_distance(episode.loc_current[0], episode.loc_goal[0])
next_dist = get_crop_distance(loc_next[0], episode.loc_goal[0])
# TODO: Add max dist which is in regard to the goal and start patches
max_dist = np.sqrt(np.prod(np.array(CONFIG.MISC_im_size) - np.array(CONFIG.MISC_patch_size)))
iou = compute_iou(loc_next , episode.loc_goal).item()
if iou > 0.2:
reward = CONFIG.RL_reward_step + CONFIG.RL_reward_iou_scale * iou
else:
reward = CONFIG.RL_reward_step
if iou > CONFIG.RL_done_iou:
reward += CONFIG.RL_reward_goal
elif episode.step + 1 >= CONFIG.RL_max_episode_length:
reward += CONFIG.RL_reward_failed
if ( prev_dist > next_dist):
reward += CONFIG.RL_reward_closer
if CONFIG.RL_reward_distance:
reward += CONFIG.RL_reward_goal*(max_dist - next_dist)/ max_dist
return reward
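# Illustrative composition (hypothetical CONFIG values): with RL_reward_step = -1,
# RL_reward_iou_scale = 10, RL_reward_goal = 10, RL_reward_closer = 1,
# RL_done_iou = 0.4 and RL_reward_distance disabled, a step that moves closer and
# reaches iou = 0.5 scores -1 + 10*0.5 + 10 + 1 = 15, whereas a non-final step
# that moves away without reaching the goal scores just -1.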
def update_net(batch , agent_net, optimizer, entropy_bonus = None):
loss = 0
# Log the entropy of taken action
entropy_taken_actions = torch.zeros(CONFIG.RL_batch_size * CONFIG.RL_multiply_images * CONFIG.RL_max_episode_length)
action_counter = 0
eps_counter = 0
# Get one trajectory, calculate loss for each time step and add to global loss
for ep_id in range(batch.idx):
eps_counter += 1
for step_id in range(1 , batch.steps[ep_id].int() + 1):
# Get the episode, the action and the weight
ep , action , weight = batch.get_episode(ep_id , step_id)
# Get the corresponding policy
policy , softmax_embedding = get_policy(agent_net , ep)
# Get log probability of taken action
logp = policy.log_prob(action)
# Add to loss with weight
loss -= logp * weight
# Calculate entropy for logging (and possibly for entropy bonus)
# entropy = - policy.probs * policy.logits
entropy = policy.entropy()
entropy_taken_actions[action_counter] = entropy
if entropy_bonus is not None and entropy_bonus != 0:
loss -= entropy_bonus * entropy
action_counter += 1
# If the agent is of type RNN reset the hidden states
if agent_net.AGENT_TYPE == 'RNN':
agent_net.reset()
# Update the network with the correct frequency
if CONFIG.RL_nbr_eps_update == eps_counter:
loss = loss / action_counter
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch.sc.s('Loss').collect( loss.item() )
batch.sc.s('Entropy').collect(entropy_taken_actions[0:action_counter].mean().item())
loss = 0
action_counter = 0
eps_counter = 0
if (CONFIG.RL_nbr_eps_update //2) <= eps_counter or (batch.idx == eps_counter ):
loss = loss / action_counter
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch.sc.s('Loss').collect( loss.item() )
batch.sc.s('Entropy').collect(entropy_taken_actions[0:action_counter].mean().item())
loss = 0
action_counter = 0
else:
pass
#print("Skipping batch with %d episodes" % eps_counter)
def compute_loss(batch , agent_net, entropy_bonus = None):
loss = 0
# Log the entropy of taken action
entropy_taken_actions = torch.zeros(CONFIG.RL_batch_size * CONFIG.RL_multiply_images * CONFIG.RL_max_episode_length)
action_counter = 0
# Get one trajectory, calculate loss for each time step and add to global loss
for ep_id in range(batch.idx):
for step_id in range(1 , batch.steps[ep_id].int() + 1):
# Get the episode, the action and the weight
ep , action , weight = batch.get_episode(ep_id , step_id)
# Get the corresponding policy
policy , softmax_embedding = get_policy(agent_net , ep)
# Get log probability of taken action
logp = policy.log_prob(action)
# Add to loss with weight
loss -= logp * weight
# Calculate entropy for logging (and possibly for entropy bonus)
# entropy = - policy.probs * policy.logits
entropy = policy.entropy()
entropy_taken_actions[action_counter] = entropy
if entropy_bonus is not None and entropy_bonus != 0:
loss -= entropy_bonus * entropy
action_counter += 1
# If the agent is of type RNN reset the hidden states
if agent_net.AGENT_TYPE == 'RNN':
agent_net.reset()
# Log the entropy
batch.sc.s('Entropy').collect(entropy_taken_actions[0:action_counter].mean().item())
loss = loss / action_counter
batch.sc.s('Loss').collect( loss.item() )
return loss
def map_grid_dist_to_ohe( grid_dist):
ohe = torch.zeros((1, 8))
c = torch.zeros((1))
#dist_diag = grid_dist * torch.tensor([[1],[1]]) / 1.4142
#dist_diag_2 = grid_dist * torch.tensor([1,-1]) / 1.4142
# For now correct step is diagonal if possible
# grid_dist = dy , dx
if grid_dist[0] < 0 and grid_dist[1] == 0:
c[0] = 0 # up
elif grid_dist[0] < 0 and grid_dist[1] > 0:
c[0] = 1 # right up
elif grid_dist[0] == 0 and grid_dist[1] > 0:
c[0] = 2 # right
elif grid_dist[0] > 0 and grid_dist[1] > 0:
c[0] = 3 # right down
elif grid_dist[0] > 0 and grid_dist[1] == 0:
c[0] = 4 # down
elif grid_dist[0] > 0 and grid_dist[1] < 0:
c[0] = 5 # left down
elif grid_dist[0] == 0 and grid_dist[1] < 0:
c[0] = 6 # left
elif grid_dist[0] < 0 and grid_dist[1] < 0:
c[0] = 7 # left up
else:
raise(Exception("Invalid action:\t%s" % grid_dist))
return c.long()
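# Example (grid_dist is (dy, dx) in grid cells): grid_dist = torch.tensor([-1., 1.])
# means one cell up and one cell right, so the returned class index is 1, matching
# the "up right" action in map_action_to_move above.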
"""
def compute_loss(batch , agent_net, entropy_bonus = None):
loss = 0
# Log the entropy of taken action
entropy_taken_actions = torch.zeros(CONFIG.RL_batch_size * CONFIG.RL_multiply_images * CONFIG.RL_max_episode_length)
action_counter = 0
# Get one trajectory, calculate loss for each time step and add to global loss
for ep_id in range(batch.idx):
for step_id in range(1 , batch.steps[ep_id].int() + 1):
# Get the episode, the action and the weight
ep , action , weight = batch.get_episode(ep_id , step_id)
# Get the corresponding policy
policy = get_policy(agent_net , ep)
# Get log probability of taken action
logp = policy.log_prob(action)
# Add to loss with weight
loss -= logp * weight
# Calculate entropy for logging (and possibly for entropy bonus)
# entropy = - policy.probs * policy.logits
entropy = policy.entropy()
entropy_taken_actions[action_counter] = entropy
if entropy_bonus is not None and entropy_bonus != 0:
loss -= entropy_bonus * entropy
action_counter += 1
# If the agent is of type RNN reset the hidden states
if agent_net.AGENT_TYPE == 'RNN':
agent_net.reset()
# Log the entropy
batch.sc.s('Entropy').collect(entropy_taken_actions[0:action_counter].mean().item())
return loss/batch.idx
"""
"""
Calculates the rewards from step until finish given the reward of the trajectory.
"""
def rewards_to_go(rewards):
rtg = torch.zeros_like(rewards).to(CONFIG.device)
for i in range(len(rewards)):
# First get gamma
discount = torch.pow(CONFIG.RL_discount_factor , torch.arange(0 , len(rewards)-i)).to(CONFIG.device)
rtg[i] = torch.sum( rewards[i:] * discount)
# Normalize per action here?
# Or in main loop?
return rtg
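# Worked example (assuming CONFIG.RL_discount_factor = 0.9): rewards [1, 0, 2]
# give rewards-to-go [1 + 0.9*0 + 0.81*2, 0 + 0.9*2, 2] = [2.62, 1.8, 2.0],
# i.e. each entry is the discounted sum of all rewards from that step onward.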
""" Run a trajectory in a search area """
def run_eval_trajectory(image,episode, agent_net, deterministic = CONFIG.RL_eval_deterministic, loc_start = None, loc_goal = None, probs_diff = None):
episode.initialize(image = image , loc_start=loc_start, loc_goal = loc_goal, probs_diff = probs_diff)
# Execute episode
done = False
while not done:
# Get an action from the agent
action, softmax_embedding = get_action(agent_net, episode, deterministic)
# Update the environment according to the correct action
loc_next, reward, done = take_step(action, episode, softmax_embedding)
# Get the crop at the current location
crop_current, loc_current = get_deterministic_crops(image, coords=loc_next[0])
# Update the episode storage
try:
tmp = torch.nn.Softmax( dim = 1 )(softmax_embedding).cpu().detach().numpy()
except:
tmp = np.zeros((1, 8))
episode.update(action, reward, loc_current, crop_current, misc=tmp)
# Episode done return results
episode.finish()
return episode
""" Used to freeze or unfreeze parts of the network """
def set_freezed_parts_of_net(net , mode = 'none'):
# Mode determines which parts should be frozen
# mode = 'patch' - freezes the patch embedder, everything else unfrozen
# mode = 'policy' - freezes everything that is not the patch embedder
# mode = 'none' - unfreezes all parts of the network
for child in net.children():
    if child == net.patch_emb:
        if mode == 'patch':
            for parameter in child.parameters():
                parameter.requires_grad = False
        else:
            for parameter in child.parameters():
                parameter.requires_grad = True
    else:
        if mode == 'policy':
            for parameter in child.parameters():
                parameter.requires_grad = False
        else:
            for parameter in child.parameters():
                parameter.requires_grad = True
def visualize_cnn_filter(conv_layer, filter = 0, save_name = 'filter_vis.png', show = True):
""" Plots the weights of a filter in a convolutional layer."""
input_channels_ploted = min( 16 , conv_layer.weight.shape[1])
filter = conv_layer.weight[filter,:]
filter_ = filter[0:input_channels_ploted,:,:].detach().clone().cpu().permute(1,2,0).numpy()
n_rows = int(math.sqrt(input_channels_ploted))
n_cols = int( input_channels_ploted / n_rows) + int( input_channels_ploted % n_rows != 0)
matplotlib.use('TkAgg') if show else None
fig, axes = plt.subplots(n_rows , n_cols)
for (i , ax_inds) in enumerate(np.ndindex(axes.shape)):
axes[ax_inds].imshow(filter_[:,:,i])
axes[ax_inds].set_title("Input Channel %d" % i)
if show:
plt.show()
if False: # for now....
plt.savefig(os.path.join(CONFIG.STATS_log_dir, "filter_visualizations", save_name))
# Clean up
plt.cla()
plt.clf()
plt.close('all')
gc.collect()
| 16,432 | 31.158513 | 150 |
py
|
airloc
|
airloc-master/utils/dataset_utils.py
|
import os
import random
import torch
from PIL import Image
import pandas as pd
from torch.utils.data.dataset import Dataset
from torchvision import transforms
import torchvision.transforms.functional as F
import numpy as np
import glob
from config import CONFIG
import argparse
import sys
import time
class DubaiSeven(Dataset):
def __init__(self, dataset_root_path = CONFIG.MISC_dataset_path , split = 'train' , transform = None , generate_new_split_file = False, use_eval_split = False):
dataset_root_path = os.path.join(dataset_root_path , "dubai_seven")
# Check if we are to use the special eval_split_file
if use_eval_split == 'basic':
partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_distance.csv" % split)
split_frame = pd.read_csv(partition_file_path)
elif use_eval_split == 'exhaustive':
partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_start.csv" % split)
split_frame = pd.read_csv(partition_file_path)
# Check that the saved patches match the current settings
#assert split_frame['patch_x'][0] == CONFIG.MISC_patch_size[0] , "
else:
# If grid game enabled load that file
if CONFIG.MISC_grid_game:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
else:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
# Check if it exists
if not os.path.exists(partition_file_path):
raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
exit(1)
split_frame = pd.read_csv(partition_file_path)
# Make correct split
if split == 'train':
split_frame = split_frame.loc[split_frame['partition'] == 0]
elif split == 'val':
split_frame = split_frame.loc[split_frame['partition'] == 1]
elif split == 'test':
split_frame = split_frame.loc[split_frame['partition'] == 2]
else:
print("Unknown split selected for Massachusetts:\t%s" % split)
exit(1)
self.image_list = split_frame['image_id'].tolist()
self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
# Allow for image preprocessing by transformer
self.transform = transform
if transform is None:
self.tt = transforms.ToTensor()
self.base_dir = os.path.join(dataset_root_path , "image")
self.base_seg_dir = os.path.join(dataset_root_path , "label")
# If we are to load image segmentations as well, prepare a transform
if CONFIG.RL_priv_use_seg:
self.seg_transform = transforms.Compose([
transforms.ToTensor(),
])
def getitem__by_image_id(self, image_id):
"""
Only used for debugging.
"""
ids , start = [] , 0
try:
while (True):
id = self.image_list.index(image_id, start)
ids.append(id)
start = id + 1
except:
# Throws an error when no more are available
pass
if len(ids) == 0:
raise(Exception("No such image"))
image_path = os.path.join(self.base_dir, self.image_list[ids[0]])
image = Image.open(image_path)
locs = [self.__getitem__(id)[1] for id in ids]
return image , np.stack(locs)
def __getitem__(self, idx):
image_path = os.path.join(self.base_dir, self.image_list[idx])
image = Image.open(image_path)
if CONFIG.RL_priv_use_seg:
# Load image segmentation
seg_path = os.path.join(self.base_seg_dir , self.image_list[idx])
seg = Image.open(seg_path)
# Transform from tif image to a pytorch tensor
seg = self.seg_transform(seg)[0,:,:][None,:]
image = self.seg_transform(image)
# Concatenate segmentation mask to end of crops
image = torch.cat((image , seg ) , dim = 0)
# If transformer available use it
if self.transform is not None:
image = self.transform(image)
else:
image = self.tt(image)
pass
# Get start and goal crop
start_crop = self.start_crop[idx, :]
goal_crop = self.goal_crop[idx ,:]
return (image , ( start_crop , goal_crop))
def __len__(self):
return len(self.image_list)
def _get_image_and_file(self, idx):
filepath = self.image_list[idx]
img = Image.open(filepath)
if self.transform is not None:
img = self.transform(img)
return (img , filepath )
class MasaFilt(Dataset):
def __init__(self, dataset_root_path = CONFIG.MISC_dataset_path , split = 'train' , transform = None , generate_new_split_file = False, use_eval_split = False):
dataset_root_path = os.path.join(dataset_root_path , "masa_filt")
# Check if we are to use the special eval_split_file
if use_eval_split == 'basic':
partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_distance.csv" % split)
split_frame = pd.read_csv(partition_file_path)
elif use_eval_split == 'exhaustive':
partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_start.csv" % split)
split_frame = pd.read_csv(partition_file_path)
# Check that the saved patches match the current settings
#assert split_frame['patch_x'][0] == CONFIG.MISC_patch_size[0] , "
else:
# If grid game enabled load that file
if CONFIG.MISC_grid_game:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
else:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
# Check if it exists
if not os.path.exists(partition_file_path):
raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
exit(1)
split_frame = pd.read_csv(partition_file_path)
# Make correct split
if split == 'train':
split_frame = split_frame.loc[split_frame['partition'] == 0]
elif split == 'val':
split_frame = split_frame.loc[split_frame['partition'] == 1]
elif split == 'test':
split_frame = split_frame.loc[split_frame['partition'] == 2]
else:
print("Unknown split selected for Massachusetts:\t%s" % split)
exit(1)
self.image_list = split_frame['image_id'].tolist()
self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
# Allow for image preprocessing by transformer
self.transform = transform
self.base_dir = os.path.join(dataset_root_path , "image")
self.base_seg_dir = os.path.join(dataset_root_path , "label")
# If we are to load image segmentations as well, prepare a transform
if CONFIG.RL_priv_use_seg:
self.seg_transform = transforms.Compose([
transforms.ToTensor(),
])
def getitem__by_image_id(self, image_id):
"""
Only used for debugging.
"""
ids , start = [] , 0
try:
while (True):
id = self.image_list.index(image_id, start)
ids.append(id)
start = id + 1
except:
# Throws an error when no more are available
pass
if len(ids) == 0:
raise(Exception("No such image"))
image_path = os.path.join(self.base_dir, self.image_list[ids[0]])
image = Image.open(image_path)
locs = [self.__getitem__(id)[1] for id in ids]
return image , np.stack(locs)
def __getitem__(self, idx):
image_path = os.path.join(self.base_dir, self.image_list[idx])
image = Image.open(image_path)
if CONFIG.RL_priv_use_seg:
# Load image segmentation
seg_path = os.path.join(self.base_seg_dir , self.image_list[idx])
seg = Image.open(seg_path)
# Transform from tif image to a pytorch tensor
seg = self.seg_transform(seg)[0,:,:][None,:]
image = self.seg_transform(image)
# TODO Fix split_images.py
# Due to the labels containing some smoothed pixels we round to the nearest integer here
seg = torch.round(seg).float()
# Concatenate segmentation mask to end of crops
image = torch.cat((image , seg ) , dim = 0)
# If transformer available use it
if self.transform is not None:
image = self.transform(image)
# Round labels in the image mask. They are smoothed by the interpolation
if CONFIG.RL_priv_use_seg:
seg = image[-1,:,:]
seg = torch.round(seg).float()
image[-1,:,:] = seg
# Get start and goal crop
start_crop = self.start_crop[idx, :]
goal_crop = self.goal_crop[idx ,:]
return (image , ( start_crop , goal_crop))
def __len__(self):
return len(self.image_list)
def _get_image_and_file(self, idx):
filepath = self.image_list[idx]
img = Image.open(filepath)
if self.transform is not None:
img = self.transform(img)
return (img , filepath )
class MasaSeven(Dataset):
def __init__(self, dataset_root_path = CONFIG.MISC_dataset_path , split = 'train' , transform = None , generate_new_split_file = False, use_eval_split = False):
dataset_root_path = os.path.join(dataset_root_path , "masa_seven")
# Check if we are to use the special eval_split_file
if use_eval_split == 'basic':
partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_distance.csv" % split)
split_frame = pd.read_csv(partition_file_path)
elif use_eval_split == 'exhaustive':
partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_start.csv" % split)
split_frame = pd.read_csv(partition_file_path)
# Check that the saved patches match the current settings
#assert split_frame['patch_x'][0] == CONFIG.MISC_patch_size[0] , "
else:
# If grid game enabled load that file
if CONFIG.MISC_grid_game:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
else:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
# Check if it exists
if not os.path.exists(partition_file_path):
raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
exit(1)
split_frame = pd.read_csv(partition_file_path)
# Make correct split
if split == 'train':
split_frame = split_frame.loc[split_frame['partition'] == 0]
elif split == 'val':
split_frame = split_frame.loc[split_frame['partition'] == 1]
elif split == 'test':
split_frame = split_frame.loc[split_frame['partition'] == 2]
else:
print("Unknown split selected for Massachusetts:\t%s" % split)
exit(1)
self.image_list = split_frame['image_id'].tolist()
self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
# Allow for image preprocessing by transformer
self.transform = transform
if transform is None:
self.tt = transforms.ToTensor()
self.base_dir = os.path.join(dataset_root_path , "image")
self.base_seg_dir = os.path.join(dataset_root_path , "label")
# If we are to load image segmentations as well, prepare a transform
if CONFIG.RL_priv_use_seg:
self.seg_transform = transforms.Compose([
transforms.ToTensor(),
])
def getitem__by_image_id(self, image_id):
"""
Only used for debugging.
"""
ids , start = [] , 0
try:
while (True):
id = self.image_list.index(image_id, start)
ids.append(id)
start = id + 1
except:
# Throws an error when no more are available
pass
if len(ids) == 0:
raise(Exception("No such image"))
image_path = os.path.join(self.base_dir, self.image_list[ids[0]])
image = Image.open(image_path)
locs = [self.__getitem__(id)[1] for id in ids]
return image , np.stack(locs)
def __getitem__(self, idx):
image_path = os.path.join(self.base_dir, self.image_list[idx])
image = Image.open(image_path)
if CONFIG.RL_priv_use_seg:
# Load image segmentation
seg_path = os.path.join(self.base_seg_dir , self.image_list[idx])
seg = Image.open(seg_path)
# Transform from tif image to a pytorch tensor
seg = self.seg_transform(seg)[0,:,:][None,:]
image = self.seg_transform(image)
# Concatenate segmentation mask to end of crops
image = torch.cat((image , seg ) , dim = 0)
# If transformer available use it
if self.transform is not None:
image = self.transform(image)
else:
image = self.tt(image)
pass
# Get start and goal crop
start_crop = self.start_crop[idx, :]
goal_crop = self.goal_crop[idx ,:]
return (image , ( start_crop , goal_crop))
def __len__(self):
return len(self.image_list)
def _get_image_and_file(self, idx):
filepath = self.image_list[idx]
img = Image.open(filepath)
if self.transform is not None:
img = self.transform(img)
return (img , filepath )
class Masa(Dataset):
def __init__(self, dataset_root_path = CONFIG.MISC_dataset_path , split = 'train' , transform = None , generate_new_split_file = False):
dataset_root_path = os.path.join(dataset_root_path , "masa")
# If grid game enabled load that file
if CONFIG.MISC_grid_game:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
else:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
# Check if it exists
if not os.path.exists(partition_file_path):
raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
exit(1)
split_frame = pd.read_csv(partition_file_path)
# Make correct split
if split == 'train':
split_frame = split_frame.loc[split_frame['partition'] == 0]
elif split == 'val':
split_frame = split_frame.loc[split_frame['partition'] == 1]
elif split == 'test':
split_frame = split_frame.loc[split_frame['partition'] == 2]
else:
print("Unknown split selected for Massachusetts:\t%s" % split)
exit(1)
self.image_list = split_frame['image_id'].tolist()
self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
# Allow for image preprocessing by transformer
self.transform = transform
self.base_dir = os.path.join(dataset_root_path , "image")
self.base_seg_dir = os.path.join(dataset_root_path , "label")
# If we are to load image segmentations as well, prepare a transform
if CONFIG.RL_priv_use_seg:
self.seg_transform = transforms.Compose([
transforms.ToTensor(),
])
def __getitem__(self, idx):
image_path = os.path.join(self.base_dir, self.image_list[idx])
image = Image.open(image_path)
if CONFIG.RL_priv_use_seg:
# Load image segmentation
seg_path = os.path.join(self.base_seg_dir , self.image_list[idx])
seg = Image.open(seg_path)
# Transform from tif image to a pytorch tensor
seg = self.seg_transform(seg)[0,:,:][None,:]
image = self.seg_transform(image)
# Concatenate segmentation mask to end of crops
image = torch.cat((image , seg ) , dim = 0)
# If transformer available use it
if self.transform is not None:
image = self.transform(image)
# Get start and goal crop
start_crop = self.start_crop[idx, :]
goal_crop = self.goal_crop[idx ,:]
return (image , ( start_crop , goal_crop))
def __len__(self):
return len(self.image_list)
def _get_image_and_file(self, idx):
filepath = self.image_list[idx]
img = Image.open(filepath)
if self.transform is not None:
img = self.transform(img)
return (img , filepath )
class Dubai(Dataset):
def __init__(self, dataset_root_path = CONFIG.MISC_dataset_path , split =
'train' , transform = None , generate_new_split_file = False,
use_eval_split = False):
dataset_root_path = os.path.join(dataset_root_path , "dubai")
if use_eval_split == 'basic':
partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_distance.csv" % split)
split_frame = pd.read_csv(partition_file_path)
elif use_eval_split == 'exhaustive':
partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_start.csv" % split)
split_frame = pd.read_csv(partition_file_path)
# Check that the saved patches match the current settings
#assert split_frame['patch_x'][0] == CONFIG.MISC_patch_size[0] , "
else:
# If grid game enabled load that file
if CONFIG.MISC_grid_game:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
else:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
# Check if it exists
if not os.path.exists(partition_file_path):
raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
exit(1)
split_frame = pd.read_csv(partition_file_path)
# Make correct split
if split == 'train':
split_frame = split_frame.loc[split_frame['partition'] == 0]
elif split == 'val':
split_frame = split_frame.loc[split_frame['partition'] == 1]
elif split == 'test':
split_frame = split_frame.loc[split_frame['partition'] == 2]
else:
print("Unknown split selected for Massachusetts:\t%s" % split)
exit(1)
self.image_list = split_frame['image_id'].tolist()
self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
# Allow for image preprocessing by transformer
self.transform = transform
self.base_dir = os.path.join(dataset_root_path , "image")
self.base_seg_dir = os.path.join(dataset_root_path , "label")
# If we are to load image segmentations as well, prepare a transform
if CONFIG.RL_priv_use_seg:
self.seg_transform = transforms.Compose([
transforms.ToTensor(),
])
def __getitem__(self, idx):
image_path = os.path.join(self.base_dir, self.image_list[idx])
image = Image.open(image_path)
if CONFIG.RL_priv_use_seg:
# Load image segmentation
seg_path = os.path.join(self.base_seg_dir , self.image_list[idx])
seg = Image.open(seg_path)
# Transform from tif image to a pytorch tensor
seg = self.seg_transform(seg)
image = self.seg_transform(image)
# Concatenate segmentation mask to end of crops
image = torch.cat((image , seg ) , dim = 0)
# If transformer available use it
if self.transform is not None:
image = self.transform(image)
# Get start and goal crop
start_crop = self.start_crop[idx, :]
goal_crop = self.goal_crop[idx ,:]
return (image , ( start_crop , goal_crop))
def __len__(self):
return len(self.image_list)
def _get_image_and_file(self, idx):
filepath = self.image_list[idx]
img = Image.open(filepath)
if self.transform is not None:
img = self.transform(img)
return (img , filepath )
# Hmm maybe should inherit ImageFolder... but decided not to
class CustomDataset():
def __init__(self, datasets_root_path , custom_dataset_name,split = 'train' , transform = None, custom_split_file = None):
dataset_root_path = os.path.join(datasets_root_path , "Custom", custom_dataset_name)
if not os.path.exists(datasets_root_path):
os.makedirs(datasets_root_path)
# Try to locate this specific custom dataset
if not os.path.exists(dataset_root_path):
print("Unable to find this dataset:\t%s" % custom_dataset_name)
exit(1)
if custom_split_file is None:
# No custom split file selected use standard split file
# If grid game enabled load that file
if CONFIG.MISC_grid_game:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
else:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
else:
# Use custom split file
partition_file_path = os.path.join(dataset_root_path , custom_split_file)
# Check if it exists
if not os.path.exists(partition_file_path):
raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
exit(1)
split_frame = pd.read_csv(partition_file_path)
# Make correct split
if split == 'train':
split_frame = split_frame.loc[split_frame['partition'] == 0]
elif split == 'val':
split_frame = split_frame.loc[split_frame['partition'] == 1]
elif split == 'test':
split_frame = split_frame.loc[split_frame['partition'] == 2]
else:
print("Unknown split selected:\t%s" % split)
exit(1)
self.image_list = split_frame['image_id'].tolist()
self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
# Allow for image preprocessing by transformer
self.transform = transform
self.base_dir = os.path.join(dataset_root_path , "image" )
def __getitem__(self, idx):
image_path = os.path.join(self.base_dir , self.image_list[idx] )
image = Image.open(image_path)
# If transformer available use it
if self.transform is not None:
image = self.transform(image)
# Get start and goal crop
start_crop = self.start_crop[idx, :]
goal_crop = self.goal_crop[idx ,:]
return (image , (start_crop , goal_crop))
def __len__(self):
return len(self.image_list)
def _get_image_and_file(self, idx):
filepath = os.path.join(self.base_dir ,self.image_list[idx])
img = Image.open(filepath)
if self.transform is not None:
img = self.transform(img)
return (img ,filepath )
class MasaFull(Dataset):
def __init__(self, datasets_root_path , split = 'train' , transform = None , randomRotation = False ):
dataset_root_path = os.path.join(datasets_root_path , "masa_full")
self.randomRotation = randomRotation
if not os.path.exists(datasets_root_path):
os.makedirs(datasets_root_path)
# Also pngs are available
image_folder_path = os.path.join(dataset_root_path, "tiff")
# Dataset is already split no need for split file
if split == 'train':
image_folder_path = os.path.join(image_folder_path , "train")
elif split == 'val':
image_folder_path = os.path.join(image_folder_path , "val")
elif split == 'test':
image_folder_path = os.path.join(image_folder_path , "test")
else:
raise(Exception("Unknown split:\t%s" % split))
# File names
self.image_list = [os.path.join(image_folder_path , x) for x in os.listdir(image_folder_path)]
# Random Crop
self.rc = transforms.RandomCrop(size = [500 , 500])
# from PIL to tensor transform
self.tt = transforms.ToTensor()
self.transform = transform
def __getitem__(self, idx):
# Load the image
image = Image.open(self.image_list[idx])
# Make it tensor
#image = self.tt(image)
# If random rotation is enabled, do it
if self.randomRotation:
#angle = torch.rand(size = (1,)).item() * 90.0
angle = torch.randint( low = -180 , high = 180 , size = (1,)).float().item()
image = F.rotate(image , angle , fill = None)
# Get center crop dimensions
while ( angle < 0 or angle > 90):
angle = angle + 90 if angle < 0 else angle
angle = angle - 90 if angle > 90 else angle
size = int( image.size[-1] / (np.cos(np.radians(angle)) + np.sin(np.radians(angle))) )
image = F.center_crop(image , [size , size])
# Now do random 500x500 crop
image = self.rc(image)
# Do regular data augmentation if available
if self.transform:
image = self.transform(image)
else:
image = self.tt(image)
# No start and goal crops. Only for show
return (image, ([],[]))
def __len__(self):
return len(self.image_list)
class ImagesPre(Dataset):
def __init__(self, dataset_root_path = CONFIG.MISC_dataset_path , split = 'train' , transform = None , generate_new_split_file = False, use_eval_split = False, post_instead = False):
if post_instead:
dataset_root_path = os.path.join(dataset_root_path , "images_post")
else:
dataset_root_path = os.path.join(dataset_root_path , "images_pre")
# Check if we are to use the special eval_split_file
if use_eval_split == 'basic':
partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_distance.csv" % split)
split_frame = pd.read_csv(partition_file_path)
elif use_eval_split == 'exhaustive':
partition_file_path = os.path.join(dataset_root_path , "%s_eval_partition_grid_fixed_start.csv" % split)
split_frame = pd.read_csv(partition_file_path)
# Check that the saved patches match the current settings
#assert split_frame['patch_x'][0] == CONFIG.MISC_patch_size[0] , "
else:
# If grid game enabled load that file
if CONFIG.MISC_grid_game:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition_grid.csv")
else:
partition_file_path = os.path.join(dataset_root_path , "list_eval_partition.csv")
# Check if it exists
if not os.path.exists(partition_file_path):
raise(FileNotFoundError("Split file not found:\t%s" % partition_file_path))
exit(1)
split_frame = pd.read_csv(partition_file_path)
# Make correct split
if split == 'train':
split_frame = split_frame.loc[split_frame['partition'] == 0]
elif split == 'val':
split_frame = split_frame.loc[split_frame['partition'] == 1]
elif split == 'test':
split_frame = split_frame.loc[split_frame['partition'] == 2]
else:
print("Unknown split selected for ImagesPre:\t%s" % split)
exit(1)
self.image_list = split_frame['image_id'].tolist()
self.start_crop = split_frame[['start_x', 'start_y']].to_numpy()
self.goal_crop = split_frame[['goal_x' , 'goal_y']].to_numpy()
# Allow for image preprocessing by transformer
self.transform = transform
self.base_dir = os.path.join(dataset_root_path , "image")
# If we are to load image segmentations as well, prepare a transform
if CONFIG.RL_priv_use_seg:
self.seg_transform = transforms.Compose([
transforms.ToTensor(),
])
def getitem__by_image_id(self, image_id):
"""
Only used for debugging.
"""
ids , start = [] , 0
try:
while (True):
id = self.image_list.index(image_id, start)
ids.append(id)
start = id + 1
except:
# Throws an error when no more are available
pass
if len(ids) == 0:
raise(Exception("No such image"))
image_path = os.path.join(self.base_dir, self.image_list[ids[0]])
#image = np.array(Image.open(image_path))
image = Image.open(image_path)
locs = [self.__getitem__(id)[1] for id in ids]
return image , np.stack(locs)
def __getitem__(self, idx):
image_path = os.path.join(self.base_dir, self.image_list[idx])
image = Image.open(image_path)
if CONFIG.RL_priv_use_seg:
print("No semantic masks for this dataset")
sys.exit(0)
# If transformer available use it
if self.transform is not None:
#print("AAA", image.shape, np.min(image), np.max(image))
image = self.transform(image)
#print(type(image))
#print("BBB", torch.min(image), torch.max(image))
#time.sleep(999)
# Round labels in the image mask. They are smoothed by the interpolation
if CONFIG.RL_priv_use_seg:
seg = image[-1,:,:]
seg = torch.round(seg).float()
image[-1,:,:] = seg
# Get start and goal crop
start_crop = self.start_crop[idx, :]
goal_crop = self.goal_crop[idx ,:]
return (image , ( start_crop , goal_crop))
def __len__(self):
return len(self.image_list)
def _get_image_and_file(self, idx):
filepath = self.image_list[idx]
img = Image.open(filepath)
if self.transform is not None:
img = self.transform(img)
return (img , filepath )
def _generate_split_file( split_file_path , grid_game = False):
# Generates a separate csv file for the split with crop locations
# Useful for creating a static validation set (or training if you want)
# Read partition file
# list_eval_partition: 0: training, 1: validation, 2:test
try:
dataframe = pd.read_csv(split_file_path)
except FileNotFoundError:
raise(FileNotFoundError("Eval partion file not found:\t%s" % split_file_path ))
# Generate start and goal crops, insert into dataframe and write back to file
N = dataframe.shape[0]
H_img , W_img = CONFIG.MISC_im_size
H_patch , W_patch = CONFIG.MISC_patch_size
# To make sure crops are always in the same location, use a separate random number generator with a hardcoded seed
num_gen = np.random.default_rng(0)
# If the grid_game flag is enabled divide the image into a uniform grid and sample patch locations from that grid
if grid_game:
grid_H , grid_W = int(H_img / H_patch) , int(W_img / W_patch)
grid_locs = np.floor( num_gen.uniform( low = [0,0, 0,0] , high = [grid_H , grid_W, grid_H ,grid_W] , size = (N, 4))) # use the seeded generator so the split file is reproducible
# TODO - Add so that start and goal does not get on same patch
#number_same_patch = ((grid_locs[:,0] == grid_locs[:,2]) & (grid_locs[:,1] == grid_locs[:,3])).sum()
# Convert from grid location to pixel location
crop_locs = (grid_locs * np.array(2*CONFIG.MISC_patch_size)).astype('int64')
#crop_locs = np.concatenate( (start_crop_loc , top_left_loc + np.array(CONFIG.MISC_patch_size)) ,axis = 1).astype('int64')
# Also alter split file name and append 'grid'
dirname = os.path.dirname(split_file_path)
filename = os.path.basename(split_file_path)
temp = filename.split('.') # Assumes only single dot in filename
new_filename = temp[0] + '_grid.' + temp[1]
split_file_path = os.path.join(dirname ,new_filename)
else:
# Else just sample any location in the image
# ( start_x , start_y , goal_x , goal_y)
crop_locs = num_gen.integers((0,0,0,0) , (H_img-H_patch , W_img-W_patch , H_img-H_patch , W_img - W_patch) , size = (N , 4))
# Insert into dataframe
dataframe['start_x'] = crop_locs[:,0]
dataframe['start_y'] = crop_locs[:,1]
dataframe['goal_x'] = crop_locs[:,2]
dataframe['goal_y'] = crop_locs[:,3]
# Write back!
dataframe.to_csv(split_file_path, index = False)
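# Resulting CSV layout (illustrative row; the file name is hypothetical and the
# coordinates assume the grid variant with 48x48 patches):
# image_id,partition,start_x,start_y,goal_x,goal_y
# img_0001.png,0,96,48,192,144
# where the four coordinate columns are pixel positions of the top-left corner of
# the start and goal crops.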
def _create_base_split_file( datasets_root_path = None, dataset = None, relative_image_path = None):
datasets_root_path = datasets_root_path if datasets_root_path is not None else CONFIG.MISC_dataset_path
dataset = dataset if dataset is not None else CONFIG.MISC_dataset
relative_image_path = relative_image_path if relative_image_path is not None else "image/*"
dataset_root_path = os.path.join(datasets_root_path , dataset)
image_list = glob.glob(os.path.join(dataset_root_path,relative_image_path))
# Make sure only to keep relative name
image_list = list(map(os.path.basename , image_list))
nbr_img = len(image_list)
partition = np.zeros(nbr_img)
partition[int(nbr_img*0.7):int(nbr_img*0.85)] += 1
partition[int(nbr_img*0.85):] +=2
random.shuffle(partition)
data = {"image_id":image_list,
"partition":partition}
dataframe = pd.DataFrame(data)
split_file_path = os.path.join(dataset_root_path,'list_eval_partition.csv')
dataframe.to_csv(split_file_path,index = False)
if __name__ == '__main__':
# This part is used to be able to simply generate split files for datasets
# Set random seed
random.seed(CONFIG.MISC_random_seed)
np.random.seed(CONFIG.MISC_random_seed)
torch.manual_seed(CONFIG.MISC_random_seed)
parser = argparse.ArgumentParser()
parser.add_argument("--dataset" , "-d", type = str, help = "Select dataset")
parser.add_argument("--grid-game" ,"-g" , action = 'store_true', default = False)
# DEBUGGING
parser.add_argument("--debug-masa-full" , action = 'store_true' , default = False)
parser.add_argument("--random-rotation" , action = 'store_true' , default = False)
args = parser.parse_args()
if args.debug_masa_full:
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('TkAgg')
torch.manual_seed(0)
d_rot = MasaFull(".." , randomRotation = True)
d_no_rot = MasaFull(".." , randomRotation = False)
for i in range(10):
image_rot = d_rot.__getitem__(i)
image_no_rot = d_no_rot.__getitem__(i)
fig , axes = plt.subplots(1,2)
axes[0].imshow(image_rot.permute(1,2,0))
axes[1].imshow(image_no_rot.permute(1,2,0))
plt.show()
plt.close('all')
known_datasets = ['dubai','masa','masa_filt','masa_seven','dubai_seven', 'images_pre', 'images_post']
if not args.dataset is None:
# Check if custom or regular dataset
if (args.dataset.startswith('custom_')):
# Check that custom dataset actually existss
if not os.path.exists(os.path.join(CONFIG.MISC_dataset_path , "Custom", args.dataset[7:])):
print("Dataset does not exists:\t%s" % os.path.join("Custom", args.dataset[7:]))
exit(1)
_create_base_split_file(dataset = os.path.join("Custom", args.dataset[7:]) )
split_file_path = os.path.join(CONFIG.MISC_dataset_path, "Custom", args.dataset[7:], "list_eval_partition.csv")
elif args.dataset in known_datasets:
# Regular dataset
_create_base_split_file(dataset = args.dataset)
split_file_path = os.path.join(CONFIG.MISC_dataset_path , args.dataset , "list_eval_partition.csv")
else:
print("No dataset is found")
exit(1)
else:
print("Not a valid dataset!")
exit(1)
# then generate split file with random start and goal crops
_generate_split_file(split_file_path , args.grid_game)
| 37,541 | 36.617234 | 186 |
py
|
airloc
|
airloc-master/utils/utils.py
|
import math
import json
import os
import time
import zipfile
import signal
import pdb
import gc
from shutil import copyfile
import numpy as np
import torch
from glob import glob
# Might not be available on RISE
import seaborn as sns
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import random_split,Subset
import matplotlib
matplotlib.use('Agg')
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import urllib
from utils.dataset_utils import CustomDataset,Dubai,Masa, MasaSeven, MasaFilt, MasaFull, DubaiSeven, ImagesPre
from dateutil.parser import parse
# from torchviz import make_dot
from config import CONFIG
def move_crop(crop, offset_xy):
"""
crop has 4 columns in (h, w)-coordinate system:
1. Top-coordinate of the crop
2. Left-coordinate of the crop
3. Height of the crop
4. Width of the crop
offset_xy has 2 columns in (x, y)-coordinate system:
1. Movement in x-direction
2. Movement in y-direction
The output is given by moving crop along offset_xy
"""
# Translate offset_xy from (x,y)- to (h,w)-coordinate system
if isinstance(offset_xy, np.ndarray):
offset_hw = np.zeros_like(offset_xy)
else:
offset_hw = torch.zeros_like(offset_xy)
offset_hw[:, 0] = -offset_xy[:, 1] # H_img - offset_xy[:, 1]
offset_hw[:, 1] = offset_xy[:, 0]
# Perform the translation
if isinstance(offset_xy, np.ndarray):
moved_crop = np.zeros_like(crop)
else:
moved_crop = torch.zeros_like(crop)
moved_crop[:, :2] = crop[:, :2] + offset_hw
moved_crop[:, 2:] = crop[:, 2:]
return moved_crop
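# Worked example: crop = [[10, 20, 48, 48]] (top, left, h, w) moved by
# offset_xy = [[5, 3]] (x to the right, y upwards) gives offset_hw = [-3, 5],
# so the returned crop is [[7, 25, 48, 48]]: three pixels up and five to the right.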
def compute_iou(crop1, crop2):
"""
Any given row of any of the two crops has the following format:
1. Top-coordinate of the crop
2. Left-coordinate of the crop
3. Height of the crop
4. Width of the crop
The output is the intersection-over-unions (IoUs) between the rows of
crop1 and crop2
"""
# Ensure correct data types and dims
if isinstance(crop1, tuple) or isinstance(crop1, list):
crop1 = np.array(crop1)[np.newaxis, :]
if isinstance(crop2, tuple) or isinstance(crop2, list):
crop2 = np.array(crop2)[np.newaxis, :]
if crop1.ndim == 1:
crop1 = crop1[np.newaxis, :]
if crop2.ndim == 1:
crop2 = crop2[np.newaxis, :]
# Get the coordinates of the intersection bounding box
try:
ihmins = np.maximum(crop1[:, 0], crop2[:, 0])
except:
print(crop1, crop2)
ihmaxs = np.minimum(crop1[:, 0] + crop1[:, 2], crop2[:, 0] + crop2[:, 2])
iwmins = np.maximum(crop1[:, 1], crop2[:, 1])
iwmaxs = np.minimum(crop1[:, 1] + crop1[:, 3], crop2[:, 1] + crop2[:, 3])
# TODO: Find out why this plus one was here
iws = np.maximum(iwmaxs - iwmins, 0)
ihs = np.maximum(ihmaxs - ihmins, 0)
# Calculate the area of the intersection
inters = iws * ihs
# Calculate the area of union
unis = crop1[:, 2] * crop1[:, 3] + crop2[:, 2] * crop2[:, 3] - inters
# Calculate and return the IoUs between crop1 and crop2
return inters / unis
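# Worked example: crop1 = (0, 0, 48, 48) and crop2 = (24, 0, 48, 48) overlap in a
# 24x48 strip, so inters = 1152, unis = 48*48 + 48*48 - 1152 = 3456 and the
# returned IoU is 1152 / 3456 = 1/3.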
def get_frac_outside(crop):
"""
Any given row of crop has the following format
1. Top-coordinate of the crop
2. Left-coordinate of the crop
3. Height of the crop
4. Width of the crop
The output is the percentage (fraction) of crops in crop (i.e. rows in
crop) that fall at least partially outside the full image
"""
# Get size of the full image
H_img, W_img = CONFIG.MISC_im_size
# Check for out-of-image
hmins_outside = crop[:, 0] < 0
hmaxs_outside = crop[:, 0] + crop[:, 2] >= H_img
wmins_outside = crop[:, 1] < 0
wmaxs_outside = crop[:, 1] + crop[:, 3] >= W_img
# Compute fraction of outside
outsides = np.logical_or(np.logical_or(hmins_outside, hmaxs_outside),
np.logical_or(wmins_outside, wmaxs_outside))
return np.count_nonzero(outsides) / len(outsides)
def normalize_coords(coords_xy, crop_locs_start, crop_locs_goal, unnormalize=False):
# Get size of the full image
H_img, W_img = CONFIG.MISC_im_size
# Start and goal bbox widths
heights_start = crop_locs_start[:, 2]
widths_start = crop_locs_start[:, 3]
heights_goal = crop_locs_goal[:, 2]
widths_goal = crop_locs_goal[:, 3]
# Perform the unnormalization
if isinstance(coords_xy, np.ndarray):
coords_xy_out = np.copy(coords_xy)
else:
coords_xy_out = torch.clone(coords_xy)
if unnormalize:
coords_xy_out[:, 0] *= (W_img - widths_start / 2 - widths_goal / 2)
coords_xy_out[:, 1] *= (H_img - heights_start / 2 - heights_goal / 2)
else:
coords_xy_out[:, 0] /= (W_img - widths_start / 2 - widths_goal / 2)
coords_xy_out[:, 1] /= (H_img - heights_start / 2 - heights_goal / 2)
return coords_xy_out
def _setup_fixed_games(n_games = 5):
""" Randomly sample n_games number of fixed games."""
# Get some images to be able to use get_random_crops
images = torch.zeros((n_games, 3, CONFIG.MISC_im_size[0] , CONFIG.MISC_im_size[1]))
_ , start_crop_locs = get_random_crops(images)
# Then sample the goal locs
_ , goal_crop_locs = get_random_crops( images, other_crop_locs = start_crop_locs , max_dist = CONFIG.RL_max_start_goal_dist)
return start_crop_locs , goal_crop_locs
def get_random_crops(images, other_crop_locs=None, max_dist=None, min_iou=None):
"""
Note that if max_dist and min_iou are both provided, then only max_dist
will be used. Hence, for min_iou to take effect, max_dist has to be None.
"""
# Define some useful constants
H, W = CONFIG.MISC_patch_size
step_sz = int(CONFIG.RL_softmax_step_size*H)
im_H,im_W = CONFIG.MISC_im_size
crop_locations = torch.zeros((images.shape[0], 4))
n_chan = images.shape[1]
n_imgs = images.shape[0]
# Initialize memory for the crops size = (batch, n_chan, H_p, W_p)
# Keep the number of channels at a constant
crops = torch.zeros(size=(n_imgs, n_chan, H, W))
for i in range(images.shape[0]):
if CONFIG.MISC_grid_game:
# Image is divided into a static uniform grid. Patches are sampled from this grid
upper_H , upper_W = int(im_H / H) , int(im_W / W)
lower_H , lower_W = ( 0 , 0)
target_pos = np.array([-1,-1])
if max_dist is not None:
target_pos = (other_crop_locs[i,0:2].numpy() / np.array(CONFIG.MISC_patch_size)).astype('int64')
upper_H , upper_W = (min(target_pos[0] + max_dist + 1, upper_H),min(target_pos[1] + max_dist + 1, upper_W)) #Has to be in int
lower_H , lower_W = (max(target_pos[0] - (max_dist ), 0) , max(target_pos[1] - max_dist, 0))
grid_loc = np.floor(np.random.uniform( low = [lower_H ,lower_W] , high = [upper_H , upper_W]))
while (grid_loc == target_pos).all():
grid_loc = np.floor(np.random.uniform( low = [lower_H ,lower_W] , high = [upper_H , upper_W]))
crop_loc = np.concatenate(((grid_loc) * np.array(step_sz), np.array(CONFIG.MISC_patch_size))).astype('int64')
else:
# Sample the patches entirely at random
break_while = False
while not break_while:
crop_loc = transforms.RandomCrop.get_params(images[i, :, :, :][np.newaxis, :],
output_size=CONFIG.MISC_patch_size)
break_while = other_crop_locs is None or (max_dist is None and min_iou is None)
if not break_while:
# At this stage we may want to ensure that the sampled crop
# is not too far away from other_crop_locs, or that they
# do not have too little IoU-overlap
if max_dist is not None:
offset = get_regression_targets(crop_loc, other_crop_locs[i, :][np.newaxis, :],
normalize=False)
break_while = np.linalg.norm(offset) <= max_dist
elif min_iou is not None:
iou = compute_iou(crop_loc, other_crop_locs[i, :][np.newaxis, :])
break_while = iou >= min_iou
crop_locations[i, :] = torch.Tensor(np.array(crop_loc, dtype = int))
crops[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top=crop_loc[0],
left=crop_loc[1], height=crop_loc[2],
width=crop_loc[3])
return crops, crop_locations
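# Usage sketch (mirrors _setup_fixed_games above): sample start crops first, then
# sample goal crops constrained to lie within RL_max_start_goal_dist of the start:
# starts, start_locs = get_random_crops(images)
# goals, goal_locs = get_random_crops(images, other_crop_locs=start_locs,
#                                     max_dist=CONFIG.RL_max_start_goal_dist)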
def get_deterministic_crops(images,coords = [0,0]):
"""
Allows for extraction of deterministic crops in the image
"""
# Define some useful constants
H, W = CONFIG.MISC_patch_size
im_H,im_W = CONFIG.MISC_im_size
crop_locations = torch.zeros((images.shape[0], 4))
n_chan = images.shape[1]
n_imgs = images.shape[0]
# Initialize memory for the crops size = (batch, n_chan, H_p, W_p)
# Keep the number of channels at a constant
crops = torch.zeros(size=(n_imgs, n_chan, H, W))
# Coords can be supplied as a list or tensor but need to be in the correct numpy format
if isinstance(coords , torch.Tensor):
coords = coords.detach().numpy()
if isinstance(coords , list):
coords = np.array(coords)
if len(coords.shape) == 1:
coords = np.expand_dims(coords, 0)
coords = np.repeat(coords , n_imgs , 0)
# Iterate over the images getting the correct patches
for i in range(n_imgs):
h = int(coords[i][0])
w = int(coords[i][1])
crop_loc = [h , w , H,W]
if not CONFIG.RL_agent_allowed_outside and check_outside(torch.as_tensor(crop_loc)[None,:]):
# if the agent is not allowed outside and is going to end up outside
# move it in again
crop_loc = project_into_image(crop_loc)
# Sample the crop
crops[i, :n_chan, :, :] = transforms.functional.crop(images[i, :, :, :], top= crop_loc[0], left=crop_loc[1], height=H, width=W)[:,:48,:48]
crop_locations[i, :] = torch.Tensor(np.array(crop_loc))
return crops, crop_locations
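# Usage sketch: re-extract the stored start patches for a batch, e.g.
# val_crops, val_locs = get_deterministic_crops(val_images, coords=start_crops_)
# where each row of coords holds the (top, left) pixel position of the wanted crop.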
def get_regression_targets(crop_locs_start, crop_locs_goal, normalize=True):
# Get size of the full image
H_img, W_img = CONFIG.MISC_im_size
# Ensure correct data types and dims
if isinstance(crop_locs_start, tuple) or isinstance(crop_locs_start, list):
crop_locs_start = np.array(crop_locs_start)[np.newaxis, :]
if isinstance(crop_locs_goal, tuple) or isinstance(crop_locs_goal, list):
crop_locs_goal = np.array(crop_locs_goal)[np.newaxis, :]
if isinstance(crop_locs_start, np.ndarray) and crop_locs_start.ndim == 1:
crop_locs_start = crop_locs_start[np.newaxis, :]
if isinstance(crop_locs_goal, np.ndarray) and crop_locs_goal.ndim == 1:
crop_locs_goal = crop_locs_goal[np.newaxis, :]
# Start
tops_start = crop_locs_start[:, 0]
lefts_start = crop_locs_start[:, 1]
heights_start = crop_locs_start[:, 2]
widths_start = crop_locs_start[:, 3]
# Go from (h,w)- to (x,y)-coordinate system
xs_start = lefts_start + widths_start / 2
ys_start = H_img - (tops_start + heights_start / 2)
# Goal
tops_goal = crop_locs_goal[:, 0]
lefts_goal = crop_locs_goal[:, 1]
heights_goal = crop_locs_goal[:, 2]
widths_goal = crop_locs_goal[:, 3]
# Go from (h,w)- to (x,y)-coordinate system
xs_goal = lefts_goal + widths_goal / 2
ys_goal = H_img - (tops_goal + heights_goal / 2)
# Offsets
xs_offset = xs_goal - xs_start
ys_offset = ys_goal - ys_start
# Concatenate
if isinstance(xs_offset, np.ndarray):
regression_targets = np.concatenate([xs_offset[:, np.newaxis], ys_offset[:, np.newaxis]], 1)
else:
regression_targets = torch.cat([torch.unsqueeze(xs_offset, 1), torch.unsqueeze(ys_offset, 1)], 1)
# Potentially normalize regression targets to the [-1, 1]-range
if normalize:
regression_targets = normalize_coords(regression_targets, crop_locs_start, crop_locs_goal)
# Return the regression targets
return regression_targets
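# Worked example (a sketch with made-up numbers, assuming CONFIG.MISC_im_size == (256, 256)
# and 48x48 patches): a start crop (0, 0, 48, 48) has centre (x, y) = (24, 232) and a goal
# crop (48, 96, 48, 48) has centre (120, 184), so
#   get_regression_targets([0, 0, 48, 48], [48, 96, 48, 48], normalize=False)
# returns array([[96., -48.]]), i.e. move right and (in image coordinates) down.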
def load_normalize_data(download=False, batch_size=16,
multiply_images = None, split='val', use_eval_split = False):
    if CONFIG.MISC_dataset not in ['masa_filt', 'masa_seven'] and split == 'test':
        raise(Exception("Testing mode only implemented for the masa_filt and masa_seven datasets"))
if CONFIG.MISC_dataset.startswith('custom_'):
stat_path = os.path.join(CONFIG.MISC_dataset_path,"Custom", CONFIG.MISC_dataset[7:],'stats.json')
else:
stat_path = os.path.join(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset,'stats.json')
if os.path.exists(stat_path):
with open(stat_path) as json_file:
stats = json.load(json_file)
else:
print("Unable to find calculated mean and std for this dataset.\nUse utils/normalize_dataset.py")
exit(1)
# to control the behaviour of the function
CONFIG.RUNTIME_multiply_images = multiply_images
def collate_fn(batch):
        # Transform each returned batch to desired format
images , labels = tuple(zip(*batch))
# If enabled make several training examples of each image
if CONFIG.RUNTIME_multiply_images is not None and CONFIG.RUNTIME_multiply_images != 1:
images = torch.stack(images)
images = torch.repeat_interleave(images,CONFIG.RUNTIME_multiply_images , dim = 0)
temp = np.asarray(labels)
start_crops = np.repeat(temp[:,0,:] , CONFIG.RUNTIME_multiply_images , axis =0)
goal_crops = np.repeat(temp[:,1,:] , CONFIG.RUNTIME_multiply_images, axis = 0)
labels = (start_crops , goal_crops)
else:
temp = np.asarray(labels)
labels = (temp[:,0,:] , temp[:,1,:])
images = torch.stack(images)
return ( images , labels)
# Select which interpolation to be used
interpolation = 'bilinear'
if interpolation == 'nearest':
# Works for label masks but totally destroys the images.
        # Unable to train models on images that have been resized with nearest
interpolation_mode = transforms.InterpolationMode.NEAREST
elif interpolation == 'bilinear':
        # BILINEAR interpolation ruins label masks
interpolation_mode = transforms.InterpolationMode.BILINEAR
elif interpolation == 'bicubic':
interpolation_mode = transforms.InterpolationMode.BICUBIC
else:
raise(Exception("Unkonwn interpolation mode"))
transforms_train = [transforms.Resize([CONFIG.MISC_im_size[0]+4,CONFIG.MISC_im_size[1]+4], interpolation = interpolation_mode),
transforms.CenterCrop(CONFIG.MISC_im_size)
]
transforms_val = [transforms.Resize([CONFIG.MISC_im_size[0]+4,CONFIG.MISC_im_size[1]+4], interpolation = interpolation_mode),
transforms.CenterCrop(CONFIG.MISC_im_size)]
# Data augmentation
transforms_train += [
#transforms.RandomResizedCrop(CONFIG.MISC_im_size, scale = (0.8,0.8),ratio = (1,1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip()]
# If we use seg add a dummy variable to make the transforms work properly
if CONFIG.RL_priv_use_seg:
if CONFIG.MISC_dataset in ['masa', 'masa_filt']:
stats['means'] += [0]
stats['stds'] += [1]
else:
stats['means'] = stats['means'][:3]
stats['stds'] = stats['stds'][:3]
transforms_train = [transforms.ToTensor()] + transforms_train
transforms_val = [transforms.ToTensor()] + transforms_val
transforms_train += [transforms.Normalize(stats["means"], stats["stds"])]
transforms_val += [transforms.Normalize(stats["means"], stats["stds"])]
transform_train = transforms.Compose(transforms_train)
transform_val = transforms.Compose(transforms_val)
if CONFIG.MISC_dataset == 'dubai':
trainset = Dubai(CONFIG.MISC_dataset_path,split = 'train',transform
= transform_train)
valset = Dubai(CONFIG.MISC_dataset_path ,split = 'val',transform =
transform_val, use_eval_split = use_eval_split)
elif CONFIG.MISC_dataset == 'masa':
trainset = Masa(CONFIG.MISC_dataset_path,split = 'train',transform
= transform_train)
valset = Masa(CONFIG.MISC_dataset_path ,split = 'val',transform =
transform_val)
elif CONFIG.MISC_dataset == 'masa_filt':
trainset = MasaFilt(CONFIG.MISC_dataset_path,split = 'train',transform
= transform_train,use_eval_split = False)
valset = MasaFilt(CONFIG.MISC_dataset_path ,split=split, transform =
transform_val,use_eval_split = use_eval_split)
elif CONFIG.MISC_dataset == 'masa_seven':
trainset = MasaSeven(CONFIG.MISC_dataset_path,split = 'train',transform
= transform_train,use_eval_split = False)
valset = MasaSeven(CONFIG.MISC_dataset_path ,split=split, transform =
transform_val,use_eval_split = use_eval_split)
elif CONFIG.MISC_dataset == 'dubai_seven':
trainset = DubaiSeven(CONFIG.MISC_dataset_path,split = 'train',transform
= transform_train,use_eval_split = False)
valset = DubaiSeven(CONFIG.MISC_dataset_path ,split=split, transform =
transform_val,use_eval_split = use_eval_split)
elif CONFIG.MISC_dataset.startswith('custom_'):
trainset = CustomDataset(CONFIG.MISC_dataset_path, CONFIG.MISC_dataset[7:],split = 'train' , transform = transform_train, custom_split_file = CONFIG.MISC_dataset_split_file)
valset = CustomDataset(CONFIG.MISC_dataset_path,CONFIG.MISC_dataset[7:],split ='val', transform = transform_val, custom_split_file = CONFIG.MISC_dataset_split_file)
elif CONFIG.MISC_dataset == 'masa_full':
trainset = MasaFull(CONFIG.MISC_dataset_path, split='train', transform=transform_train, randomRotation=True)
valset = MasaFull(CONFIG.MISC_dataset_path, split='val', transform=transform_val, randomRotation=True)
elif CONFIG.MISC_dataset == 'images_pre':
trainset = ImagesPre(CONFIG.MISC_dataset_path,split = 'train',transform
= transform_train,use_eval_split = False)
valset = ImagesPre(CONFIG.MISC_dataset_path ,split=split, transform =
transform_val,use_eval_split = use_eval_split)
elif CONFIG.MISC_dataset == 'images_post':
trainset = ImagesPre(CONFIG.MISC_dataset_path,split = 'train',transform
= transform_train,use_eval_split = False, post_instead=True)
valset = ImagesPre(CONFIG.MISC_dataset_path ,split=split, transform =
transform_val,use_eval_split = use_eval_split, post_instead=True)
else:
raise(Exception("Unknown dataset"))
def worker_init(*args):
signal.signal(signal.SIGINT , lambda x,y: None)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=download, num_workers=2, collate_fn=collate_fn , worker_init_fn=worker_init)
valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=download, num_workers=2, collate_fn=collate_fn, worker_init_fn = worker_init)
return trainloader, valloader
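# Illustrative usage sketch (not executed; requires the dataset and its stats.json to
# exist under CONFIG.MISC_dataset_path):
#   trainloader, valloader = load_normalize_data(batch_size=16, multiply_images=2)
#   images, (start_locs, goal_locs) = next(iter(trainloader))
#   # images is a normalized (batch * multiply_images, C, H, W) tensor and the two
#   # location arrays hold the pre-sampled start and goal crops for each example.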
def visualize_batch(batch , save_name = 'vis_batch' , verbose = False , PATH = CONFIG.STATS_log_dir,
transform = None, prefix = 'agent'):
""" Visualizes all episodes in one batch. """
# Make save directory
dir_path = os.path.join(PATH , save_name)
if save_name == 'eval_0' and prefix == 'Det':
dir_path = os.path.join(PATH , 'eval')
os.makedirs(dir_path,exist_ok= True)
elif 'eval' in save_name:
dir_path = os.path.join(PATH , 'eval')
elif prefix == 'agent':
os.makedirs(dir_path)
# Limit number of episodes to visualize per batch
if CONFIG.MISC_vis_per_batch:
nbr_vis = min(CONFIG.MISC_vis_per_batch , batch.idx)
else:
nbr_vis = batch.idx
# Loop through each episode and visualize it
for i in range( nbr_vis ):
if 'eval' in save_name:
name = save_name + '_'+ prefix
else:
name = str(i)+ "_vis_" + prefix
episode , _ , weights= batch.get_episode(i, batch.steps[i].int() + 1)
visualize_trajectory(episode , save_name = name,verbose =True,
transform = transform , PATH = dir_path)
def visualize_trajectory(episode,
save_name='visualization', verbose=False, PATH = CONFIG.STATS_log_dir ,
transform = None):
"""
start_patch, goal_patch and agent_patches all have the format
(h_topleft, w_topleft, height, width)
- The patches need to be of numpy format
- To plot a sequence of agent patches just insert the sequence like (len(seq),4)
- To plot only one it is fine to insert of the shape (4)
"""
start_patch = episode.locs[0]
goal_patch = episode.loc_goal[0]
full_img = episode.image.squeeze(0).squeeze(0)[0:3,:,:].cpu()
agent_patches = episode.locs[1:, :]
rewards = episode.weights.cpu()
# Get size of the full image and patches
H_img, W_img = CONFIG.MISC_im_size
H_patches, W_patches = CONFIG.MISC_patch_size
step_sz = CONFIG.MISC_step_sz
    # If we received a transform -> use it
if transform is not None:
full_img = transform(full_img)
# Add image to plotting
fig, ax = plt.subplots( figsize = (14, 8))
ax.imshow(np.transpose(full_img, (1, 2, 0)))
# Draw all the possible boxes
for i in range(CONFIG.MISC_game_size * CONFIG.MISC_game_size):
y, x = divmod(i, CONFIG.MISC_game_size)
rect = patches.Rectangle((x * step_sz,y * step_sz),
W_patches - 1,
H_patches - 1,
edgecolor = 'w',
facecolor = 'none')
ax.add_patch(rect)
# Add start_patch and goal_patch
# NOTE: patches.Rectangle's xy refers to y going from top to bottom, i.e. it
# is "reversed" relative to the mathematical y-axis which would go from
# bottom to top
text_offset_x = CONFIG.MISC_patch_size[0] // 2
text_offset_y = CONFIG.MISC_patch_size[0] // 2
    # Check if the goal and final location coincide; if so plot success as a yellow rectangle instead
final_iou = compute_iou(agent_patches[-1,:] , goal_patch)
if final_iou > CONFIG.RL_done_iou:
rect_goal_color = 'y'
else:
rect_goal_color = 'g'
rect_goal = patches.FancyBboxPatch(xy=(goal_patch[1], goal_patch[0]),
width=goal_patch[3],
height=goal_patch[2],
linewidth=1.5,
boxstyle = patches.BoxStyle("Round",
pad=-13,
rounding_size=5),
edgecolor='none', facecolor='#97D077')
rect_start = patches.FancyBboxPatch(xy=(start_patch[1], start_patch[0]),
width=start_patch[3],
height=start_patch[2],
linewidth=1.5,
boxstyle = patches.BoxStyle("Round",
pad=-13,
rounding_size=5),
edgecolor='none', facecolor='#D0CEE2')
ax.add_patch(rect_start)
ax.text(start_patch[1] + text_offset_x, start_patch[0] + text_offset_y,
"S", fontsize=23, color='w',rotation = 0,rotation_mode = 'anchor',
horizontalalignment='center',verticalalignment='center')
    # Make sure that the agent-selected patches are of the correct dimensions
    # (add a dimension so that the loop below is correct)
if len(agent_patches.shape) == 1:
agent_patches = agent_patches[np.newaxis, :]
# Add agent-selected patch(es)
for i in range(agent_patches.shape[0]):
agent_patch = agent_patches[i,:]
# agent_rect = patches.Rectangle(xy=(agent_patch[1], agent_patch[0]), width=agent_patch[3],
# height=agent_patch[2], linewidth=1.5, edgecolor='r', facecolor='none')
# if final_iou < CONFIG.RL_done_iou or i < (agent_patches.shape[0]-1):
# ax.add_patch(agent_rect)
ax.text(agent_patch[1] + 4*i + 4, agent_patch[0] + 6 , str(i + 1),
horizontalalignment='left',verticalalignment='center',
bbox=dict(boxstyle='circle',fc='#7EA6E0',ec='none'), fontsize=11, color='w')
# Show IoU only in the last iteration which should be the convergent one
if verbose and i == (agent_patches.shape[0]-1):
dist = np.linalg.norm(get_regression_targets(agent_patch[None,:], goal_patch[None,:], normalize=False))
iou = compute_iou(agent_patch, goal_patch)
# Set text in top right corner for easier readability
ax.text(W_img + 2, 2 , 'Final dist = '+str(round(dist,1)), fontsize=9, color='b')
ax.text(W_img + 2, 12 , 'Final IoU = '+str(round(iou.item() , 3)), fontsize=9, color='b')
# Add option to show rewards for each step
if verbose and rewards is not None:
ax.text(W_img + 2 , 20 + 10*i , 'Reward %d: %2.1f'% (i, rewards[i]), fontsize = 8 , color='b')
ax.add_patch(rect_goal)
ax.text(goal_patch[1] + text_offset_x, goal_patch[0] + text_offset_y, "G",
horizontalalignment='center',verticalalignment='center',
fontsize=23, color='w',rotation = 0,rotation_mode = 'anchor')
if save_name is not None:
        # A save name was given, save the figure to disk (otherwise it is shown instead)
fig.savefig(os.path.join(PATH, save_name + '.png'))
plt.cla()
plt.clf()
plt.close('all')
gc.collect()
else:
plt.show()
def get_crop_distance(crop_loc1 , crop_loc2):
dist = math.sqrt((crop_loc1[0] - crop_loc2[0])**2 + (crop_loc1[1] - crop_loc2[1])**2)
return dist
def check_outside(crops_loc):
H, W = CONFIG.MISC_patch_size
im_H,im_W = CONFIG.MISC_im_size
bools = torch.zeros(crops_loc.shape[0])
for i,crop_loc in enumerate(crops_loc):
bools[i] = (crop_loc[0] < -H) or (crop_loc[0] > im_H) or (crop_loc[1] < -W) or (crop_loc[1] > im_W)
return bools
def project_into_image(crop_loc):
H, W = CONFIG.MISC_patch_size
im_H,im_W = CONFIG.MISC_im_size
if crop_loc[0] < -H: crop_loc[0] = 0
elif crop_loc[0] > im_H-H: crop_loc[0] = im_H-H
if crop_loc[1] < -W: crop_loc[1] = 0
elif crop_loc[1] > im_W-W: crop_loc[1] = im_W-W
return crop_loc
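# Minimal sketch of how check_outside and project_into_image interact (illustrative only,
# assuming 48x48 patches in a 256x256 image):
#   loc = [300, -60, 48, 48]                  # below and to the left of the image
#   if check_outside(torch.as_tensor(loc)[None, :]):
#       loc = project_into_image(loc)         # -> [208, 0, 48, 48], fully inside again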
# Used to select the latest results folder
def selectLatest(input):
# Transform to correct datettime format
dt = ''.join([input[0:10],input[10:19].replace('-',':')])
dt = dt.replace('_','T')
# Convert to date time and then to unix time
dt = parse(dt)
return time.mktime(dt.timetuple())
"""
Finds the latest directory in a folder sorted by the directory name format used in the logs.
"""
def find_latest_log(log_folder_path , n_latest = 0):
folders = os.listdir(log_folder_path)
# Add filtering for non directory filenames
# Should not be any non-dirs here so might be unnecessary
folders = list(filter( lambda d: os.path.isdir(os.path.join(log_folder_path , d)) , folders))
# Sort it based on name
try:
folders.sort(key = selectLatest , reverse = True)
except:
print("Error when trying to sort the logs in find_latest_log")
if n_latest >= len(folders):
# Not that many logs available
return None
else:
return folders[n_latest]
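# Illustrative usage sketch (not executed): pick the most recent log directory, or the
# second most recent one by passing n_latest=1.
#   latest = find_latest_log(CONFIG.STATS_log_dir_base)
#   previous = find_latest_log(CONFIG.STATS_log_dir_base, n_latest=1)
#   # Both return a directory name (not a full path), or None if there are not enough logs.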
"""
Reads the contents of the config file and overwrites the current settings with the ones in the config file.
"""
def read_and_overwrite_config(config_file_path , same_data = False):
with open(config_file_path , 'r') as local_config:
# Iterate over file lines
for line in local_config:
# If it does not start with CONFIG not relevant
if line[0] != 'C' or "CONFIG." not in line:
continue
split = line.split('.')[1].strip(' #=').split('=')
setting = split[0].strip(' #')
# Check which setting
if "RL_priv" in setting:
# Get that setting
value = eval(split[1].strip(' #'))
CONFIG[setting] = value
# We also need the patch size and image size to match
if "MISC_patch_size" in setting or "MISC_im_size" in setting:
value = eval(split[1].strip(' #'))
CONFIG[setting] = value
# If enabled will also read settings that make the trainset contain exact same
# data as the model was trained on.
            if same_data and ("MISC_random_seed" in setting or setting == "MISC_dataset"):
value = eval(split[1].strip(' #'))
CONFIG[setting] = value
def cosine_sim_heatmap(embeddings , grid_size = 16, pos = [1,2]):
grid_size = grid_size - 1
# Generate positional embeddings for all positions
comb = torch.combinations( torch.arange(0, grid_size) , r = 2)
# Add flipped
comb = torch.cat((comb , comb.flip(dims=(1,))))
comb = torch.cat((comb , torch.cat((torch.arange(0,grid_size).unsqueeze(1),torch.arange(0,grid_size).unsqueeze(1)),dim = 1))).long()
pos_embedding = torch.flatten(embeddings[0,comb ] , start_dim = 1, end_dim = 2)
# Plot heat map of one positional embedding compared to the rest
selected_pos = torch.tensor(pos)
selected_pos_emb = pos_embedding[ ( selected_pos == comb).all(dim = 1) , :]
cosine_sim = torch.nn.CosineSimilarity( dim = 1)
# Calculate
cosine_similarities = cosine_sim( selected_pos_emb , pos_embedding)
    # Due to torch's inability to index with tensors properly we will have to insert
# the values manually
heat_map_tensor = torch.ones((grid_size , grid_size ))
for (i, sim) in enumerate(cosine_similarities):
heat_map_tensor[comb[i,0] , comb[i,1]] = sim
# Convert to numpy
heat_map_numpy = heat_map_tensor.cpu().numpy()
ax = sns.heatmap(heat_map_numpy)
ax.set_title('Positional Encoding: (%d,%d)' % (pos[0] , pos[1]))
plt.savefig(os.path.join(CONFIG.STATS_log_dir , "positional_embeddings" , "embedding_%d_%d.jpg" % (pos[0] , pos[1])))
plt.cla()
plt.clf()
plt.close('all')
gc.collect()
def calculate_cnn_output_size(layers, input_size, max_pool):
""" OLD_NOT_USED : layer = [kernel , out_chan, padding, stride, in_chan]"""
""" layer = [in_chan , out_chan, kernel, stride, padding]"""
H , W = input_size
    C = layers[0][0]  # in_chan of the first layer (see the layer format above)
for layer in layers:
H = (H + 2 * layer[4] - (layer[2] - 1) - 1 ) / layer[3] + 1
W = (W + 2 * layer[4] - (layer[2] - 1) - 1 ) / layer[3] + 1
C = layer[1]
        # If max_pool is available assume there is such a layer after each conv
if max_pool is not None:
H = (H - (max_pool[0] - 1) - 1) / max_pool[1] + 1
W = (W - (max_pool[0] - 1) - 1) / max_pool[1] + 1
H = math.floor(H)
W = math.floor(W)
return [H,W,C]
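# Worked example (a sketch with made-up layer specs in the documented
# [in_chan, out_chan, kernel, stride, padding] format):
#   layers = [[3, 16, 3, 1, 1], [16, 32, 3, 2, 1]]
#   calculate_cnn_output_size(layers, (48, 48), max_pool=None)
#   # The first layer keeps 48x48 (stride 1, padding 1), the second halves it to 24x24,
#   # so the result is [24, 24, 32].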
def crop_s(crop1 , crop2 ,T = None, DEBUG = True):
"""
    Simply plots two crops side by side. Useful for debugging.
"""
if DEBUG:
# Assume in debug mode and switch to TkAgg backend
matplotlib.use('TkAgg')
# If a transform is supplied use it
if T is not None:
crop1 , crop2 = T(crop1) , T(crop2)
fig, axes = plt.subplots(nrows = 1 , ncols = 2)
axes[0].imshow(crop1.permute(1,2,0))
axes[1].imshow(crop2.permute(1,2,0))
axes[0].set_title("Crop 1")
axes[1].set_title("Crop 2")
# TODO
if DEBUG:
plt.show()
plt.cla()
plt.clf()
plt.close('all')
gc.collect()
def get_correct_act(loc_start , loc_goal):
""" Calculate which action (in ohe form) is the correct."""
grid_correct_move = ( loc_goal[0,0:2] - loc_start[0,0:2]) / ( CONFIG.RL_softmax_step_size * torch.tensor(CONFIG.MISC_patch_size)).long()
# Due to flooring of negative integers
grid_correct_move = grid_correct_move.round()
# remember first coordinate is y (positive downwards)
dx, dy = grid_correct_move[1].item() , grid_correct_move[0].item()
# now map this move to ohe
if dx == 0 and dy == -1:
i = 0 # Move up
elif dx == 1 and dy == -1:
i = 1 # Move up right
elif dx == 1 and dy == 0:
i = 2 # Move right
elif dx == 1 and dy == 1:
i = 3 # move right down
elif dx == 0 and dy == 1:
i = 4 # move down
elif dx == -1 and dy == 1:
i = 5 # Move down left
elif dx == -1 and dy == 0:
i = 6 # Move left
elif dx == -1 and dy == -1:
i = 7 # Move up left
else:
raise(Exception("Unknown move in get_correct_act. Is maximum distance really 1?"))
correct_act = torch.zeros((1,8))
correct_act[0,i] = 1
return correct_act
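# Illustrative sketch (not executed; assumes CONFIG.RL_softmax_step_size == 1 and 48x48
# patches): a start crop one grid row below the goal should yield "move up", i.e. index 0.
#   loc_start = torch.tensor([[96., 48., 48., 48.]])   # grid cell (2, 1)
#   loc_goal = torch.tensor([[48., 48., 48., 48.]])    # grid cell (1, 1)
#   get_correct_act(loc_start, loc_goal)   # -> tensor([[1., 0., 0., 0., 0., 0., 0., 0.]])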
def setupLogDir( log_dir_path):
"""
Create log directory and save all current files. If pretrained network is enabled
move the config files from that log directory to this one as well.
"""
if not os.path.exists(CONFIG.STATS_log_dir_base):
os.makedirs(CONFIG.STATS_log_dir_base)
os.makedirs(CONFIG.STATS_log_dir, exist_ok = False)
CONFIG.STATS_vis_dir = os.path.join(CONFIG.STATS_log_dir, "visualizations")
os.makedirs(CONFIG.STATS_vis_dir)
CONFIG.STATS_scripts_dir = os.path.join(CONFIG.STATS_log_dir, "saved_scripts")
os.makedirs(CONFIG.STATS_scripts_dir)
copyfile("training/train_agent.py" , os.path.join(CONFIG.STATS_scripts_dir, "train_agent.py"))
copyfile("config.py" , os.path.join(CONFIG.STATS_scripts_dir , ".." , "config.py"))
# Save Network files
copyfile("networks/rnn_agents.py", os.path.join(CONFIG.STATS_scripts_dir, "rnn_agents.py"))
# Save Utils files
copyfile("utils/utils.py", os.path.join(CONFIG.STATS_scripts_dir, "utils.py"))
copyfile("utils/training_utils.py" , os.path.join(CONFIG.STATS_scripts_dir , "training_utils.py"))
copyfile("utils/agent_utils.py" , os.path.join(CONFIG.STATS_scripts_dir, "agent_utils.py"))
# Create folder for saving intermediate models
#os.makedirs(os.path.join(CONFIG.STATS_log_dir, "models"))
CONFIG.STATS_metrics_dir = os.path.join(CONFIG.STATS_log_dir, "metrics")
os.makedirs(CONFIG.STATS_metrics_dir)
# If pretrained network is to be loaded save the config files from that folder as well
if CONFIG.RL_priv_pretrained:
CONFIG.STATS_pretrained_log_dir = os.path.join(CONFIG.STATS_log_dir, "pretrained")
os.makedirs(CONFIG.STATS_pretrained_log_dir)
embedder_type_mapping = dict([
("Doerch" , 'RL_pretrained_doerch_net'),
("ShareNet" , 'RL_pretrained_doerch_net'),
("Segmentations", 'RL_pretrained_segmentation_net')
])
pre_trained_dir = CONFIG[embedder_type_mapping[CONFIG.RL_patch_embedder]]
# Check if pretrained directory really exists
if not os.path.exists(pre_trained_dir):
raise(Exception("Unable to find pretrained directory:\t%s" , pre_trained_dir))
pretrained_files = glob(pre_trained_dir + "/*.py")
pretrained_files += glob(pre_trained_dir + "/*.json")
for file in pretrained_files:
dst_file = file[file.rfind("/")+1:]
copyfile(file, os.path.join(CONFIG.STATS_pretrained_log_dir, dst_file))
def get_pos_goal_loc(loc , grid_size , distance):
"""
Get all possible grid positions at a given distance from loc.
"""
pos_grid = np.ones((grid_size,grid_size)) * -1
try:
pos_grid[loc[0],loc[1]] = 0
except:
pdb.set_trace()
for d in range(1, distance + 1 ):
# Get index of previous locations
inds = np.asarray(np.where(pos_grid == (d - 1))).T
# For each direction take a step and write new distance (if possible)
for i in range(0,8):
if i == 0:
dir = np.array([[-1,0]])
elif i == 1:
dir = np.array([[-1,1]])
elif i == 2:
dir = np.array([[0,1]])
elif i == 3:
dir = np.array([[1,1]])
elif i == 4:
dir = np.array([[1,0]])
elif i == 5:
dir = np.array([[1,-1]])
elif i == 6:
dir = np.array([[0,-1]])
elif i == 7:
dir = np.array([[-1,-1]])
inds_dir = inds + dir
# Filter out all new locations that are outside grid
inds_dir = inds_dir[((inds_dir >= 0) & (inds_dir < grid_size)).all(axis= 1),:]
# Also filter out any new location already visited
inds_dir = inds_dir[ (pos_grid[inds_dir[:,0] , inds_dir[:,1]] == -1), :]
# Write new distance
if len(inds_dir) > 0:
pos_grid[inds_dir[:,0] , inds_dir[:,1]] = d
    # When completed, find indices of positions at the requested distance
arrays_distance = np.stack(np.where( pos_grid == distance)).T
if len(arrays_distance) == 0:
raise(Exception("Did not find any valid locations"))
return arrays_distance
def sample_grid_games_fixed_distance( distance, n_games):
"""
Samples n_games with fixed distance between start goal.
Returns: n_games x 4 with start and goal loc
"""
# Calculate grid size
grid_size = CONFIG.MISC_im_size[0] // CONFIG.MISC_step_sz + 1
# Determine if bounds are needed for sampling start
if distance > grid_size // 2:
# TODO
start_bounds = 0
# sample start locations
# Due to distance being so large that not all start locations are possible
# create a grid of valid start positions to sample from
grid = np.reshape( np.arange(0,grid_size**2) , (grid_size, grid_size))
grid_pos_dist = (grid_size - distance)
# Check that grid_pos_dist is positive
if grid_pos_dist <= 0:
raise(ValueError("Distance equal to or larger than grid size."))
grid[grid_pos_dist:grid_size - grid_pos_dist , grid_pos_dist:grid_size - grid_pos_dist] = -1
grid = grid.flatten()
grid = np.delete( grid, np.where( grid == -1))
# Grid now contains 1D array of allowed start positions
start_pos = np.random.choice( grid, size = n_games)
start_locs = np.vstack( ( start_pos // grid_size, start_pos % grid_size)).T
else:
        # No need for bounds, sample start locations uniformly
start_locs = np.random.randint(0, grid_size, size = (n_games, 2))
goal_locs = np.zeros_like(start_locs)
    # For each unique start location generate the corresponding goal locations
unique_start_locs = np.unique(start_locs , axis = 0)
pos_goal_locs = {}
for loc in unique_start_locs:
# Calculate number of locs with this position
locs_ind = (( start_locs == loc).all(axis = 1))
pos_goal_locs = get_pos_goal_loc(loc, grid_size , distance)
rand_inds = np.random.randint( pos_goal_locs.shape[0] , size = locs_ind.sum())
        # These are the sampled goal locations for this start position
local_goal_locs = pos_goal_locs[rand_inds, :]
        # Take these goal locs and put them in the output array
goal_locs[ np.where( locs_ind) , : ] = local_goal_locs
return np.hstack( (start_locs, goal_locs))
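# Illustrative usage sketch (not executed; all coordinates are grid cells, not pixels):
#   games = sample_grid_games_fixed_distance(distance=2, n_games=8)
#   # games has shape (8, 4) with columns (start_row, start_col, goal_row, goal_col), and
#   # every goal lies exactly 2 grid moves (8-connected, i.e. Chebyshev distance) from its
#   # start. Multiply by CONFIG.MISC_step_sz to convert to pixel coordinates.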
| 40,412 | 38.159884 | 181 |
py
|
airloc
|
airloc-master/utils/create_split_file.py
|
import pandas as pd
import os
import matplotlib
import argparse
import torch
from utils.dataset_utils import *
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import time
import numpy as np
from doerchnet.utils import visualize_doerch
from utils.utils import sample_grid_games_fixed_distance
from config import CONFIG
def create_fixed_game_split_file(args ):
if args.dataset is None:
print("\nERROR:\tPlease select a dataset\n")
exit(1)
# Check if dataset exists
dataset_path = os.path.join(CONFIG.MISC_dataset_path , args.dataset)
if not os.path.exists(dataset_path):
print("\nERROR:\tDataset not found:\t%s\n" % args.dataset)
exit(1)
# Locate current split file
if args.grid_game:
split_file_path = os.path.join(dataset_path , "list_eval_partition_grid.csv")
else:
split_file_path = os.path.join(dataset_path, "list_eval_partition.csv")
# Check its existence
if not os.path.exists(split_file_path):
print("Current %s split file not found." % str(args.grid_game))
exit(2)
# Read split file and filter out for this split
orig_split = pd.read_csv(split_file_path)
if args.split == 'train':
orig_split = orig_split[orig_split.partition == 0.0]
elif args.split == 'val':
orig_split = orig_split[orig_split.partition == 1.0]
elif args.split == 'test':
orig_split = orig_split[orig_split.partition == 2.0]
else:
raise(Exception("Unexpected split:\t%s" % args.split))
# For each file in the dataframe create args.number_games games and append to the new split
# First two are start coordinates and last two are goal coordinates
n_imgs = orig_split.shape[0]
games = np.zeros((orig_split.shape[0] * args.number_games , 4))
locs_start = np.array([[0,4]]).repeat(orig_split.shape[0],axis=0)
for i in range(1,args.number_games + 1):
if args.number_games == 24:
idx = i
idx-=1
if idx >=20: idx+=1
y,x = divmod(idx,5)
print(x,y)
locs_goal = np.array([[x,y]]).repeat(orig_split.shape[0],axis=0)
temp = np.hstack((locs_start,locs_goal))
else:
if i > CONFIG.MISC_game_size - 1:
difficulty = i % (CONFIG.MISC_game_size - 1)
else:
difficulty = i
print(difficulty)
temp = sample_grid_games_fixed_distance(difficulty , n_imgs)
games[(i-1)*n_imgs:i*n_imgs,:] = temp
    # Now games have been sampled. Multiply by the step size to get pixel coordinates and write to file
games *= CONFIG.MISC_step_sz
file_names = np.expand_dims(np.array(list(orig_split['image_id']) * args.number_games) , axis = 1)
data = np.hstack( (file_names, games, np.ones((orig_split.shape[0] * args.number_games, 2)) * CONFIG.MISC_patch_size))
cols = ["image_id" , "start_y", "start_x" , "goal_y", "goal_x" , "patch_size_x" , "patch_size_y"]
new_split = pd.DataFrame(data , columns = cols)
if args.number_games == 24:
# Write to file
new_split_file_path = os.path.join(dataset_path , "%s_eval_partition_grid_fixed_start.csv" % args.split)
new_split.to_csv(new_split_file_path)
else:
# Write to file
new_split_file_path = os.path.join(dataset_path , "%s_eval_partition_grid_fixed_distance.csv" % args.split)
new_split.to_csv(new_split_file_path)
def verify_split_file(args):
matplotlib.use('TkAgg')
transform = transforms.Compose([
transforms.Resize(CONFIG.MISC_im_size),
transforms.ToTensor()
])
# Load dataset
if args.dataset == 'masa_filt':
dataset = MasaFilt(CONFIG.MISC_dataset_path,split = 'val', use_eval_split = True)
else:
raise(Exception("Unkown dataset"))
image , locs = dataset.getitem__by_image_id(args.image_id)
# Convert image to tensor
image = transform(image)
locs = np.concatenate((locs, 48 * np.ones((4,2,2))) , axis = 2)
locs = torch.from_numpy(locs)
for i in range(locs.shape[0]):
visualize_doerch(image, locs[i,0,:] , locs[i,1,:] ,torch.tensor([1,0]), save_name = 'vis_%d'%i)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--dataset" , "-d" , type=str , default = None, help = "Select which dataset to create split file for.")
parser.add_argument("--split" , type = str , default = "val" , help = "Which split should be created.")
parser.add_argument("--grid-game" , type = bool, default = True)
parser.add_argument("--number-games", type = int, default = CONFIG.MISC_game_size - 1 , help = "Number of games to create for each image.")
parser.add_argument("--verify", type = bool, default = False, help = "Verify new split file visually.")
parser.add_argument("--image-id" , type = str, help = "Which image to visualize")
args = parser.parse_args()
if not args.verify:
print("Creating eval split file for %s" % args.dataset)
print("Partition selected:\t%s" % args.split)
print("Number of games per image:\t%d" % args.number_games)
create_fixed_game_split_file(args)
else:
print("Verifying current eval split file.")
verify_split_file(args)
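# Example invocation (illustrative; the dataset name and paths depend on your CONFIG):
#   python -m utils.create_split_file --dataset masa_filt --split val
# This writes val_eval_partition_grid_fixed_distance.csv (or the fixed-start variant when
# --number-games is 24) next to the dataset's existing split files.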
| 5,333 | 34.324503 | 143 |
py
|
airloc
|
airloc-master/utils/create_game_files.py
|
#!/bin/env python3
import numpy as np
import random
import pandas as pd
from shutil import copyfile
import argparse
import os, sys
from config import CONFIG
argparse = argparse.ArgumentParser()
argparse.add_argument("--seed" , type = int , default = 0 , help = "Set seed for random number generators")
argparse.add_argument("dataset" , type = str , help = "Select which dataset to use")
argparse.add_argument("--number-games-diff" , "-n" , type = int , default = 4, help = "Select the number of games per difficulty")
args = argparse.parse_args()
# Set seeds
np.random.seed(args.seed)
random.seed(args.seed)
# Check that dataset is available
dataset_path = os.path.join(CONFIG.MISC_dataset_path, args.dataset)
if not os.path.exists(dataset_path):
print("Dataset does not exist:\t%s" % args.dataset)
sys.exit(0)
# Check that it has a split file with fixed distance
if not os.path.exists(os.path.join(dataset_path, "val_eval_partition_grid_fixed_distance.csv")):
print("Dataset does not have 'val_eval_partition_grid_fixed_distance.csv'.")
sys.exit(0)
data_path = os.path.join(dataset_path , "image")
# Read split file
split = pd.read_csv(os.path.join(dataset_path, "val_eval_partition_grid_fixed_distance.csv"))
# Create map structure to save game splits
os.makedirs("game_files", exist_ok = True)
i = 1
game_split = "game_split_%d" % i
while (os.path.exists(os.path.join("game_files" , game_split))):
i += 1
game_split = "game_split_%d" % i
game_split_dir = os.path.join("game_files" , game_split)
os.makedirs(game_split_dir)
n_rows = split.shape[0]
n_images = n_rows // 4
# Divide the dataset into n number of divisions
divs = n_images // ( args.number_games_diff * 4)
for div in range(divs):
for i in range(4):
# select rows in split file
offset = [x % 4 for x in range(i, i + 4)]
base = div * args.number_games_diff
indx = []
for p in range(4):
indx += list(range(base + offset[p] * args.number_games_diff + p * n_images, base + offset[p] * args.number_games_diff + p * n_images + args.number_games_diff))
try:
local_split = split.iloc[indx]
except:
print("Unable to split with iloc:\t%s"%div)
pass
# Also create warmup folder with some randomly sampled images
warmup_indx = []
for loop in range(10): # Use ten warmup images
            warm_ind = random.randint(0, split.shape[0] - 1)
            while warm_ind in indx:
                warm_ind = random.randint(0, split.shape[0] - 1)
warmup_indx.append(warm_ind)
warm_split = split.iloc[warmup_indx]
local_dir = os.path.join(game_split_dir, "game_split_%d_%d" % (div, i))
local_data_dir = os.path.join(local_dir, "images")
local_warmup_data_dir = os.path.join(local_dir, "warmup_images")
# Create folder for split
os.makedirs(local_data_dir)
os.makedirs(local_warmup_data_dir)
# Write split file
local_split.to_csv(os.path.join(local_dir, "split_file.csv"))
warm_split.to_csv(os.path.join(local_dir, "warmup_split_file.csv"))
# Move images to local data dir
for image_id in local_split.image_id:
copyfile(os.path.join(data_path, image_id) , os.path.join(local_data_dir, image_id))
for image_id in warm_split.image_id:
copyfile(os.path.join(data_path, image_id) , os.path.join(local_warmup_data_dir, image_id))
| 3,539 | 29 | 173 |
py
|
airloc
|
airloc-master/utils/training_utils.py
|
import torch
import sys
import numpy as np
import torchvision.transforms as transforms
from copy import deepcopy
import time
import math
import torchvision.transforms.functional as F
from torchvision.transforms import Resize
from utils.agent_utils import rewards_to_go, normalize_batch_weights
from utils.utils import get_random_crops, get_deterministic_crops, compute_iou,\
get_correct_act, sample_grid_games_fixed_distance
from doerchnet.utils import sample_doerch_crops
from config import CONFIG
class BatchStorage():
""" Class used as storage container for information generated during one batch."""
def __init__(self,sc, CONFIG = CONFIG ):
self.CONFIG = CONFIG
self.device = CONFIG.device
self.ep_len = self.CONFIG.RL_max_episode_length
self.n_eps = self.CONFIG.RL_batch_size * self.CONFIG.RL_multiply_images
self.p_H, self.p_W = self.CONFIG.MISC_patch_size
self.im_H, self.im_W = self.CONFIG.MISC_im_size
self.sc = sc
self.proptime = 0
self.max_len_batch = self.ep_len * self.n_eps
def initialize(self, batch_size=None):
        # Allocate memory for all inputs, priv info, training tensors
if batch_size: self.n_eps = batch_size
# This index is the number of episodes processed
self.idx = 0
self.difficulty = torch.zeros(self.n_eps)
# Allocate for those that are always active
self.weights = torch.zeros((self.n_eps,self.ep_len))
        # Initialize to NaN so that steps that are never taken can be ignored by the nan-aware statistics later
self.weights[:,:] = torch.nan
# TODO: Maybe add an extra dimension if we need duplicate storage
self.locs = torch.zeros((self.n_eps , self.ep_len + 1, 4))
self.locs_goal = torch.zeros((self.n_eps, 4))
        # Check if instance segmentation input is enabled; if so, add one extra channel
n_chan_images = 3
if CONFIG.RL_priv_use_seg:
if CONFIG.MISC_dataset in ['dubai']:
n_chan_images = 6
elif CONFIG.MISC_dataset in ['masa', 'masa_filt']:
n_chan_images = 4
else:
raise(Exception("Define which type of segmentation this dataset has"))
self.image = torch.zeros((self.n_eps, n_chan_images, self.im_H , self.im_W))
# Account for the extra channel tracking where the image ends
n_chan_crops = n_chan_images
        # Add one extra crop for the final position after the final action
self.crops = torch.zeros((self.n_eps, self.ep_len + 1, n_chan_crops, self.p_H, self.p_W)).to(self.CONFIG.device)
        # If softmax is enabled the action is a one-hot encoding of the 8 possible directions
self.actions = torch.zeros((self.n_eps, self.ep_len , 8)).to(self.CONFIG.device)
# Distance from current crop to goal for all action-reward-state tuples
self.dists = torch.zeros((self.n_eps, self.ep_len)).to(self.device)
self.dists[:,:] = torch.nan
self.iou = torch.zeros((self.n_eps,))
self.has_converged = torch.zeros((self.n_eps,))
self.crops_goal = torch.zeros((self.n_eps , n_chan_crops, self.p_H, self.p_W)).to(self.CONFIG.device)
if CONFIG.RL_priv_grid_location:
self.grid_loc = torch.zeros((self.n_eps , self.ep_len , int(self.im_H / self.p_H) , int(self.im_W/self.p_W))).to(CONFIG.device)
self.grid_curr_loc = torch.zeros((self.n_eps , self.ep_len , int(self.im_H / self.p_H) , int(self.im_W/self.p_W))).to(CONFIG.device)
# Batch statistics
self.steps = torch.zeros(( self.n_eps , ))
self.iou = torch.zeros(( self.n_eps , ))
self.hasConverged = torch.zeros(( self.n_eps , ))
self.final_distance = torch.zeros((self.n_eps , ))
self.final_distance[:] = torch.nan
self.distance = torch.zeros(( self.n_eps , ))
self.cumulativeRewardToGo = torch.zeros(( self.n_eps , ))
        # Set all values to nan. Only use episodes which were successful for mean calculation
self.stepRatio = torch.zeros(( self.n_eps , ))
self.stepRatio[:] = torch.nan
self.time = torch.zeros(( self.n_eps , ))
def append_episode(self, episode):
""" Add all information from one episode to this batch """
# First insert state, reward, action tuples
self.steps[self.idx] = episode.step
self.crops[self.idx ,:episode.step +1,: ,:,:] = episode.crops
self.weights[self.idx , :episode.step] = episode.weights
self.actions[self.idx , :episode.step,:] = episode.actions
self.dists[self.idx , :episode.step] = episode.dists
# If agent did not succeed add final dist
if episode.iou.item() < CONFIG.RL_done_iou:
self.final_distance[self.idx] = (( episode.loc_goal[0,:2] - episode.locs[-1,:2]) / int(CONFIG.MISC_patch_size[0] * CONFIG.RL_softmax_step_size)).abs().int().max().item()
self.locs[self.idx , :episode.step + 1, :] = episode.locs
self.locs_goal[self.idx , :] = episode.loc_goal
self.iou[self.idx] = episode.iou
self.hasConverged[self.idx] = 1.0 if episode.iou >= CONFIG.RL_done_iou else 0.0
self.distance[self.idx] = episode.dists[-1]
self.cumulativeRewardToGo[self.idx] = episode.weights[0]
        # Only add value for step ratio if agent was successful this episode
if episode.iou >= CONFIG.RL_done_iou:
self.stepRatio[self.idx] = episode.min_steps / episode.step
self.time[self.idx] = episode.time
self.image[self.idx, :,:,:] = episode.image
# The difficulty of the played game
self.difficulty[self.idx] = episode.dists[0]
if CONFIG.RL_priv_grid_location:
self.grid_loc[self.idx , :,:,:] = episode.grid_loc
self.grid_curr_loc[self.idx ,:,:,:] = episode.grid_curr_loc
self.crops_goal[self.idx , :,:,:] = episode.crop_goal
self.idx += 1
def prepare_for_loss(self):
        # Do any necessary preparations for loss computation
# For now only normalize the weights
self.weights = normalize_batch_weights(self.weights, self.dists).detach()
def get_episode(self, ep_id , step_id):
""" Retrieves a detached episode from the batch."""
ep = EpisodeStorage(CONFIG = self.CONFIG)
ep.initialize(self.image[ep_id,:,:,:][None,:].clone())
ep.crops = self.crops[ep_id , 0:step_id , :,:,:].clone()
# Set episode values
ep.crop_goal = self.crops_goal[ep_id,:,:,:].clone().unsqueeze(0)
ep.locs = self.locs[ep_id , :step_id ,:]
ep.loc_goal = self.locs_goal[ep_id, :].unsqueeze(0)
ep.weights = self.weights[ep_id,:step_id]
# Set correct number of steps taken
ep.step = step_id-1
if CONFIG.RL_priv_grid_location:
ep.grid_loc = self.grid_loc[ep_id]
ep.grid_curr_loc = self.grid_curr_loc[ep_id]
if step_id > self.steps[ep_id]:
# Get entire episode, no action in final state
action = None
weight = None
else:
action = self.actions[ep_id,step_id-1,:]
weight = self.weights[ep_id,step_id-1]
# Return state (episode), the taken action, and the weight for this tuple
return ep , action, weight
def store(self, mode = '', eval=False):
"""
Stores the statistics for the batch
"""
batch_final_distance = self.final_distance.nanmean()
if not math.isnan(batch_final_distance.item()):
self.sc.s(mode + 'FinalDistanceOnlyFailure').collect(batch_final_distance.item())
batch_final_distance = torch.tensor([0.0])
self.sc.s(mode + 'Steps').collect(self.steps.mean().item())
self.sc.s(mode + 'IoU').collect(self.iou.mean().item())
self.sc.s(mode + 'CumulativeRewardToGo').collect(self.cumulativeRewardToGo.mean().item())
self.sc.s(mode + 'HasConverged').collect(self.hasConverged.float().mean().item())
inds = self.dists[:,0].repeat(6,1).cpu() == torch.as_tensor([[1],[2],[3],[4],[5],[6]])
temp = np.array([self.steps[inds[0]].mean().item(),
self.steps[inds[1]].mean().item(),
self.steps[inds[2]].mean().item(),
self.steps[inds[3]].mean().item(),
self.steps[inds[4]].mean().item(),
self.steps[inds[5]].mean().item(),
])
self.sc.s(mode + 'SeparatedSteps').collect(temp)
temp = np.array([self.iou[inds[0]].mean().item(),
self.iou[inds[1]].mean().item(),
self.iou[inds[2]].mean().item(),
self.iou[inds[3]].mean().item(),
self.iou[inds[4]].mean().item(),
self.iou[inds[5]].mean().item(),
])
self.sc.s(mode + 'SeparatedIoU').collect(temp)
temp = np.array([self.cumulativeRewardToGo[inds[0]].mean().item(),
self.cumulativeRewardToGo[inds[1]].mean().item(),
self.cumulativeRewardToGo[inds[2]].mean().item(),
self.cumulativeRewardToGo[inds[3]].mean().item(),
self.cumulativeRewardToGo[inds[4]].mean().item(),
self.cumulativeRewardToGo[inds[5]].mean().item(),
])
self.sc.s(mode + 'SeparatedCumulativeRewardToGo').collect(temp)
temp = np.array([self.hasConverged[inds[0]].mean().item(),
self.hasConverged[inds[1]].mean().item(),
self.hasConverged[inds[2]].mean().item(),
self.hasConverged[inds[3]].mean().item(),
self.hasConverged[inds[4]].mean().item(),
self.hasConverged[inds[5]].mean().item(),
])
self.sc.s(mode + 'SeparatedHasConverged').collect(temp)
# Store the relative difficulty of the played games
relative_diff = np.array([
((self.difficulty == 1).sum() / self.n_eps).item() ,
((self.difficulty == 2).sum() / self.n_eps).item() ,
((self.difficulty == 3).sum() / self.n_eps).item() ,
((self.difficulty == 4).sum() / self.n_eps).item() ,
])
self.sc.s(mode + 'Difficulty').collect(relative_diff)
batch_step_ratio = self.stepRatio.nanmean()
if not math.isnan(batch_step_ratio.item()):
self.sc.s(mode + 'StepRatioOnlySuccess').collect(batch_step_ratio.item())
batch_step_ratio = torch.tensor([0.0]).cpu()
#self.sc.s(mode + 'StepTime').collect(self.time.float().mean().item())
#self.sc.s(mode + 'PropTime').collect(self.proptime)
temp_actions_taken = torch.flatten(self.actions , start_dim = 0, end_dim = 1)
temp_actions_taken = temp_actions_taken[temp_actions_taken.sum(dim = 1) != 0 , : ].mean(dim = 0)
self.sc.s(mode + 'ActionsTaken').collect( temp_actions_taken.cpu().numpy())
#temp_correct_actions = self.correct_act[ self.correct_act.sum(dim = 1) != 0 , :].mean(dim = 0)
#self.sc.s(mode + 'CorrectActions').collect(temp_correct_actions.cpu().numpy())
if eval:
self.sc.s(mode + 'GoalLoc').collect(self.locs_goal[0].numpy())
self.sc.s(mode + 'ActionProbs').collect(self.actions[0].cpu().numpy())
class EpisodeStorage():
""" Class used as storage container for all information generated during run of one episode."""
def __init__(self, CONFIG=CONFIG):
self.CONFIG = CONFIG
self.device = CONFIG.device
self.ep_len = self.CONFIG.RL_max_episode_length +1
self.p_H,self.p_W = self.CONFIG.MISC_patch_size
self.im_H,self.im_W = self.CONFIG.MISC_im_size
self.misc = []
# If grid_game is enabled calculate grid_size
if CONFIG.MISC_grid_game:
self.grid_size = (int(self.im_H / self.p_H) , int(self.im_W / self.p_W))
def initialize(self, image, loc_goal=None, loc_start=None, probs_diff=None):
        # If the terminate flag is set we have received a signal interrupt and should exit
if CONFIG.TERMINATE:
sys.exit(1)
image = image.detach()
self.image = image
self.step = 0
self.time = time.perf_counter()
self.misc = []
# Allocate for those that are always active
self.weights = torch.zeros((self.ep_len-1)).to(self.CONFIG.device)
# TODO: Maybe add an extra dimension if we need duplicate storage
# If softmax agent enabled the action is a onehotencoding
self.actions = torch.zeros((self.ep_len-1 , 8)).to(self.CONFIG.device)
self.locs = torch.zeros((self.ep_len , 4))
# Check if instance segmentation input is enabled
n_chan_images = 3
if CONFIG.RL_priv_use_seg:
if CONFIG.MISC_dataset in ['dubai']:
n_chan_images = 6
elif CONFIG.MISC_dataset in ['masa', 'masa_filt']:
n_chan_images = 4
else:
raise(Exception("Define which type of segmentation this dataset has"))
# Take care of extra channel for some of the crops
n_chan_crops = n_chan_images
self.dists = torch.zeros((self.ep_len))
self.crops = torch.zeros((self.ep_len, n_chan_crops, self.p_H, self.p_W)).to(self.CONFIG.device)
# Sample or load the start and goal crops
if loc_goal is not None and loc_start is not None:
self.loc_goal = loc_goal
self.loc_start = loc_start
self.crop_goal,self.loc_goal = get_deterministic_crops(image,loc_goal)
# Either sample or load the start patch
self.crops[self.step,:,:,:] ,self.loc_start= get_deterministic_crops(image,loc_start)
else:
if probs_diff is None:
self.crops[0,:,:,:] , self.loc_start = get_random_crops(image)
self.crop_goal, self.loc_goal = get_random_crops(image,self.loc_start.cpu(),
max_dist= self.CONFIG.RL_max_start_goal_dist,
min_iou = self.CONFIG.RL_min_start_goal_iou)
self.correct_act = torch.zeros(1,8)
#self.crops[0,:,:,:], self.crop_goal, self.correct_act, self.loc_start,self.loc_goal = sample_doerch_crops(image)
else:
# Sample game with random difficulty according to vector
diff = np.random.choice(4, p = probs_diff.numpy()) + 1
game = sample_grid_games_fixed_distance( diff, 1) * CONFIG.MISC_step_sz
self.crops[0,:,:,:], self.loc_start = get_deterministic_crops(image, game[:,0:2])
self.crop_goal , self.loc_goal = get_deterministic_crops(image, game[:,2:])
self.correct_act = torch.zeros(1,8)
self.crop_goal = self.crop_goal.to(self.CONFIG.device)
self.min_steps = ((self.loc_goal[0,:2] - self.loc_start[0,:2])/int(self.p_H*CONFIG.RL_softmax_step_size)).abs().int().max()
if CONFIG.MISC_grid_game:
self.dists[self.step] = ((self.loc_goal[0,:2] - self.loc_start[0,:2])/int(self.p_H*CONFIG.RL_softmax_step_size)).abs().int().max()
else:
self.dists[self.step] = torch.linalg.norm((self.loc_start-self.loc_goal)[:2])
self.locs[0 ,:] = self.loc_start
self.loc_current = self.loc_start.clone().detach()
# If enabled, create a grid of all possible locations in the image
# fill this grid with ones for where the agent has been
if CONFIG.RL_priv_grid_location:
hg, wg = int(self.im_H / self.p_H) , int(self.im_W / self.p_W)
self.grid_loc = torch.zeros((CONFIG.RL_max_episode_length, hg, wg)).to(CONFIG.device)
self.grid_curr_loc = torch.zeros((CONFIG.RL_max_episode_length, hg, wg)).to(CONFIG.device)
grid_loc_start = int(self.loc_start[0,0].item() / self.p_H) , int(self.loc_start[0,1].item() / self.p_W)
# Fill in initial position
self.grid_loc[:,grid_loc_start[0] ,grid_loc_start[1]] = 1
self.grid_curr_loc[0,grid_loc_start[0] , grid_loc_start[1]] = 1
def update(self, action, reward, loc, crop, misc=None):
# Add a new step to the trajectory
self.actions[self.step,:] = action
self.weights[self.step] = reward
self.step += 1
self.locs[self.step , :] = loc
self.misc.append(misc)
if CONFIG.MISC_grid_game:
self.dists[self.step] = ((self.loc_goal[0,:2] - loc[0,:2])/int(self.p_H*CONFIG.RL_softmax_step_size)).abs().int().max()
else:
self.dists[self.step] = torch.linalg.norm((loc-self.loc_goal)[:2])
self.crops[self.step, :,:,:] = crop
self.loc_current = loc.detach().clone()
# fill in the grid location of where the agent is currently
if CONFIG.RL_priv_grid_location:
grid_loc = int(loc[0,0].item() / self.p_H) , int(loc[0,1].item() / self.p_W)
# If agent has walked outside, don't fill in
if (grid_loc[0] < 0 or grid_loc[0] >= self.grid_size[0]) or (grid_loc[1] < 0 or grid_loc[1] >= self.grid_size[1]):
pass
else:
self.grid_loc[(self.step-1): , grid_loc[0] , grid_loc[1] ] = 1
self.grid_curr_loc[(self.step -1), grid_loc[0] , grid_loc[1] ] = 1
def finish(self):
self.time = (time.perf_counter() - self.time)/self.step
self.dists = self.dists[:self.step]
self.crops = self.crops[:self.step + 1,:,:,:]
self.weights = self.weights[:self.step]
self.weights = rewards_to_go(self.weights)
self.actions = self.actions[:self.step,:]
self.locs = self.locs[0:self.step +1, :]
# Compute final iou
self.iou = compute_iou(self.loc_goal , self.locs[-1,:])
self.hasConverged = self.iou >= CONFIG.RL_done_iou
| 18,061 | 44.38191 | 181 |
py
|
airloc
|
airloc-master/utils/split_images.py
|
import os
import math
import glob
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex,to_rgb
from config import CONFIG
import cv2
import numpy as np
import regex as re
matplotlib.use('Agg')
data = 'masa'
if data == 'dubai':
labels = ['#3C1098', '#8429F6', '#6EC1E4', '#FEDD3A', '#E2A929', '#9B9B9B','#000000' ]
labels = [int(to_rgb(l)[0]*255) for l in labels]
cls = range(1,len(labels)+1)
labels = dict(zip(labels,cls))
print("Splitting dataset into smaller images")
base_path = CONFIG.MISC_dataset_path
source = os.path.join(base_path,'dubai_ful')
destination = os.path.join(base_path,'dubai_out')
if os.path.exists(destination):
print('Destination file already exists')
else:
os.mkdir(destination)
os.mkdir(os.path.join(destination,'images'))
os.mkdir(os.path.join(destination,'labels'))
images_path = glob.glob(os.path.join(source,'*/images/*'))
masks_path = [i[:-25] +'masks'+i[-19:-3] + 'png' for i in images_path]
i = 1
for image_path,mask_path in zip(images_path,masks_path):
image = plt.imread(image_path)
mask = plt.imread(mask_path)
# mask = (mask[:,:,0]*255).astype(int)
# for i_h in range(mask.shape[0]):
# for i_w in range(mask.shape[1]):
# mask[i_h,i_w] = labels[mask[i_h,i_w]]
size = 500
n_H = math.floor(image.shape[0]/size)
n_W = math.floor(image.shape[1]/size)
#image = cv2.resize(image,(n_H*256,n_W*256))
#mask = cv2.resize(mask,(n_H*256,n_W*256))
for i_h in range(n_H):
for i_w in range(n_W):
im = image[i_h*size:(i_h+1)*size,i_w*size:(i_w+1)*size,:]
m = mask[i_h*size:(i_h+1)*size,i_w*size:(i_w+1)*size,:]
plt.imsave(destination + f"/images/{i:04d}.jpg",im)
plt.imsave(destination + f"/labels/{i:04d}.jpg",m)
print(f"{i:04d}")
i += 1
elif data == 'masa':
print("Splitting dataset into smaller images")
base_path = CONFIG.MISC_dataset_path
source = os.path.join(base_path,'masa_full')
destination = os.path.join(base_path,'masa_seven')
if os.path.exists(destination):
print('Destination file already exists')
else:
os.mkdir(destination)
os.mkdir(os.path.join(destination,'image'))
os.mkdir(os.path.join(destination,'label'))
images_path = glob.glob(os.path.join(source,'*tiff/test/*')) + glob.glob(os.path.join(source,'*tiff/train/*')) + glob.glob(os.path.join(source,'*tiff/val/*'))
pattern = '(train|val|test)'
masks_path = [re.sub(pattern, r'\g<1>_labels', i)[:-1] for i in images_path]
zro = []
i = 1
for image_path,mask_path in zip(images_path,masks_path):
image = plt.imread(image_path)
mask = plt.imread(mask_path)
# mask = (mask[:,:,0]*255).astype(int)
# for i_h in range(mask.shape[0]):
# for i_w in range(mask.shape[1]):
# mask[i_h,i_w] = labels[mask[i_h,i_w]]
dim = 700
n_H = math.floor(image.shape[0]/dim)
n_W = math.floor(image.shape[1]/dim)
#image = cv2.resize(image,(n_H*256,n_W*256))
#mask = cv2.resize(mask,(n_H*256,n_W*256))
for i_h in range(n_H):
for i_w in range(n_W):
im = image[i_h*dim:(i_h+1)*dim,i_w*dim:(i_w+1)*dim,:]
m = mask[i_h*dim:(i_h+1)*dim,i_w*dim:(i_w+1)*dim,:]
zro_tmp = (im.sum(axis=-1) == 765).sum()
if zro_tmp < 100:
plt.imsave(destination + f"/image/{i:04d}.jpg",im)
plt.imsave(destination + f"/label/{i:04d}.jpg",m)
print(f"{i:04d}")
i += 1
# if zro_tmp < 2000:
# zro += [zro_tmp]
# if zro_tmp >100 :
# plt.imshow(im)
# print(zro_tmp)
# plt.show()
#
# plt.hist(zro, bins=50)
# plt.show()
| 4,196 | 35.815789 | 162 |
py
|
airloc
|
airloc-master/utils/normalize_dataset.py
|
#!/bin/env python3
import os
import numpy as np
import imageio as iio
import time
import torch
import json
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import random_split,Subset
from utils.dataset_utils import CustomDataset, Dubai, Masa, MasaFilt, MasaFull,MasaSeven, DubaiSeven, ImagesPre
from utils.utils import load_normalize_data
from config import CONFIG
import argparse
"""
Training set:
RGB-means [128.65051054 118.45636216 130.87956071]
RGB-stds [42.03129609 39.19244565 41.53636231]
"""
parser = argparse.ArgumentParser()
parser.add_argument("--dataset" , type = str, default = CONFIG.MISC_dataset, help = "Select which dataset to normalize.")
args = parser.parse_args()
# Set dataset
dataset = args.dataset
print(f"Calculating the mean and std for {dataset}")
# Global vars
BASE_PATH_DATA = CONFIG.MISC_dataset_path
SEED = 0
batch_size = 1
n_chan = 3
transform = None
download = False
if dataset == 'masa':
transform = transforms.Compose([transforms.ToTensor()])
trainset = Masa(CONFIG.MISC_dataset_path,split = 'train',transform = transform)
elif dataset == 'masa_filt':
transform = transforms.Compose([transforms.ToTensor()])
trainset = MasaFilt(CONFIG.MISC_dataset_path,split = 'train',transform = transform)
elif dataset == 'dubai':
CONFIG.RL_priv_use_seg = True
trainset = Dubai(CONFIG.MISC_dataset_path,split = 'train',transform = transform)
n_chan = 6
elif dataset.startswith('custom_'):
trainset = CustomDataset(CONFIG.MISC_dataset_path, CONFIG.MISC_dataset[7:], split='train', transform = transform)
elif dataset == 'masa_full':
trainset = MasaFull(CONFIG.MISC_dataset_path , split = 'train' , transform = transform)
n_chan = 3
elif dataset == 'masa_seven':
trainset = MasaSeven(CONFIG.MISC_dataset_path, split = 'train', transform = transform)
n_chan = 3
elif dataset == 'dubai_seven':
trainset = DubaiSeven(CONFIG.MISC_dataset_path, split = 'train', transform = transform)
n_chan = 3
elif dataset == 'images_pre':
transform = transforms.Compose([transforms.ToTensor()])
trainset = ImagesPre(CONFIG.MISC_dataset_path, split = 'train', transform = transform)
n_chan = 3
elif dataset == 'images_post':
transform = transforms.Compose([transforms.ToTensor()])
trainset = ImagesPre(CONFIG.MISC_dataset_path, split = 'train', transform = transform, post_instead=True)
n_chan = 3
else:
raise(Exception("Unknown dataset"))
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=download, num_workers=2 )
# Calculate mean and std of pixel values in dataset
# See this page for how to do it incrementally with respect to std
# https://stackoverflow.com/questions/5543651/computing-standard-deviation-in-a-stream
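# In the accumulators below, s0 is the per-channel pixel count, s1 the sum and s2 the sum
# of squares, so the sample standard deviation follows the streaming identity
#   std = sqrt((s0 * s2 - s1^2) / (s0 * (s0 - 1)))
# which is exactly what is evaluated after the loop.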
img_means = np.zeros(n_chan)
img_s0s = np.zeros(n_chan)
img_s1s = np.zeros(n_chan)
img_s2s = np.zeros(n_chan)
for it, frame in enumerate(trainloader):
frame = frame[0].detach().numpy()
IM_H ,IM_W = frame.shape[-2:]
# Update running means
for chan_idx in range(n_chan):
img_means[chan_idx] = (it * img_means[chan_idx] + np.mean(frame[:, chan_idx, :,:])) / (it + 1)
img_s0s[chan_idx] += IM_H * IM_W
img_s1s[chan_idx] += np.sum(frame[:, chan_idx, :,:])
img_s2s[chan_idx] += np.sum(frame[:, chan_idx, :,:] * frame[:, chan_idx, :,:])
# Display
if it % 100 == 0:
img_stds = np.sqrt(np.abs(((img_s0s * img_s2s) - img_s1s * img_s1s) / (img_s0s * (img_s0s - 1))))
print(f"Iter {it}/{len(trainloader)}")
print("RGB-means", img_means)
print("RGB-stds", img_stds)
dataset_path = dataset if not dataset.startswith('custom_') else os.path.join("Custom", dataset[7:])
img_stds = np.sqrt(np.abs(((img_s0s * img_s2s) - img_s1s * img_s1s) / (img_s0s * (img_s0s - 1))))
stat_path = os.path.join(CONFIG.MISC_dataset_path,dataset_path,'stats.json')
stats = {"means":list(img_means),"stds":list(img_stds)}
with open(stat_path, 'w') as fp:
json.dump(stats,fp,indent = 4)
print(f"Done!! \nThe mean and std for {dataset} is:")
print("RGB-means", img_means)
print("RGB-stds", img_stds)
| 4,167 | 37.592593 | 121 |
py
|
airloc
|
airloc-master/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
airloc
|
airloc-master/utils/stat_collector.py
|
import os
import sys
import numpy as np
from collections import OrderedDict
import gc
import matplotlib
import matplotlib.pyplot as plt
import math
def replace(string_in, replace_from, replace_to='_'):
if not isinstance(replace_from, list):
replace_from = [replace_from]
string_out = string_in
for replace_entry in replace_from:
string_out = string_out.replace(replace_entry, replace_to)
return string_out
class BaseStat():
"""
Basic statistic from which all other statistic types inherit
"""
def __init__(self, name):
self.name = name
self.ep_idx = 0
self.stat_collector = None
def collect(self, value):
pass
def get_data(self):
return {}
def next_step(self):
pass
def next_ep(self):
self.ep_idx += 1
def next_batch(self):
pass
def compute_mean(self, mean, value, counter):
return (counter * mean + value) / (counter + 1)
def compute_ma(self, ma, value, ma_weight):
return (1 - ma_weight) * ma + ma_weight * value
class AvgStat(BaseStat):
"""
Standard average statistic (can track total means, moving averages,
exponential moving averages etcetera)
"""
def __init__(self, name, coll_freq='ep', ma_weight=0.1):
super(AvgStat, self).__init__(name=name)
self.counter = 0
self.mean = 0.0
self.ma = 0.0
self.last = None
self.means = []
self.mas = []
self.values = []
self.times = []
self.coll_freq = coll_freq
self.ma_weight = ma_weight
def collect(self, value, delta_counter=1, allow_nans = True):
# NOTE : If value is NaN add last value
if np.isnan(value).any() and not allow_nans:
value = self.values[-1] if len(self.values) != 0 else 0
self.counter += delta_counter
self.values.append(value)
self.times.append(self.counter)
self.mean = self.compute_mean(self.mean, value, len(self.means))
self.means.append(self.mean)
if self.counter < 10:
# Want the ma to be more stable early on
self.ma = self.mean
else:
self.ma = self.compute_ma(self.ma, value, self.ma_weight)
self.mas.append(self.ma)
self.last = value
def get_data(self):
return {'times': self.times, 'means': self.means, 'mas': self.mas, 'values': self.values}
def add_last(self):
        last_value = self.get_data()['values'][-1]
self.collect(last_value)
def print(self,path=None,timestamp=None):
if self.counter <= 0:
return
self._print_helper(path=path)
def _print_helper(self, mean=None, ma=None, last=None, path=None):
if path is not None:
file = open(path,'a')
else:
file = sys.stdout
# Set defaults
if mean is None:
mean = self.mean
if ma is None:
ma = self.ma
if last is None:
last = self.last
if isinstance(mean, float):
print('Mean %-35s tot: %10.5f, ma: %10.5f, last: %10.5f' %
(self.name, mean, ma, last),file=file)
else:
try:
                print('Mean %-35s tot: (%.3f' % (self.name, mean[0]), end='', file=file)
                for i in range(1, mean.size - 1):
                    print(', %.3f' % mean[i], end='', file=file)
                print(', %.3f)' % mean[-1], file=file)
                print('%-40s ma: (%.3f' % ('', ma[0]), end='', file=file)
                for i in range(1, ma.size - 1):
                    print(', %.3f' % ma[i], end='', file=file)
                print(', %.3f)' % ma[-1], file=file)
                print('%-40s last: (%.3f' % ('', last[0]), end='', file=file)
                for i in range(1, last.size - 1):
                    print(', %.3f' % last[i], end='', file=file)
                print(', %.3f)' % last[-1], file=file)
            except Exception:
                pass
        if path is not None:
            file.close()
def save(self, save_dir):
file_name = replace(self.name, [' ', '(', ')', '/'], '-')
file_name = replace(file_name, ['<', '>'], '')
file_name += '.npz'
np.savez(os.path.join(save_dir, file_name),
values=np.asarray(self.values), means=np.asarray(self.means),
mas=np.asarray(self.mas), times=np.asarray(self.times))
def plot(self, times=None, values=None, means=None, mas=None, save_dir=None):
# Set defaults
if times is None:
times = self.times
if values is None:
values = self.values
if means is None:
means = self.means
if mas is None:
mas = self.mas
if save_dir is None:
save_dir_given = None
save_dir = os.path.join(self.log_dir, 'stats', 'data')
else:
save_dir_given = save_dir
# Define x-label
if self.coll_freq == 'ep':
xlabel = 'episode'
elif self.coll_freq == 'step':
xlabel = 'step'
if np.asarray(values).ndim > 1:
# Plot all values
self._plot(times, values, self.name + ' all', xlabel, 'y', None,
save_dir_given)
# Plot total means
self._plot(times, means, self.name + ' total mean', xlabel, 'y', None,
save_dir_given)
# Plot moving averages
self._plot(times, mas, self.name + ' total exp ma', xlabel, 'y', None,
save_dir_given)
else:
self._plot_in_same(times, [values, means, mas],
self.name, xlabel, 'y',
['all-data', 'mean', 'ma'],
[None, '-.', '-'], [0.25, 1.0, 1.0],
save_dir_given)
# Also save current data to file
if save_dir_given is None:
file_name = replace(self.name, [' ', '(', ')', '/'], '-')
file_name = replace(file_name, ['<', '>'], '')
os.makedirs(save_dir, exist_ok=True)
np.save(os.path.join(save_dir, file_name), values)
def _plot(self, x, y, title='plot', xlabel='x', ylabel='y', legend=None,
log_dir=None):
plt.figure()
plt.plot(x, y)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.grid(True)
if legend is None:
plt.legend([str(k) for k in range(np.asarray(y).shape[1])])
else:
plt.legend(legend)
title_to_save = replace(title, [' ', '(', ')', '/'], '-')
title_to_save = replace(title_to_save, ['<', '>'], '')
if log_dir is None:
log_dir = os.path.join(self.log_dir, 'stats', 'plots')
if not os.path.isdir(log_dir):
os.makedirs(log_dir, exist_ok=False)
plt.savefig(os.path.join(log_dir, title_to_save + '.png'))
plt.cla()
plt.clf()
plt.close('all')
gc.collect()
def _plot_in_same(self, x, ys, title='plot', xlabel='x', ylabel='y',
legend=None, line_styles=None, alphas=None,
log_dir=None):
if alphas is None:
alphas = [1.0 for _ in range(len(ys))]
plt.figure()
for i in range(len(ys)):
if line_styles[i] is not None:
plt.plot(x, ys[i],
linestyle=line_styles[i], alpha=alphas[i])
else:
plt.plot(x, ys[i], 'yo', alpha=alphas[i])
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.grid(True)
if legend is None:
            plt.legend([str(k) for k in range(len(ys))])
else:
plt.legend(legend)
title_to_save = replace(title, [' ', '(', ')', '/'], '-')
title_to_save = replace(title_to_save, ['<', '>'], '')
if log_dir is None:
log_dir = os.path.join(self.log_dir, 'stats', 'plots')
if not os.path.isdir(log_dir):
os.makedirs(log_dir, exist_ok=False)
plt.savefig(os.path.join(log_dir, title_to_save + '.png'))
plt.cla()
plt.clf()
plt.close('all')
gc.collect()
class StatCollector():
"""
Statistics collector class
"""
def __init__(self, log_dir, tot_nbr_steps, print_iter, exclude_prints = None):
self.stats = OrderedDict()
self.log_dir = log_dir
self.ep_idx = 0
self.step_idx = 0
self.epoch_idx = 0
self.print_iter = print_iter
self.tot_nbr_steps = tot_nbr_steps
self.exclude_prints = exclude_prints
def has_stat(self, name):
return name in self.stats
def register(self, name, stat_info):
if self.has_stat(name):
sys.exit("Stat already exists")
if stat_info['type'] == 'avg':
stat_obj = AvgStat(name, stat_info['freq'])
else:
sys.exit("Stat type not supported")
stat = {'obj': stat_obj, 'name': name, 'type': stat_info['type']}
self.stats[name] = stat
def s(self, name):
return self.stats[name]['obj']
def next_step(self):
self.step_idx += 1
def next_ep(self):
self.ep_idx += 1
for stat_name, stat in self.stats.items():
stat['obj'].next_ep()
if self.ep_idx % self.print_iter == 0:
self.print()
self._plot_to_hdock()
def print(self,path = None):
for stat_name, stat in self.stats.items():
if self.exclude_prints is None or stat_name not in self.exclude_prints:
stat['obj'].print(path=path)
def plot(self):
for stat_name, stat in self.stats.items():
stat['obj'].plot(save_dir=self.log_dir)
def save(self):
for stat_name, stat in self.stats.items():
stat['obj'].save(save_dir=self.log_dir)
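# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# register a single average statistic, feed it random values and write the
# collected data to a demo directory. The directory name is an assumption, and
# print_iter is set high so the periodic auto-printing path is not triggered.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_dir = "stat_collector_demo"
    os.makedirs(demo_dir, exist_ok=True)
    sc = StatCollector(demo_dir, tot_nbr_steps=50, print_iter=1000)
    sc.register("Reward", {"type": "avg", "freq": "ep"})
    for _ in range(50):
        sc.s("Reward").collect(float(np.random.randn()))
        sc.next_ep()
    sc.print()
    sc.save()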
| 9,955 | 31.429967 | 97 |
py
|
aabbcc
|
aabbcc-master/python/setup.py
|
#!/usr/bin/env python
"""
setup.py file for the AABB.cc python interface.
"""
from distutils.core import setup, Extension
aabb_module = Extension('_aabb',
sources = ['aabb_wrap.cxx', '../src/AABB.cc'],
extra_compile_args = ["-O3", "-std=c++11"],
)
setup (name = 'aabb',
author = 'Lester Hedges',
author_email = '[email protected]',
description = 'AABB.cc python wrapper',
ext_modules = [aabb_module],
py_modules = ['aabb'],
url = 'http://github.com/lohedges/aabbcc',
license = 'Zlib',
)
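# Typical build sketch (assumes the SWIG-generated aabb_wrap.cxx already
# exists next to this file, e.g. produced via `swig -c++ -python aabb.i`):
#
#     python setup.py build_ext --inplace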
| 635 | 26.652174 | 71 |
py
|
aabbcc
|
aabbcc-master/python/hard_disc.py
|
#!/usr/bin/env python2
# Copyright (c) 2016-2018 Lester Hedges <[email protected]>
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
#
# 3. This notice may not be removed or altered from any source distribution.
"""An example showing how to use the AABB.cc Python wrapper."""
# Note:
# SWIG allows us direct access to STL vectors in python. See aabb.i for
# full details of the mappings.
#
# As an example, you can create a STL vector containing 10 doubles
# as follows:
#
# doubleVector = aabb.VectorDouble(10)
#
# You can then access most of the usual member functions, e.g. to
# print the size of the vector:
#
# print doubleVector.size()
from __future__ import print_function
import aabb
import math
import random
# Test whether two discs overlap.
def overlaps(position1, position2, periodicity, boxSize, cutOff):
# Compute separation vector.
separation = [0] * 2
separation[0] = position1[0] - position2[0]
separation[1] = position1[1] - position2[1]
# Find minimum image separation.
minimumImage(separation, periodicity, boxSize)
# Squared distance between objects.
rSqd = separation[0]*separation[0] + separation[1]*separation[1]
if rSqd < cutOff:
return True
else:
return False
# Compute the minimum image separation vector between disc centres.
def minimumImage(separation, periodicity, boxSize):
for i in range(0, 2):
if separation[i] < -0.5*boxSize[i]:
separation[i] += periodicity[i]*boxSize[i]
elif separation[i] >= 0.5*boxSize[i]:
separation[i] -= periodicity[i]*boxSize[i]
# Apply periodic boundary conditions.
def periodicBoundaries(position, periodicity, boxSize):
for i in range(0, 2):
if position[i] < 0:
position[i] += periodicity[i]*boxSize[i]
elif position[i] >= boxSize[i]:
position[i] -= periodicity[i]*boxSize[i]
# Print current configuration to VMD trajectory file.
def printVMD(fileName, positionsSmall, positionsLarge):
with open(fileName, 'a') as trajectoryFile:
trajectoryFile.write('%lu\n' % (len(positionsSmall) + len(positionsLarge)))
trajectoryFile.write('\n')
for i in range(0, len(positionsSmall)):
trajectoryFile.write('0 %lf %lf 0\n' % (positionsSmall[i][0], positionsSmall[i][1]))
for i in range(0, len(positionsLarge)):
trajectoryFile.write('1 %lf %lf 0\n' % (positionsLarge[i][0], positionsLarge[i][1]))
#############################################################
# Set parameters, initialise variables and objects. #
#############################################################
nSweeps = 100000 # The number of Monte Carlo sweeps.
sampleInterval = 100 # The number of sweeps per sample.
nSmall = 1000 # The number of small particles.
nLarge = 100 # The number of large particles.
diameterSmall = 1 # The diameter of the small particles.
diameterLarge = 10 # The diameter of the large particles.
density = 0.1 # The system density
maxDisp = 0.1 # Maximum trial displacement (in units of diameter).
# Total particles.
nParticles = nSmall + nLarge
# Number of samples.
nSamples = math.floor(nSweeps / sampleInterval)
# Particle radii.
radiusSmall = 0.5 * diameterSmall
radiusLarge = 0.5 * diameterLarge
# Output formatting flag.
format = int(math.floor(math.log10(nSamples)))
# Set the periodicity of the simulation box.
periodicity = aabb.VectorBool(2)
periodicity[0] = True
periodicity[1] = True
# Work out base length of the simulation box.
baseLength = math.pow((math.pi*(nSmall*diameterSmall + nLarge*diameterLarge))/(4*density), 0.5)
boxSize = aabb.VectorDouble(2)
boxSize[0] = baseLength
boxSize[1] = baseLength
# Seed the random number generator.
random.seed()
# Initialise the AABB trees.
treeSmall = aabb.Tree(2, maxDisp, periodicity, boxSize, nSmall)
treeLarge = aabb.Tree(2, maxDisp, periodicity, boxSize, nLarge)
# Initialise particle position vectors.
positionsSmall = [[0 for i in range(2)] for j in range(nSmall)]
positionsLarge = [[0 for i in range(2)] for j in range(nLarge)]
#############################################################
# Generate the initial AABB trees. #
#############################################################
# First the large particles.
print('Inserting large particles into AABB tree ...')
# Cut-off distance.
cutOff = 2 * radiusLarge
cutOff *= cutOff
# Initialise the position vector.
position = aabb.VectorDouble(2)
# Initialise bounds vectors.
lowerBound = aabb.VectorDouble(2)
upperBound = aabb.VectorDouble(2)
for i in range(0, nLarge):
# Insert the first particle directly.
if i == 0:
# Generate a random particle position.
position[0] = boxSize[0]*random.random()
position[1] = boxSize[1]*random.random()
# Check for overlaps.
else:
# Initialise the overlap flag.
isOverlap = True
while isOverlap:
# Generate a random particle position.
position[0] = boxSize[0]*random.random()
position[1] = boxSize[1]*random.random()
# Compute the lower and upper AABB bounds.
lowerBound[0] = position[0] - radiusLarge
lowerBound[1] = position[1] - radiusLarge
upperBound[0] = position[0] + radiusLarge
upperBound[1] = position[1] + radiusLarge
# Generate the AABB.
AABB = aabb.AABB(lowerBound, upperBound)
# Query AABB overlaps.
particles = treeLarge.query(AABB)
# Flag as no overlap (yet).
isOverlap = False
# Test overlap.
for j in range(0, len(particles)):
if overlaps(position, positionsLarge[particles[j]], periodicity, boxSize, cutOff):
isOverlap = True
break
# Insert the particle into the tree.
treeLarge.insertParticle(i, position, radiusLarge)
# Store the position.
positionsLarge[i] = [position[0], position[1]]
print('Tree generated!')
# Now fill the gaps with the small particles.
print('\nInserting small particles into AABB tree ...')
for i in range(0, nSmall):
# Initialise the overlap flag.
isOverlap = True
# Keep trying until there is no overlap.
while isOverlap:
# Set the cut-off.
cutOff = radiusSmall + radiusLarge
cutOff *= cutOff
# Generate a random particle position.
position[0] = boxSize[0]*random.random()
position[1] = boxSize[1]*random.random()
# Compute the lower and upper AABB bounds.
lowerBound[0] = position[0] - radiusSmall
lowerBound[1] = position[1] - radiusSmall
upperBound[0] = position[0] + radiusSmall
upperBound[1] = position[1] + radiusSmall
# Generate the AABB.
AABB = aabb.AABB(lowerBound, upperBound)
# First query AABB overlaps with the large particles.
particles = treeLarge.query(AABB)
# Flag as no overlap (yet).
isOverlap = False
# Test overlap.
for j in range(0, len(particles)):
if overlaps(position, positionsLarge[particles[j]], periodicity, boxSize, cutOff):
isOverlap = True
break
# Advance to next overlap test.
if not isOverlap:
# Set the cut-off.
cutOff = radiusSmall + radiusSmall
cutOff *= cutOff
# No need to test the first particle.
if i > 0:
# Now query AABB overlaps with other small particles.
particles = treeSmall.query(AABB)
# Test overlap.
for j in range(0, len(particles)):
if overlaps(position, positionsSmall[particles[j]], periodicity, boxSize, cutOff):
isOverlap = True
break
# Insert the particle into the tree.
treeSmall.insertParticle(i, position, radiusSmall)
# Store the position.
positionsSmall[i] = [position[0], position[1]]
print('Tree generated!')
#############################################################
# Perform the dynamics, updating the tree as we go. #
#############################################################
# Clear the trajectory file.
open('trajectory.xyz', 'w').close()
print('\nRunning dynamics ...')
sampleFlag = 0
nSampled = 0
# Initialise the displacement vector.
displacement = [0] * 2
for i in range(0, nSweeps):
for j in range(0, nParticles):
# Choose a random particle.
particle = random.randint(0, nParticles-1)
# Determine the particle type
if particle < nSmall:
particleType = 0
radius = radiusSmall
displacement[0] = maxDisp*diameterSmall*(2*random.random() - 1)
displacement[1] = maxDisp*diameterSmall*(2*random.random() - 1)
position[0] = positionsSmall[particle][0] + displacement[0]
position[1] = positionsSmall[particle][1] + displacement[1]
else:
particleType = 1
particle -= nSmall
radius = radiusLarge
displacement[0] = maxDisp*diameterLarge*(2*random.random() - 1)
displacement[1] = maxDisp*diameterLarge*(2*random.random() - 1)
position[0] = positionsLarge[particle][0] + displacement[0]
position[1] = positionsLarge[particle][1] + displacement[1]
# Apply periodic boundary conditions.
periodicBoundaries(position, periodicity, boxSize)
# Compute the AABB bounds.
lowerBound[0] = position[0] - radius
lowerBound[1] = position[1] - radius
upperBound[0] = position[0] + radius
upperBound[1] = position[1] + radius
# Generate the AABB.
AABB = aabb.AABB(lowerBound, upperBound)
# Query AABB overlaps with small particles.
particles = treeSmall.query(AABB)
# Flag as no overlap (yet).
isOverlap = False
# Set the cut-off
cutOff = radius + radiusSmall
cutOff *= cutOff
# Test overlap.
for k in range(0, len(particles)):
# Don't test self overlap.
if particleType == 1 or particles[k] != particle:
if overlaps(position, positionsSmall[particles[k]], periodicity, boxSize, cutOff):
isOverlap = True
break
# Advance to next overlap test.
if not isOverlap:
# Now query AABB overlaps with the large particles.
particles = treeLarge.query(AABB)
# Set the cut-off.
cutOff = radius + radiusLarge
cutOff *= cutOff
# Test overlap.
for k in range(0, len(particles)):
# Don't test self overlap.
if particleType == 0 or particles[k] != particle:
if overlaps(position, positionsLarge[particles[k]], periodicity, boxSize, cutOff):
isOverlap = True
break
# Accept the move.
if not isOverlap:
# Update the position and AABB tree.
if particleType == 0:
positionsSmall[particle] = [position[0], position[1]]
treeSmall.updateParticle(particle, lowerBound, upperBound)
else:
positionsLarge[particle] = [position[0], position[1]]
treeLarge.updateParticle(particle, lowerBound, upperBound)
sampleFlag += 1
# Print info to screen and append trajectory file.
if sampleFlag == sampleInterval:
sampleFlag = 0
nSampled += 1
printVMD('trajectory.xyz', positionsSmall, positionsLarge)
if format == 1:
print('Saved configuration %2d of %2d' % (nSampled, nSamples))
elif format == 2:
print('Saved configuration %3d of %3d' % (nSampled, nSamples))
elif format == 3:
print('Saved configuration %4d of %4d' % (nSampled, nSamples))
elif format == 4:
print('Saved configuration %5d of %5d' % (nSampled, nSamples))
elif format == 5:
print('Saved configuration %6d of %6d' % (nSampled, nSamples))
print('Done!')
| 13,088 | 33.444737 | 102 |
py
|
emcee3
|
emcee3-master/benchmark.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import time
import numpy as np
import emcee
import emcee3
def lnprob(x):
return -0.5 * np.sum(x**2)
N = 5000
coords = np.random.randn(56, 10)
emcee_sampler = emcee.EnsembleSampler(coords.shape[0], coords.shape[1],
lnprob)
strt = time.time()
emcee_sampler.run_mcmc(coords, N)
print("emcee took {0} seconds".format(time.time() - strt))
emcee3_sampler = emcee3.Sampler()
ens = emcee3.Ensemble(emcee3.Model(lnprob), coords)
strt = time.time()
emcee3_sampler.run(ens, N)
print("emcee3 took {0} seconds".format(time.time() - strt))
| 674 | 18.852941 | 71 |
py
|
emcee3
|
emcee3-master/setup.py
|
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
if "test" in sys.argv:
version = "0.0.0"
else:
# Hackishly inject a constant into builtins to enable importing of the
# package even if numpy isn't installed. Only do this if we're not
# running the tests!
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
builtins.__EMCEE3_SETUP__ = True
import emcee3
version = emcee3.__version__
# Publish to PyPI.
if "publish" in sys.argv:
os.system("python setup.py sdist upload")
os.system("python setup.py bdist_wheel upload")
sys.exit()
# Push a new tag to GitHub.
if "tag" in sys.argv:
os.system("git tag -a {0} -m 'version {0}'".format(version))
os.system("git push --tags")
sys.exit()
# Testing.
class PyTest(TestCommand):
user_options = [("pytest-args=", "a", "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name="emcee3",
version=version,
author="Daniel Foreman-Mackey",
author_email="[email protected]",
packages=[
"emcee3",
"emcee3.moves",
"emcee3.samplers",
"emcee3.pools",
"emcee3.backends",
],
url="http://dan.iel.fm/emcee3/",
license="MIT",
description="Kick ass affine-invariant ensemble MCMC sampling",
long_description=(open("README.rst").read() + "\n\n"
+ "Changelog\n"
+ "---------\n\n"
+ open("HISTORY.rst").read()),
package_data={"": ["LICENSE", "*.rst"]},
include_package_data=True,
install_requires=[
"numpy >= 1.7"
],
tests_require=[
"pytest",
"pytest-cov",
],
cmdclass = {"test": PyTest},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
],
)
| 2,752 | 27.091837 | 74 |
py
|
emcee3
|
emcee3-master/emcee3/autocorr.py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import logging
import numpy as np
__all__ = ["function", "integrated_time", "AutocorrError"]
def function(x, axis=0, fast=False):
"""Estimate the autocorrelation function of a time series using the FFT.
Args:
x: The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for
every other axis.
axis (Optional[int]): The time axis of ``x``. Assumed to be the first
axis if not specified.
fast (Optional[bool]): If ``True``, only use the first ``2^n`` (for
the largest power) entries for efficiency. (default: False)
Returns:
array: The autocorrelation function of the time series.
"""
x = np.atleast_1d(x)
m = [slice(None), ] * len(x.shape)
# For computational efficiency, crop the chain to the largest power of
# two if requested.
if fast:
n = int(2**np.floor(np.log2(x.shape[axis])))
m[axis] = slice(0, n)
        x = x[tuple(m)]
else:
n = x.shape[axis]
# Compute the FFT and then (from that) the auto-correlation function.
f = np.fft.fft(x - np.mean(x, axis=axis), n=2*n, axis=axis)
m[axis] = slice(0, n)
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
    m[axis] = 0
    return acf / acf[tuple(m)]
def integrated_time(x, low=10, high=None, step=1, c=10, full_output=False,
axis=0, fast=False, quiet=False):
"""Estimate the integrated autocorrelation time of a time series.
This estimate uses the iterative procedure described on page 16 of `Sokal's
notes <http://www.stat.unc.edu/faculty/cji/Sokal.pdf>`_ to determine a
reasonable window size.
Args:
x: The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for
every other axis.
low (Optional[int]): The minimum window size to test. (default: ``10``)
high (Optional[int]): The maximum window size to test. (default:
``x.shape[axis] / (2*c)``)
step (Optional[int]): The step size for the window search. (default:
``1``)
c (Optional[float]): The minimum number of autocorrelation times
needed to trust the estimate. (default: ``10``)
full_output (Optional[bool]): Return the final window size as well as
the autocorrelation time. (default: ``False``)
axis (Optional[int]): The time axis of ``x``. Assumed to be the first
axis if not specified.
fast (Optional[bool]): If ``True``, only use the first ``2^n`` (for
the largest power) entries for efficiency. (default: False)
quiet (Optional[bool]): If ``True``, silence the ``AutocorrError``
that should occur if the chain is too short for a reliable
estimate and return ``None`` instead. (default: False)
Returns:
float or array: An estimate of the integrated autocorrelation time of
the time series ``x`` computed along the axis ``axis``.
Optional[int]: The final window size that was used. Only returned if
``full_output`` is ``True``.
    Raises:
AutocorrError: If the autocorrelation time can't be reliably estimated
from the chain. This normally means that the chain is too short.
"""
size = 0.5 * x.shape[axis]
if c * low >= size:
raise AutocorrError("The chain is too short")
# Compute the autocorrelation function.
f = function(x, axis=axis, fast=fast)
# Check the dimensions of the array.
oned = len(f.shape) == 1
m = [slice(None), ] * len(f.shape)
# Loop over proposed window sizes until convergence is reached.
if high is None:
high = int(0.5 * size)
tau = None
for M in np.arange(low, high, step).astype(int):
# Compute the autocorrelation time with the given window.
if oned:
# Special case 1D for simplicity.
tau = 1 + 2 * np.sum(f[1:M])
else:
# N-dimensional case.
m[axis] = slice(1, M)
            tau = 1 + 2 * np.sum(f[tuple(m)], axis=axis)
# Accept the window size if it satisfies the convergence criterion.
if np.all(tau > 1.0) and M > c * tau.max():
if full_output:
return tau, M
return tau
# If the autocorrelation time is too long to be estimated reliably
# from the chain, it should fail.
if c * tau.max() >= size:
break
msg = ("The chain is too short to reliably estimate the autocorrelation "
"time.")
if tau is not None:
msg += " Current estimate: \n{0}".format(tau)
if quiet:
        logging.warning(msg)
return None
raise AutocorrError(msg)
class AutocorrError(Exception):
"""Raised if the chain is too short to estimate an autocorrelation time.
"""
pass
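# ---------------------------------------------------------------------------
# Minimal self-check (illustrative sketch, not part of the original module):
# estimate the integrated autocorrelation time of an AR(1) series whose
# theoretical value is roughly (1 + a) / (1 - a). The parameters are chosen
# arbitrarily for demonstration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    np.random.seed(42)
    a, n = 0.9, 20000
    x = np.empty(n)
    x[0] = np.random.randn()
    for i in range(1, n):
        x[i] = a * x[i - 1] + np.sqrt(1.0 - a * a) * np.random.randn()
    tau = integrated_time(x)
    print("estimated tau: {0}, theory: ~{1:.1f}".format(tau, (1 + a) / (1 - a)))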
| 5,041 | 35.536232 | 79 |
py
|
emcee3
|
emcee3-master/emcee3/state.py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
__all__ = ["State"]
class State(object):
"""The current state of a walker.
This object captures the state of a walker. It will store the coordinates,
probabilities, and any other computed metadata. Metadata can be added by
simply adding an attribute to a :class:`State` object. Any attributes with
names that don't start with an underscore will be serialized by the
:func:`to_array` method.
Note:
Unless the ``accepted`` attribute is ``True``, this object can't
be expected to have the correct data type.
Args:
coords (array[ndim]): The coordinate vector of the walker's state.
log_prior (Optional[float]): The log prior evaluated at ``coords``. If
not provided, it is expected that the
:func:`Model.compute_log_prior` method of a model will save the
``log_prior`` attribute on this object.
log_likelihood (Optional[float]): The log likelihood evaluated at
``coords``. Like ``log_prior``, this should be evaluated by the
model.
accepted (Optional[bool]): Was this proposal accepted?
**kwargs: Any other values to store as metadata.
"""
def __init__(self,
coords,
log_prior=-np.inf,
log_likelihood=-np.inf,
accepted=False,
**metadata):
self.coords = coords
self.log_prior = log_prior
self.log_likelihood = log_likelihood
self.accepted = accepted
for k, v in metadata.items():
setattr(self, k, v)
def __repr__(self):
names = self.dtype.names
values = [
self.log_prior, self.log_likelihood, self.accepted
] + [getattr(self, k) for k in names[4:]]
r = ", ".join("{0}={1!r}".format(a, b)
for a, b in zip(names[1:], values))
return "State({0!r}, {1})".format(self.coords, r)
def __eq__(self, other):
if not self.dtype == other.dtype:
return False
return np.all(self.to_array() == other.to_array())
@property
def dtype(self):
base_columns = ["coords", "log_prior", "log_likelihood", "accepted",
"grad_log_prior", "grad_log_likelihood"]
columns = []
for k, v in sorted(self.__dict__.items()):
if k.startswith("_") or k in base_columns:
continue
v = np.atleast_1d(v)
if v.shape == (1,):
columns.append((k, v.dtype))
else:
columns.append((k, v.dtype, v.shape))
return np.dtype([
("coords", np.float64, (len(self.coords),)),
("log_prior", np.float64),
("log_likelihood", np.float64),
("accepted", bool),
] + columns)
def to_array(self, out=None):
"""Serialize the state to a structured numpy array representation.
This representation will include all attributes of this instance that
don't have a name beginning with an underscore. There will also always
be special fields: ``coords``, ``log_prior``, ``log_likelihood``, and
``accepted``.
Args:
out (Optional[array]): If provided, the state will be serialized
in place.
Returns:
array: The serialized state.
"""
if out is None:
out = np.empty(1, self.dtype)
for k in out.dtype.names:
if k.startswith("_"):
continue
out[k] = getattr(self, k)
out["coords"] = self.coords
out["log_prior"] = self.log_prior
out["log_likelihood"] = self.log_likelihood
out["accepted"] = self.accepted
return out
@classmethod
def from_array(cls, array):
"""Reconstruct a saved state from a structured numpy array.
Args:
array (array): An array produced by serializing a state using the
:func:`to_array` method.
Returns:
State: The reconstructed state.
"""
self = cls(array["coords"][0],
log_prior=array["log_prior"][0],
log_likelihood=array["log_likelihood"][0],
accepted=array["accepted"][0])
for k in array.dtype.names:
if k.startswith("_"):
continue
setattr(self, k, array[k][0])
return self
@property
def log_probability(self):
"""A helper attribute that provides access to the log probability.
"""
return self.log_prior + self.log_likelihood
@property
def grad_log_probability(self):
"""A helper attribute that provides access to the log probability.
"""
return self.grad_log_prior + self.grad_log_likelihood
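# ---------------------------------------------------------------------------
# Minimal round-trip sketch (illustrative only, not part of the original
# module): serialize a state with one extra metadata field and rebuild it.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    s = State(np.array([0.5, -1.2]), log_prior=0.0, log_likelihood=-3.4,
              accepted=True, walker_id=7)
    arr = s.to_array()
    assert State.from_array(arr) == s
    print(arr.dtype.names)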
| 4,948 | 32.439189 | 78 |
py
|
emcee3
|
emcee3-master/emcee3/model.py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
from .state import State
from .numgrad import numerical_gradient_1, numerical_gradient_2
__all__ = ["is_model", "Model", "SimpleModel"]
def is_model(model):
return (
hasattr(model, "compute_log_probability") and
callable(model.compute_log_probability)
)
class Model(object):
"""An interface to a probabilistic model.
Subclasses should overload the :func:`Model.compute_log_prior` and
:func:`Model.compute_log_likelihood` methods to expose the model. To use
any Hamiltonian moves (:class:`moves.HMCMove`, :class:`moves.NUTSMove`,
etc.), :func:`Model.compute_grad_log_prior` and
:func:`Model.compute_grad_log_likelihood` should also be implemented. Each
of these methods should take a :class:`State` as input, update the relevant
attributes in place, and return the updated :class:`State`.
"""
def compute_log_prior(self, state, **kwargs):
"""Compute the log prior probability of the model.
Since this method is called first, the input ``state`` can only be
expected to have a ``coords`` attribute. Subclasses should implement
this method and overwrite the ``log_prior`` attribute on the input
``state``.
Args:
state (State): The current state.
Returns:
State: The updated state.
"""
raise NotImplementedError("'compute_log_prior' must be implemented by "
"subclasses")
def compute_grad_log_prior(self, state, **kwargs):
"""Compute the gradient of the log prior with respect to coords.
Subclasses should implement this method and overwrite the
``grad_log_prior`` attribute on the input ``state``.
Args:
state (State): The current state.
Returns:
State: The updated state.
"""
raise NotImplementedError("'compute_grad_log_prior' must be "
"implemented by subclasses")
def compute_log_likelihood(self, state, **kwargs):
"""Compute the log likelihood of the model.
This method should always be called after :func:`compute_log_prior`.
Subclasses should implement this method and overwrite the
``log_likelihood`` attribute on the input ``state``.
Args:
state (State): The current state.
Returns:
State: The updated state.
"""
raise NotImplementedError("'compute_log_likelihood' must be "
"implemented by subclasses")
def compute_grad_log_likelihood(self, state, **kwargs):
"""Compute the gradient of the log likelihood with respect to coords.
Subclasses should implement this method and overwrite the
``grad_log_likelihood`` attribute on the input ``state``.
Args:
state (State): The current state.
Returns:
State: The updated state.
"""
raise NotImplementedError("'compute_grad_log_likelihood' must be "
"implemented by subclasses")
def compute_log_probability(self, state, **kwargs):
"""Compute the log probability of the model.
Subclasses won't generally need to overload this method. Instead,
:func:`compute_log_prior` and :func:`compute_log_likelihood` should be
implemented.
Args:
state (State): The current state.
Returns:
State: The updated state.
"""
# Compute the prior.
state = self.compute_log_prior(state, **kwargs)
if not np.isfinite(state.log_prior):
state.log_prior = -np.inf
return state
# Compute the likelihood.
state = self.compute_log_likelihood(state, **kwargs)
if not np.isfinite(state.log_likelihood):
state.log_likelihood = -np.inf
return state
def __call__(self, coords, **kwargs):
state = State(coords)
return self.compute_log_probability(state, **kwargs)
def compute_grad_log_probability(self, state, **kwargs):
"""Compute the gradient of the log probability of the model.
Subclasses won't generally need to overload this method. Instead,
:func:`compute_grad_log_prior` and :func:`compute_grad_log_likelihood`
should be implemented.
Args:
state (State): The current state.
Returns:
State: The updated state.
"""
state.grad_log_likelihood = np.zeros(len(state.coords))
state = self.compute_grad_log_prior(state, **kwargs)
if not np.all(np.isfinite(state.grad_log_prior)):
state.grad_log_prior = np.zeros(len(state.coords))
return state
state = self.compute_grad_log_likelihood(state, **kwargs)
if not np.all(np.isfinite(state.grad_log_likelihood)):
state.grad_log_likelihood = np.zeros(len(state.coords))
return state
class SimpleModel(Model):
"""The simplest modeling interface.
This model interface wraps functions describing the components of the
model. At a minimum, a function evaluating the log likelihood (up to a
constant) must be provided. In this case, the prior function is assumed to
be uniform and improper. All functions must have the call structure::
log_likelihood(coords, *args)
where ``coords`` is a coordinate vector and ``*args`` can be provided
using the ``args`` keyword argument. The ``log_likelihood`` and
``log_prior`` functions should return scalars and the ``grad_*`` functions
should return ``numpy.array`` objects of the same length as the input
``coords``.
Args:
log_likelihood (callable): A function that evaluates the log
likelihood of the model.
log_prior (Optional[callable]): A function that evaluates the log
prior of the model. If not provided, this will be assumed to be
uniform and improper.
grad_log_likelihood (Optional[callable]): A function that evaluates the
gradient of the log likelihood of the model. If needed but not
provided, this will be evaluated numerically using a first order
method.
grad_log_prior (Optional[callable]): A function that evaluates the
gradient of the log prior of the model. If needed but not
provided, this will be evaluated numerically using a first order
method.
args (Optional[tuple]): Any other arguments to be provided to the
probability functions.
"""
def __init__(self,
log_likelihood,
log_prior=None,
grad_log_likelihood=None,
grad_log_prior=None,
args=tuple()):
# If no prior function is provided, we'll assume it to be flat and
# improper.
if log_prior is None:
log_prior = default_log_prior_function
grad_log_prior = default_grad_log_prior_function
self.log_prior_func = log_prior
self.log_likelihood_func = log_likelihood
self.args = args
# By default, numerically compute gradients.
if grad_log_prior is None:
grad_log_prior = numerical_gradient_1(self.log_prior_func)
self.grad_log_prior_func = grad_log_prior
if grad_log_likelihood is None:
grad_log_likelihood = numerical_gradient_1(
self.log_likelihood_func)
self.grad_log_likelihood_func = grad_log_likelihood
def compute_log_prior(self, state, **kwargs):
state.log_prior = self.log_prior_func(
state.coords, *(self.args)
)
return state
compute_log_prior.__doc__ = Model.compute_log_prior.__doc__
def compute_grad_log_prior(self, state, **kwargs):
state.grad_log_prior = self.grad_log_prior_func(
state.coords, *(self.args)
)
return state
compute_grad_log_prior.__doc__ = Model.compute_grad_log_prior.__doc__
def compute_log_likelihood(self, state, **kwargs):
state.log_likelihood = self.log_likelihood_func(state.coords,
*(self.args))
return state
compute_log_likelihood.__doc__ = Model.compute_log_likelihood.__doc__
def compute_grad_log_likelihood(self, state, **kwargs):
state.grad_log_likelihood = self.grad_log_likelihood_func(
state.coords, *(self.args)
)
return state
compute_grad_log_likelihood.__doc__ = \
Model.compute_grad_log_likelihood.__doc__
def check_grad(self, coords, **kwargs):
"""Check the gradients numerically.
Args:
coords (array): The coordinates.
Returns:
bool: If the numerical gradients and analytic gradients satisfy
``numpy.allclose``.
"""
com_g = (
self.grad_log_likelihood_func(coords, *(self.args)) +
self.grad_log_prior_func(coords, *(self.args))
)
        num_g = numerical_gradient_2(
            lambda x: (self.log_likelihood_func(x, *(self.args)) +
                       self.log_prior_func(x, *(self.args))),
            coords, **kwargs)
return np.allclose(com_g, num_g)
def default_log_prior_function(x, *args):
"""A uniform improper prior."""
return 0.0
def default_grad_log_prior_function(x, *args):
"""The gradient of a uniform improper prior."""
return np.zeros(len(x))
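# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# wrap a Gaussian log-likelihood with SimpleModel and evaluate one state.
# Run as a module, e.g. `python -m emcee3.model`, so the relative imports
# above resolve.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    def demo_log_likelihood(x):
        return -0.5 * np.sum(x ** 2)

    demo_model = SimpleModel(demo_log_likelihood)
    demo_state = demo_model(np.array([0.1, -0.3]))
    print(demo_state.log_prior, demo_state.log_likelihood)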
| 9,545 | 34.225092 | 79 |
py
|
emcee3
|
emcee3-master/emcee3/__init__.py
|
# -*- coding: utf-8 -*-
__version__ = "3.0.0.dev0"
try:
__EMCEE3_SETUP__
except NameError:
__EMCEE3_SETUP__ = False
if not __EMCEE3_SETUP__:
__all__ = [
"moves",
"pools",
"autocorr",
"Model",
"SimpleModel",
"Sampler",
"Ensemble",
"State",
]
from . import moves, pools, autocorr
from .model import Model, SimpleModel
from .ensemble import Ensemble
from .samplers import Sampler
from .state import State
| 506 | 17.777778 | 41 |
py
|
emcee3
|
emcee3-master/emcee3/ensemble.py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
from .pools import DefaultPool
from .model import is_model, SimpleModel
__all__ = ["Ensemble"]
class Ensemble(object):
"""The state of the ensemble of walkers.
Args:
model (callable or Model): The model specification. This can be a
callable, an instance of :class:`SimpleModel`, or another instance
that quacks like a :class:`Model`. If ``model`` is a callable, it
should take a coordinate vector as input and return the evaluated
log-probability up to an additive constant.
coords (Optional[array[nwalkers, ndim]]): The array of walker
coordinate vectors.
pool (Optional): A pool object that exposes a map function for
parallelization purposes.
random (Optional): A numpy-compatible random number generator. By
default, this will be the built-in ``numpy.random`` module but if
you want the ensemble to own its own state, you can supply an
instance of ``numpy.random.RandomState``.
"""
def __init__(self, model, coords, pool=None, random=None):
if is_model(model):
self.model = model
else:
if not callable(model):
raise ValueError("the 'model' must have a "
"'compute_log_probability' method or be "
"callable")
self.model = SimpleModel(model)
self.pool = DefaultPool() if pool is None else pool
if random is None:
self.random = np.random.RandomState()
self.random.set_state(np.random.get_state())
else:
self.random = random
# Interpret the dimensions of the ensemble.
self._coords = np.atleast_1d(coords).astype(np.float64)
if not len(self._coords.shape) == 2:
raise ValueError("Invalid ensemble coordinate dimensions")
self.nwalkers, self.ndim = self._coords.shape
# Initialize the walkers at these coordinates.
self.walkers = self.propose(self._coords)
self.acceptance = np.ones(self.nwalkers, dtype=bool)
if not np.all(np.isfinite(self.log_probability)):
raise ValueError("invalid or zero-probability coordinates")
def propose(self, coords):
"""Given a new set of coordinates return arrays of log-prior and
log-likelihood values.
Args:
coords (array[nwalkers, ndim]): The new coordinate matrix.
Returns:
list: A list of walker :class:`State` objects evaluated at the
specified coordinates.
"""
return list(self.pool.map(self.model, coords))
def update(self, walkers, subset=None):
"""Update the coordinate matrix and probability containers given the
current list of walkers. Moves should call this after proposing and
accepting the walkers.
Note:
Only the walkers with ``accepted == True`` are updated.
Args:
walkers (list[State]): A list of walkers states.
subset: If provided, ``walkers`` only corresponds to the indicated
subset of the walkers.
Raises:
RuntimeError: If an invalid state is accepted.
"""
if subset is None:
subset = slice(None)
for j, s in zip(np.arange(self.nwalkers)[subset], walkers):
self.acceptance[j] = s.accepted
if s.accepted:
self.walkers[j] = s
if not np.isfinite(s.log_probability):
raise RuntimeError("invalid or zero-probability proposal "
"accepted")
def __getstate__(self):
# In order to be generally picklable, we need to discard the pool
# object before trying.
        d = self.__dict__.copy()
d.pop("pool", None)
return d
def __setstate__(self, state):
self.__dict__ = state
self.pool = DefaultPool()
def __len__(self):
return self.nwalkers
@property
def dtype(self):
return self.walkers[0].dtype
def __getitem__(self, key):
if isinstance(key, int):
return self.walkers[key]
try:
return self.get_value(key)
except (AttributeError, TypeError):
return self.walkers[key]
def get_value(self, key, out=None):
if out is None:
v = np.asarray(getattr(self.walkers[0], key))
out = np.empty((self.nwalkers, ) + v.shape, dtype=v.dtype)
for i, s in enumerate(self.walkers):
out[i] = getattr(s, key)
return out
def __getattr__(self, key):
return self.get_value(key)
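# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# build an ensemble of walkers around a Gaussian log-probability. Run as a
# module, e.g. `python -m emcee3.ensemble`, so the relative imports resolve.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    def demo_log_prob(x):
        return -0.5 * np.sum(x ** 2)

    demo_coords = np.random.randn(8, 3)
    demo_ensemble = Ensemble(demo_log_prob, demo_coords)
    print(demo_ensemble.log_probability.shape)  # -> (8,)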
| 4,828 | 33.741007 | 78 |
py
|