# Repository: kraken
# File: kraken-main/tests/test_merging.py
from kraken.lib.train import RecognitionModel
from kraken.lib.exceptions import KrakenInputException
from kraken.lib.default_specs import RECOGNITION_HYPER_PARAMS
#from kraken.ketos.util import _expand_gt
from pathlib import Path
from unittest import TestCase
from unicodedata import normalize
_here = Path(__file__).parent
base_model = _here.joinpath(Path("./resources/merge_tests/merge_codec_nfd.mlmodel"))
training_data = [str(_here.joinpath(Path("./resources/merge_tests/merger.arrow")))]
xml_data = [str(_here.joinpath(Path("./resources/merge_tests/0014.xml")))]
class TestMerging(TestCase):
"""
Testing merging and fine-tuning models with previous codecs
The base model is trained with 0006.gt.txt and 0007.gt.txt (base.arrow)
The merger.arrow is composed of 0008.gt.txt and 0021.gt.txt.
This expands on test_train to test unicode normalization on top of `resize`
"""
def _get_model(self, new_hyp_params=None, **generic_params):
hyp_params = RECOGNITION_HYPER_PARAMS.copy()
if new_hyp_params:
hyp_params.update(new_hyp_params)
params = dict(
hyper_params=hyp_params,
model=base_model,
training_data=training_data,
num_workers=1,
format_type="binary",
resize="fail"
)
if generic_params:
params.update(**generic_params)
return RecognitionModel(**params)
    def test_no_resize_fails(self):
        """ Asserts that fitting fails when `resize` is left at its default 'fail' mode """
        model = self._get_model()
        with self.assertRaises(KrakenInputException):
            model.setup("fit")
def test_merging_new(self):
""" Asserts that new, which only takes into account new data, works as intended """
model = self._get_model(resize="new")
model.setup("fit")
self.assertEqual(
model.nn.codec.encode("1").shape, (0, ),
"1 is unknown to the original model and the second dataset, produces nothing"
)
self.assertEqual(
model.nn.codec.encode("9").shape, (1, ),
"9 is known to the new dataset and should be encoded through `new`"
)
self.assertEqual(
model.nn.codec.encode("x").shape, (0, ),
"x is known to the loaded model and shouldn't be encoded through `new`"
)
def test_merging_union(self):
""" Asserts that union, which only takes into account new the original codec and the new data,
works as intended
"""
model = self._get_model(resize="union")
model.setup("fit")
self.assertEqual(
model.nn.codec.encode("1").shape, (0, ),
"1 is unknown to the original model and the second dataset, produces nothing"
)
self.assertEqual(
model.nn.codec.encode("9").shape, (1, ),
"9 is known to the new dataset and should be encoded through `new`"
)
self.assertEqual(
model.nn.codec.encode("x").shape, (1, ),
"x is known to the loaded model and should be encoded through `new`"
)
def test_merging_union_with_nfd(self):
""" Asserts that union, which only takes into account new the original codec and the new data,
works as intended
"""
model = self._get_model(resize="union", new_hyp_params={"normalization": "NFD"})
model.setup("fit")
self.assertEqual(
model.nn.codec.encode("1").shape, (0, ),
"1 is unknown to the original model and the second dataset, produces nothing"
)
self.assertEqual(
model.nn.codec.encode("9").shape, (1, ),
"9 is known to the new dataset and should be encoded through `union`"
)
self.assertEqual(
model.nn.codec.encode("x").shape, (1, ),
"x is known to the loaded model and should be encoded through `union`"
)
self.assertEqual(
model.nn.codec.encode("ẽ").shape, (0, ),
"ẽ (unnormalized) should not work in `union` mode because it should be split in two"
)
self.assertEqual(
model.nn.codec.encode(normalize("NFD", "ẽ")).shape, (2, ),
"ẽ should work in `union` mode because it should be split in two and is in the training data"
)
self.assertEqual(
model.nn.codec.encode(normalize("NFD", "Ũ")).shape, (2, ),
"Ũ should work in `union` mode because it should be split in two and is in the training data and the "
"original model"
)
def test_merging_new_with_NFD(self):
""" Asserts that new, which only takes into account new data, works as intended """
model = self._get_model(resize="new", new_hyp_params={"normalization": "NFD"})
model.setup("fit")
self.assertEqual(
model.nn.codec.encode("1").shape, (0, ),
"1 is unknown to the original model and the second dataset, produces nothing"
)
self.assertEqual(
model.nn.codec.encode("9").shape, (1, ),
"9 is known to the new dataset and should be encoded through `new`"
)
self.assertEqual(
model.nn.codec.encode("x").shape, (0, ),
"x is only known to the loaded model and shouldn't be encoded through `new`"
)
self.assertEqual(
model.nn.codec.encode("ẽ").shape, (0, ),
"ẽ (unnormalized) should not work in `new` mode because it should be split in two"
)
self.assertEqual(
model.nn.codec.encode(normalize("NFD", "ẽ")).shape, (2, ),
"ẽ should work in `new` mode because it should be split in two and is in the training data"
)
self.assertEqual(
model.nn.codec.encode(normalize("NFD", "Ũ")).shape, (1, ),
"Ũ should not work in `union` mode because it should be split in two and U is only in the original model"
)
def test_merging_new_with_NFD_two_different_kind_of_dataset(self):
""" Asserts that new, which only takes into account new data, works as intended, including with XML Dataset """
model = self._get_model(resize="new", format_type="xml",
training_data=xml_data, new_hyp_params={"normalization": "NFD"})
model.setup("fit")
self.assertEqual(
model.nn.codec.encode("1").shape, (0, ),
"1 is unknown to the original model and the second dataset, produces nothing"
)
self.assertEqual(
model.nn.codec.encode("9").shape, (1, ),
"9 is known to the new dataset and should be encoded through `new`"
)
self.assertEqual(
model.nn.codec.encode("x").shape, (0, ),
"x is known to the loaded model and shouldn't be encoded through `new`"
)
self.assertEqual(
model.nn.codec.encode("ẽ").shape, (0, ),
"ẽ (unnormalized) should not work in `new` mode because it should be split in two"
)
self.assertEqual(
model.nn.codec.encode(normalize("NFD", "ẽ")).shape, (2, ),
"ẽ should work in `new` mode because it should be split in two and is in the training data"
)
self.assertEqual(
model.nn.codec.encode(normalize("NFD", "Ũ")).shape, (1, ),
"Ũ should not work in `new` mode because it should be split in two and U is only in the original model"
)
self.assertEqual(
model.nn.codec.encode(normalize("NFD", "ã")).shape, (2, ),
"ã should work in `new` mode because it should be split in two"
)
def test_merging_union_with_NFD_two_different_kind_of_dataset(self):
""" Asserts that union works as intended, including with XML Dataset """
model = self._get_model(resize="union", format_type="xml",
training_data=xml_data, new_hyp_params={"normalization": "NFD"})
model.setup("fit")
self.assertEqual(
model.nn.codec.encode("1").shape, (0, ),
"1 is unknown to the original model and the second dataset, produces nothing"
)
self.assertEqual(
model.nn.codec.encode("9").shape, (1, ),
"9 is known to the new dataset and should be encoded through `union`"
)
self.assertEqual(
model.nn.codec.encode("x").shape, (1, ),
"x is known to the loaded model and should be encoded through `union`"
)
self.assertEqual(
model.nn.codec.encode("ẽ").shape, (0, ),
"ẽ (unnormalized) should not work in `union`+NFD mode because it should be split in two"
)
self.assertEqual(
model.nn.codec.encode(normalize("NFD", "ẽ")).shape, (2, ),
"ẽ should work in `union` mode because it should be split in two and is in the training data"
)
self.assertEqual(
model.nn.codec.encode(normalize("NFD", "Ũ")).shape, (2, ),
"Ũ should work in `union` mode because it should be split in two and U is in the original model"
)
self.assertEqual(
model.nn.codec.encode(normalize("NFD", "ã")).shape, (2, ),
"ã should work in `union` mode because it should be split in two"
)
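
# Illustrative sketch, not part of the original suite: probing how the two
# `resize` modes treat a character ('x') that only the base model knows.
# Only APIs already exercised by TestMerging are used; running this needs
# the same test resources to be present. The __main__ guard keeps it inert
# under pytest collection.
if __name__ == '__main__':
    for mode in ('new', 'union'):
        m = RecognitionModel(hyper_params=RECOGNITION_HYPER_PARAMS.copy(),
                             model=base_model,
                             training_data=training_data,
                             num_workers=1,
                             format_type='binary',
                             resize=mode)
        m.setup('fit')
        # `new` drops 'x' (shape (0,)), `union` keeps it (shape (1,)).
        print(mode, m.nn.codec.encode('x').shape)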

# File: kraken-main/tests/test_dataset.py
# -*- coding: utf-8 -*-
import unittest
from pathlib import Path
from pytest import raises
from PIL import Image
from kraken.lib.dataset import ImageInputTransforms, BaselineSet
from kraken.lib.util import is_bitonal
from kraken.lib.exceptions import KrakenInputException
thisfile = Path(__file__).resolve().parent
resources = thisfile / 'resources'
def check_output(self, config, im, output_tensor):
if config['height'] != 0:
self.assertEqual(config['height'], output_tensor.shape[1])
if config['width'] != 0:
self.assertEqual(config['width'], output_tensor.shape[2])
if config['force_binarization'] or is_bitonal(im):
self.assertEqual(len(output_tensor.int().unique()), 2)
if config['channels'] == 3:
self.assertEqual(output_tensor.shape[0], 3)
class TestBaselineSet(unittest.TestCase):
"""
Tests for the BaselineSet segmentation dataset class
"""
def setUp(self):
self.doc = resources / '170025120000003,0074.xml'
self.transforms = ImageInputTransforms(batch=1,
height=200,
width=100,
channels=1,
pad=0)
def test_baselineset_simple_xml(self):
"""
Tests simple BaselineSet instantiation
"""
ds = BaselineSet(imgs=[self.doc, self.doc],
im_transforms=self.transforms,
mode='xml')
sample = ds[0]
self.assertEqual(len(ds), 2)
self.assertEqual(ds.num_classes, 10)
self.assertEqual(sample['image'].shape, (1, 200, 100))
self.assertEqual(sample['target'].shape, (ds.num_classes, 200, 100))
def test_baselineset_simple_valid_baselines(self):
"""
Test baseline whitelisting in BaselineSet
"""
# filter out $pac and $pag baseline classes
ds = BaselineSet(imgs=[self.doc, self.doc],
im_transforms=self.transforms,
valid_baselines=['$par', '$tip'],
mode='xml')
sample = ds[0]
self.assertEqual(len(ds), 2)
self.assertEqual(ds.num_classes, 8)
self.assertEqual(set(ds.class_mapping['baselines'].keys()), set(('$tip', '$par')))
self.assertNotIn('$pac', ds.class_mapping['baselines'])
self.assertNotIn('$pag', ds.class_mapping['baselines'])
self.assertEqual(sample['image'].shape, (1, 200, 100))
self.assertEqual(sample['target'].shape, (ds.num_classes, 200, 100))
def test_baselineset_simple_valid_regions(self):
"""
Test region whitelisting in BaselineSet
"""
# filter out $tip and $par regions
ds = BaselineSet(imgs=[self.doc, self.doc],
im_transforms=self.transforms,
valid_regions=['$pag', '$pac'],
mode='xml')
sample = ds[0]
self.assertEqual(len(ds), 2)
self.assertEqual(ds.num_classes, 8)
self.assertEqual(set(ds.class_mapping['regions'].keys()), set(('$pag', '$pac')))
self.assertNotIn('$par', ds.class_mapping['regions'])
self.assertNotIn('$tip', ds.class_mapping['regions'])
self.assertEqual(sample['image'].shape, (1, 200, 100))
self.assertEqual(sample['target'].shape, (ds.num_classes, 200, 100))
def test_baselineset_simple_merge_baselines(self):
"""
Test baseline merging in BaselineSet
"""
# merge $par into $tip
ds = BaselineSet(imgs=[self.doc, self.doc],
im_transforms=self.transforms,
merge_baselines={'$par': '$tip'},
mode='xml')
sample = ds[0]
self.assertEqual(len(ds), 2)
self.assertEqual(ds.num_classes, 9)
self.assertEqual(set(ds.class_mapping['baselines'].keys()), set(('$tip', '$pag', '$pac')))
self.assertEqual(len(ds.targets[0]['baselines']['$tip']), 18)
self.assertNotIn('$par', ds.class_mapping['baselines'])
self.assertEqual(sample['image'].shape, (1, 200, 100))
self.assertEqual(sample['target'].shape, (ds.num_classes, 200, 100))
def test_baselineset_merge_after_valid_baselines(self):
"""
Test that filtering with valid_baselines occurs before merging.
"""
# merge $par and $pac into $tip but discard $par before
ds = BaselineSet(imgs=[self.doc, self.doc],
im_transforms=self.transforms,
valid_baselines=('$tip', '$pac'),
merge_baselines={'$par': '$tip', '$pac': '$tip'},
mode='xml')
sample = ds[0]
self.assertEqual(len(ds), 2)
self.assertEqual(ds.num_classes, 7)
self.assertEqual(set(ds.class_mapping['baselines'].keys()), set(('$tip',)))
self.assertEqual(len(ds.targets[0]['baselines']['$tip']), 26)
self.assertNotIn('$par', ds.class_mapping['baselines'])
self.assertEqual(sample['image'].shape, (1, 200, 100))
self.assertEqual(sample['target'].shape, (ds.num_classes, 200, 100))
def test_baselineset_merge_after_valid_regions(self):
"""
Test that filtering with valid_regions occurs before merging.
"""
# merge $par and $pac into $tip but discard $par before
ds = BaselineSet(imgs=[self.doc, self.doc],
im_transforms=self.transforms,
valid_regions=('$tip', '$pac'),
merge_regions={'$par': '$tip', '$pac': '$tip'},
mode='xml')
sample = ds[0]
self.assertEqual(len(ds), 2)
self.assertEqual(ds.num_classes, 7)
self.assertEqual(set(ds.class_mapping['regions'].keys()), set(('$tip',)))
self.assertEqual(len(ds.targets[0]['regions']['$tip']), 2)
self.assertNotIn('$par', ds.class_mapping['regions'])
self.assertEqual(sample['image'].shape, (1, 200, 100))
self.assertEqual(sample['target'].shape, (ds.num_classes, 200, 100))
class TestInputTransforms(unittest.TestCase):
"""
Tests for ImageInputTransforms class
"""
def setUp(self):
self.im = Image.open(resources / '000236.png')
self.simple_inst = {'batch': 1,
'height': 48,
'width': 0,
'channels': 1,
'pad': (16, 0),
'valid_norm': False,
'force_binarization': False}
self.simple_inst_norm = {'batch': 1,
'height': 48,
'width': 0,
'channels': 1,
'pad': (16, 0),
'valid_norm': True,
'force_binarization': False}
self.simple_inst_rgb = {'batch': 1,
'height': 48,
'width': 0,
'channels': 3,
'pad': (16, 0),
'valid_norm': False,
'force_binarization': False}
self.simple_inst_norm_rgb = {'batch': 1,
'height': 48,
'width': 0,
'channels': 3,
'pad': (16, 0),
'valid_norm': True,
'force_binarization': False}
self.channel_height_inst = {'batch': 1,
'height': 1,
'width': 0,
'channels': 72,
'pad': (16, 0),
'valid_norm': False,
'force_binarization': False}
self.invalid_channels = {'batch': 1,
'height': 48,
'width': 0,
'channels': 4,
'pad': (16, 0),
'valid_norm': False,
'force_binarization': False}
def test_imageinputtransforms_simple(self):
"""
Simple ImageInputTransforms instantiation.
"""
tf = ImageInputTransforms(**self.simple_inst)
for k, v in self.simple_inst.items():
self.assertEqual(getattr(tf, k), v)
self.assertFalse(tf.centerline_norm)
check_output(self, self.simple_inst, self.im, tf(self.im))
def test_imageinputtransforms_simple_rgb(self):
"""
Simple RGB ImageInputTransforms instantiation.
"""
tf = ImageInputTransforms(**self.simple_inst_rgb)
for k, v in self.simple_inst_rgb.items():
self.assertEqual(getattr(tf, k), v)
self.assertFalse(tf.centerline_norm)
check_output(self, self.simple_inst_rgb, self.im, tf(self.im))
def test_imageinputtransforms_norm_rgb(self):
"""
RGB ImageInputTransforms instantiation with centerline normalization
valid (but not enabled).
"""
tf = ImageInputTransforms(**self.simple_inst_norm_rgb)
for k, v in self.simple_inst_norm_rgb.items():
self.assertEqual(getattr(tf, k), v)
self.assertFalse(tf.centerline_norm)
check_output(self, self.simple_inst_norm_rgb, self.im, tf(self.im))
def test_imageinputtransforms_simple_norm(self):
"""
ImageInputTransforms instantiation with centerline normalization valid.
"""
tf = ImageInputTransforms(**self.simple_inst_norm)
for k, v in self.simple_inst_norm.items():
self.assertEqual(getattr(tf, k), v)
self.assertTrue(tf.centerline_norm)
check_output(self, self.simple_inst_norm, self.im, tf(self.im))
def test_imageinputtransforms_channel_height(self):
"""
ImageInputTransforms with height in channel dimension
"""
tf = ImageInputTransforms(**self.channel_height_inst)
for k, v in self.channel_height_inst.items():
if k == 'channels':
self.assertEqual(1, tf.channels)
elif k == 'height':
self.assertEqual(self.channel_height_inst['channels'], tf.height)
else:
self.assertEqual(getattr(tf, k), v)
self.assertFalse(tf.centerline_norm)
self.channel_height_inst['height'] = self.channel_height_inst['channels']
self.channel_height_inst['channels'] = 1
check_output(self, self.channel_height_inst, self.im, tf(self.im))
def test_imageinputtransforms_invalid_channels(self):
"""
ImageInputTransforms instantiation with invalid number of channels
"""
with raises(KrakenInputException):
            ImageInputTransforms(**self.invalid_channels)
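
# Illustrative sketch, not part of the original suite: wrapping BaselineSet
# in a standard PyTorch DataLoader. The constructor arguments mirror
# TestBaselineSet above; default collation works because the transform pins
# every sample to a fixed 1x200x100 shape.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    tf = ImageInputTransforms(batch=1, height=200, width=100, channels=1, pad=0)
    ds = BaselineSet(imgs=[resources / '170025120000003,0074.xml'],
                     im_transforms=tf,
                     mode='xml')
    loader = DataLoader(ds, batch_size=1)
    batch = next(iter(loader))
    print(batch['image'].shape, batch['target'].shape)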

# File: kraken-main/tests/test_train.py
# -*- coding: utf-8 -*-
import unittest
import json
import kraken
from pytest import raises
from pathlib import Path
from kraken.lib import xml
from kraken.lib.train import KrakenTrainer, RecognitionModel, SegmentationModel
from kraken.lib.exceptions import KrakenInputException
thisfile = Path(__file__).resolve().parent
resources = thisfile / 'resources'
class TestKrakenTrainer(unittest.TestCase):
"""
Tests for KrakenTrainer class
"""
def setUp(self):
self.xml = resources / '170025120000003,0074.xml'
self.bls = xml.parse_page(self.xml)
self.box_lines = [resources / '000236.png']
self.model = resources / 'model_small.mlmodel'
def test_krakentrainer_rec_box_load_fail(self):
training_data = self.box_lines
evaluation_data = self.box_lines
module = RecognitionModel(format_type='path',
model=self.model,
training_data=training_data,
evaluation_data=evaluation_data,
resize='fail')
with raises(KrakenInputException):
module.setup()
def test_krakentrainer_rec_bl_load_fail(self):
"""
Tests that the proper exception is raised when loading model not fitting the dataset.
"""
training_data = [self.xml]
evaluation_data = [self.xml]
module = RecognitionModel(format_type='xml',
model=self.model,
training_data=training_data,
evaluation_data=evaluation_data,
resize='fail')
with raises(KrakenInputException):
module.setup()
def test_krakentrainer_rec_box_load_union(self):
"""
Tests that adaptation works in `union` mode.
        The dataset brings 15 new characters, the input model had 3, and one special character is added, for 19 outputs in total.
"""
training_data = self.box_lines
evaluation_data = self.box_lines
module = RecognitionModel(format_type='path',
model=self.model,
training_data=training_data,
evaluation_data=evaluation_data,
resize='union')
module.setup("fit")
self.assertEqual(module.nn.seg_type, 'bbox')
self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.GroundTruthDataset)
trainer = KrakenTrainer(max_steps=1)
self.assertEqual(module.nn.named_spec[-1].split("c")[-1], '19')
def test_krakentrainer_rec_box_load_new(self):
"""
Tests that adaptation works in `new` mode.
"""
training_data = self.box_lines
evaluation_data = self.box_lines
module = RecognitionModel(format_type='path',
model=self.model,
training_data=training_data,
evaluation_data=evaluation_data,
resize='new')
module.setup("fit")
self.assertEqual(module.nn.seg_type, 'bbox')
self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.GroundTruthDataset)
trainer = KrakenTrainer(max_steps=1)
self.assertEqual(module.nn.named_spec[-1].split("c")[-1], '16')
def test_krakentrainer_rec_box_append(self):
"""
Tests that appending new layers onto a loaded model works.
"""
training_data = self.box_lines
evaluation_data = self.box_lines
module = RecognitionModel(format_type='path',
model=self.model,
append=1,
spec='[Cr4,4,32]',
training_data=training_data,
evaluation_data=evaluation_data)
module.setup()
self.assertEqual(module.nn.seg_type, 'bbox')
self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.GroundTruthDataset)
self.assertTrue(module.nn.spec.startswith('[1,48,0,1 Cr{C_0}4,2,1,4,2 Cr{C_1}4,4,32 O{O_2}'))
trainer = KrakenTrainer(max_steps=1)
def test_krakentrainer_rec_bl_load(self):
training_data = [self.xml]
evaluation_data = [self.xml]
module = RecognitionModel(format_type='xml',
model=self.model,
training_data=training_data,
evaluation_data=evaluation_data,
resize='fail')
with raises(KrakenInputException):
module.setup()
def test_krakentrainer_rec_bl_load_union(self):
training_data = [self.xml]
evaluation_data = [self.xml]
module = RecognitionModel(format_type='xml',
model=self.model,
training_data=training_data,
evaluation_data=evaluation_data,
resize='union')
module.setup()
self.assertEqual(module.nn.seg_type, 'baselines')
self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
trainer = KrakenTrainer(max_steps=1)
self.assertEqual(module.nn.named_spec[-1].split("c")[-1], '60')
def test_krakentrainer_rec_bl_load_new(self):
training_data = [self.xml]
evaluation_data = [self.xml]
module = RecognitionModel(format_type='xml',
model=self.model,
training_data=training_data,
evaluation_data=evaluation_data,
resize='new')
module.setup()
self.assertEqual(module.nn.seg_type, 'baselines')
self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
trainer = KrakenTrainer(max_steps=1)
self.assertEqual(module.nn.named_spec[-1].split("c")[-1], '60')
def test_krakentrainer_rec_bl_append(self):
training_data = [self.xml]
evaluation_data = [self.xml]
module = RecognitionModel(format_type='xml',
model=self.model,
append=1,
spec='[Cr4,4,32]',
training_data=training_data,
evaluation_data=evaluation_data)
module.setup()
self.assertEqual(module.nn.seg_type, 'baselines')
self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
self.assertTrue(module.nn.spec.startswith('[1,48,0,1 Cr{C_0}4,2,1,4,2 Cr{C_1}4,4,32 O{O_2}'))
trainer = KrakenTrainer(max_steps=1)
def test_krakentrainer_rec_box_path(self):
"""
Tests recognition trainer constructor with legacy path training data.
"""
training_data = self.box_lines
evaluation_data = self.box_lines
module = RecognitionModel(format_type='path',
training_data=training_data,
evaluation_data=evaluation_data)
module.setup()
self.assertEqual(module.nn.seg_type, 'bbox')
self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.GroundTruthDataset)
trainer = KrakenTrainer(max_steps=1)
def test_krakentrainer_rec_bl_xml(self):
"""
Tests recognition trainer constructor with XML training data.
"""
training_data = [self.xml]
evaluation_data = [self.xml]
module = RecognitionModel(format_type='xml',
training_data=training_data,
evaluation_data=evaluation_data)
module.setup()
self.assertEqual(module.nn.seg_type, 'baselines')
self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
self.assertEqual(len(module.train_set.dataset), 44)
self.assertEqual(len(module.val_set.dataset), 44)
trainer = KrakenTrainer(max_steps=1)
def test_krakentrainer_rec_bl_dict(self):
"""
Tests recognition trainer constructor with dictionary style training data.
"""
training_data = [{'image': resources / 'bw.png', 'text': 'foo', 'baseline': [[10, 10], [300, 10]], 'boundary': [[10, 5], [300, 5], [300, 15], [10, 15]]}]
evaluation_data = [{'image': resources / 'bw.png', 'text': 'foo', 'baseline': [[10, 10], [300, 10]], 'boundary': [[10, 5], [300, 5], [300, 15], [10, 15]]}]
module = RecognitionModel(format_type=None,
training_data=training_data,
evaluation_data=evaluation_data)
module.setup()
self.assertEqual(module.nn.seg_type, 'baselines')
self.assertIsInstance(module.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
trainer = KrakenTrainer(max_steps=1)
def test_krakentrainer_rec_bl_augment(self):
"""
Test that augmentation is added if specified.
"""
training_data = [self.xml]
evaluation_data = [self.xml]
module = RecognitionModel(format_type='xml',
training_data=training_data,
evaluation_data=evaluation_data)
module.setup()
self.assertEqual(module.train_set.dataset.aug, None)
module = RecognitionModel({'augment': True},
format_type='xml',
training_data=training_data,
evaluation_data=evaluation_data)
module.setup()
self.assertIsInstance(module.train_set.dataset.aug, kraken.lib.dataset.recognition.DefaultAugmenter)
def test_krakentrainer_rec_box_augment(self):
"""
Test that augmentation is added if specified.
"""
training_data = self.box_lines
evaluation_data = self.box_lines
module = RecognitionModel(format_type='path',
training_data=training_data,
evaluation_data=evaluation_data)
module.setup()
self.assertEqual(module.train_set.dataset.aug, None)
module = RecognitionModel({'augment': True},
format_type='path',
training_data=training_data,
evaluation_data=evaluation_data)
module.setup()
self.assertIsInstance(module.train_set.dataset.aug, kraken.lib.dataset.recognition.DefaultAugmenter)
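
# Illustrative sketch, not part of the original suite: the tests above
# construct KrakenTrainer but never run it. A single optimization step
# could look like this; `fit` is the usual pytorch-lightning entry point
# and invokes the module's setup hooks itself.
if __name__ == '__main__':
    xml_file = resources / '170025120000003,0074.xml'
    module = RecognitionModel(format_type='xml',
                              training_data=[xml_file],
                              evaluation_data=[xml_file])
    trainer = KrakenTrainer(max_steps=1)
    trainer.fit(module)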

# File: kraken-main/tests/test_lineest.py
# -*- coding: utf-8 -*-
import unittest
from PIL import Image
from pathlib import Path
from pytest import raises
from kraken.lib import lineest
thisfile = Path(__file__).resolve().parent
resources = thisfile / 'resources'
class TestLineest(unittest.TestCase):
"""
Testing centerline estimator
"""
def setUp(self):
self.lnorm = lineest.CenterNormalizer()
def test_dewarp_bw(self):
"""
Test dewarping of a single line in B/W
"""
with Image.open(resources / '000236.png') as im:
o = lineest.dewarp(self.lnorm, im.convert('1'))
self.assertEqual(self.lnorm.target_height, o.size[1])
def test_dewarp_gray(self):
"""
Test dewarping of a single line in grayscale
"""
        with Image.open(resources / '000236.png') as im:
o = lineest.dewarp(self.lnorm, im.convert('L'))
self.assertEqual(self.lnorm.target_height, o.size[1])
def test_dewarp_fail_color(self):
"""
Test dewarping of a color line fails
"""
with raises(ValueError):
            with Image.open(resources / '000236.png') as im:
lineest.dewarp(self.lnorm, im.convert('RGB'))
def test_dewarp_bw_undewarpable(self):
"""
Test dewarping of an undewarpable line.
"""
        with Image.open(resources / 'ONB_ibn_19110701_010.tif_line_1548924556947_449.png') as im:
o = lineest.dewarp(self.lnorm, im)
self.assertEqual(self.lnorm.target_height, o.size[1])
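
# Illustrative sketch, not part of the original suite: dewarping a line
# image and saving the height-normalized result. The output file name is
# arbitrary.
if __name__ == '__main__':
    lnorm = lineest.CenterNormalizer()
    with Image.open(resources / '000236.png') as im:
        dewarped = lineest.dewarp(lnorm, im.convert('L'))
        dewarped.save('000236_dewarped.png')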

# File: kraken-main/tests/test_cli.py
# -*- coding: utf-8 -*-
import os
import click
import unittest
import tempfile
import numpy as np
from PIL import Image
from pathlib import Path
from click.testing import CliRunner
from kraken.kraken import cli
from pytest import raises
thisfile = Path(__file__).resolve().parent
resources = thisfile / 'resources'
class TestCLI(unittest.TestCase):
"""
Testing the kraken CLI
"""
def setUp(self):
self.temp = tempfile.NamedTemporaryFile(delete=False)
self.runner = CliRunner()
self.color_img = resources / 'input.tif'
self.bw_img = resources / 'bw.png'
def tearDown(self):
self.temp.close()
os.unlink(self.temp.name)
def test_binarize_color(self):
"""
Tests binarization of color images.
"""
with tempfile.NamedTemporaryFile() as fp:
result = self.runner.invoke(cli, ['-i', self.color_img, fp.name, 'binarize'])
self.assertEqual(result.exit_code, 0)
self.assertEqual(tuple(map(lambda x: x[1], Image.open(fp).getcolors())), (0, 255))
def test_binarize_bw(self):
"""
Tests binarization of b/w images.
"""
with tempfile.NamedTemporaryFile() as fp:
result = self.runner.invoke(cli, ['-i', self.bw_img, fp.name, 'binarize'])
self.assertEqual(result.exit_code, 0)
bw = np.array(Image.open(self.bw_img))
new = np.array(Image.open(fp.name))
self.assertTrue(np.all(bw == new))
def test_segment_color(self):
"""
Tests that segmentation is aborted when given color image.
"""
with tempfile.NamedTemporaryFile() as fp:
result = self.runner.invoke(cli, ['-r', '-i', self.color_img, fp.name, 'segment'])
self.assertEqual(result.exit_code, 1)
def test_segment_color_noraise(self):
"""
        Tests that segmentation does not exit with code 1 when given a color image in no-raise mode.
"""
with tempfile.NamedTemporaryFile() as fp:
result = self.runner.invoke(cli, ['-i', self.color_img, fp.name, 'segment'])
self.assertEqual(result.exit_code, 0)
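
# Illustrative sketch, not part of the original suite: kraken's CLI is a
# chained click group, so binarization and segmentation can run in one
# invocation. The output file name is arbitrary.
if __name__ == '__main__':
    runner = CliRunner()
    result = runner.invoke(cli, ['-i', str(resources / 'input.tif'),
                                 'out.json', 'binarize', 'segment'])
    print(result.exit_code, result.output)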

# File: kraken-main/tests/test_binarization.py
# -*- coding: utf-8 -*-
import unittest
from pytest import raises
from PIL import Image
from pathlib import Path
from kraken.binarization import nlbin
from kraken.lib.exceptions import KrakenInputException
thisfile = Path(__file__).resolve().parent
resources = thisfile / 'resources'
class TestBinarization(unittest.TestCase):
"""
Tests of the nlbin function for binarization of images
"""
def test_not_binarize_empty(self):
"""
Test that mode '1' images aren't binarized again.
"""
with raises(KrakenInputException):
with Image.new('1', (1000,1000)) as im:
nlbin(im)
def test_not_binarize_bw(self):
"""
Test that mode '1' images aren't binarized again.
"""
with Image.open(resources / 'bw.png') as im:
self.assertEqual(im, nlbin(im))
def test_binarize_no_bw(self):
"""
Tests binarization of image formats without a 1bpp mode (JPG).
"""
with Image.open(resources / 'input.jpg') as im:
res = nlbin(im)
# calculate histogram and check if only pixels of value 0/255 exist
self.assertEqual(254, res.histogram().count(0), msg='Output not '
'binarized')
def test_binarize_tif(self):
"""
Tests binarization of RGB TIFF images.
"""
        with Image.open(resources / 'input.tif') as im:
res = nlbin(im)
# calculate histogram and check if only pixels of value 0/255 exist
self.assertEqual(254, res.histogram().count(0), msg='Output not '
'binarized')
def test_binarize_grayscale(self):
"""
Test binarization of mode 'L' images.
"""
with Image.open(resources / 'input.tif') as im:
res = nlbin(im.convert('L'))
# calculate histogram and check if only pixels of value 0/255 exist
self.assertEqual(254, res.histogram().count(0), msg='Output not '
'binarized')
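
# Illustrative sketch, not part of the original suite: binarizing an RGB
# image with the default nlbin parameters and writing the result to disk.
# The output file name is arbitrary.
if __name__ == '__main__':
    with Image.open(resources / 'input.tif') as im:
        nlbin(im).save('input_bw.png')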

# File: kraken-main/tests/test_rpred.py
# -*- coding: utf-8 -*-
import os
import json
import pytest
import unittest
from PIL import Image
from pytest import raises
from pathlib import Path
from collections import defaultdict
from kraken.lib.models import load_any
from kraken.rpred import rpred, mm_rpred, BaselineOCRRecord, BBoxOCRRecord
from kraken.lib.exceptions import KrakenInputException
thisfile = Path(__file__).resolve().parent
resources = thisfile / 'resources'
class TestBBoxRecords(unittest.TestCase):
"""
Tests the bounding box OCR record.
"""
def setUp(self):
with open(resources / 'records.json', 'r') as fp:
self.box_records = json.load(fp)
self.ltr_record = self.box_records[0]
def test_bbox_record_cuts(self):
"""
Make sure that the cuts of a record are converted to absolute coordinates.
"""
record = BBoxOCRRecord(**self.ltr_record)
self.assertEqual(record.cuts, [[[1437, 119], [1437, 256], [1449, 256], [1449, 119]],
[[1484, 119], [1484, 256], [1496, 256], [1496, 119]],
[[1508, 119], [1508, 256], [1520, 256], [1520, 119]],
[[1568, 119], [1568, 256], [1568, 256], [1568, 119]],
[[1603, 119], [1603, 256], [1603, 256], [1603, 119]],
[[1615, 119], [1615, 256], [1627, 256], [1627, 119]],
[[1639, 119], [1639, 256], [1639, 256], [1639, 119]],
[[1663, 119], [1663, 256], [1674, 256], [1674, 119]],
[[1698, 119], [1698, 256], [1698, 256], [1698, 119]],
[[1722, 119], [1722, 256], [1734, 256], [1734, 119]],
[[1746, 119], [1746, 256], [1758, 256], [1758, 119]],
[[1793, 119], [1793, 256], [1805, 256], [1805, 119]],
[[1817, 119], [1817, 256], [1829, 256], [1829, 119]],
[[1853, 119], [1853, 256], [1853, 256], [1853, 119]],
[[1876, 119], [1876, 256], [1888, 256], [1888, 119]],
[[1924, 119], [1924, 256], [1936, 256], [1936, 119]],
[[1959, 119], [1959, 256], [1971, 256], [1971, 119]],
[[2007, 119], [2007, 256], [2019, 256], [2019, 119]],
[[2054, 119], [2054, 256], [2054, 256], [2054, 119]],
[[2078, 119], [2078, 256], [2090, 256], [2090, 119]],
[[2149, 119], [2149, 256], [2149, 256], [2149, 119]],
[[2161, 119], [2161, 256], [2173, 256], [2173, 119]]])
def test_bbox_record_redisplay(self):
"""
Test that a display order record remains in display order when
requesting a DO record.
"""
record = BBoxOCRRecord(**self.ltr_record, display_order=True)
self.assertEqual(record, record.display_order())
def test_bbox_record_relogical(self):
"""
Test that a logical order record remains in logical order when
requesting a LO record.
"""
record = BBoxOCRRecord(**self.ltr_record, display_order=False)
self.assertEqual(record, record.logical_order())
def test_bbox_record_display(self):
"""
test display order conversion of record.
"""
record = BBoxOCRRecord(**self.ltr_record, display_order=False)
re_record = record.display_order()
self.assertEqual(re_record.prediction, 'في معجزاته عليه السلام')
self.assertEqual(re_record[:][1], ((1437, 119), (2173, 119), (2173, 256), (1437, 256)))
self.assertAlmostEqual(re_record[2:8][2], 0.9554762, places=4)
def test_bbox_record_logical(self):
"""
Test logical order conversion of record.
"""
record = BBoxOCRRecord(**self.ltr_record, display_order=True)
re_record = record.logical_order()
self.assertEqual(re_record.prediction, 'في معجزاته عليه السلام')
self.assertEqual(re_record[:][1], ((1437, 119), (2173, 119), (2173, 256), (1437, 256)))
self.assertAlmostEqual(re_record[2:8][2], 0.9554762, places=4)
def test_bbox_record_slicing(self):
"""
Tests simple slicing/aggregation of elements in record.
"""
record = BBoxOCRRecord(**self.ltr_record, display_order=True)
pred, cut, conf = record[1:8]
self.assertEqual(pred, 'السلا ه')
self.assertEqual(cut, ((1484, 119), (1674, 119), (1674, 256), (1484, 256)))
self.assertAlmostEqual(conf, 0.9259478, places=4)
    def test_bbox_record_slicing_stepped(self):
"""
Tests complex slicing/aggregation of elements in record.
"""
record = BBoxOCRRecord(**self.ltr_record, display_order=True)
pred, cut, conf = record[1:5:2]
self.assertEqual(pred, 'اس')
self.assertEqual(cut, ((1484, 119), (1568, 119), (1568, 256), (1484, 256)))
self.assertAlmostEqual(conf, 0.74411, places=4)
class TestBaselineRecords(unittest.TestCase):
"""
Tests the baseline OCR record.
"""
def setUp(self):
self.bidi_record = ()
self.ltr_record = ()
def test_baseline_record_cuts(self):
"""
Make sure that the cuts of a record are converted to absolute coordinates.
"""
pass
def test_baseline_record_redisplay(self):
"""
Test that a display order record remains in display order when
requesting a DO record.
"""
pass
def test_baseline_record_relogical(self):
"""
Test that a logical order record remains in logical order when
requesting a LO record.
"""
pass
def test_baseline_record_display(self):
"""
test display order conversion of record.
"""
pass
def test_baseline_record_logical(self):
"""
Test logical order conversion of record.
"""
pass
def test_baseline_record_slicing(self):
"""
Tests simple slicing/aggregation of elements in record.
"""
pass
    def test_baseline_record_slicing_stepped(self):
"""
Tests complex slicing/aggregation of elements in record.
"""
pass
class TestRecognition(unittest.TestCase):
"""
Tests of the recognition facility and associated routines.
"""
def setUp(self):
self.im = Image.open(resources / 'bw.png')
self.overfit_line = Image.open(resources / '000236.png')
self.model = load_any(resources / 'overfit.mlmodel')
def tearDown(self):
self.im.close()
def test_rpred_bbox_outbounds(self):
"""
Tests correct handling of invalid bbox line coordinates.
"""
with raises(KrakenInputException):
pred = rpred(self.model, self.im, {'boxes': [[-1, -1, 10000, 10000]], 'text_direction': 'horizontal'}, True)
next(pred)
@pytest.mark.xfail
def test_rpred_bl_outbounds(self):
"""
Tests correct handling of invalid baseline coordinates.
"""
with raises(KrakenInputException):
pred = rpred(self.model, self.im, {'lines': [{'tags': {'type': 'default'},
'baseline': [[0,0], [10000, 0]],
'boundary': [[-1, -1], [-1, 10000], [10000, 10000], [10000, -1]]}],
'text_direction': 'horizontal',
'type': 'baselines'}, True)
next(pred)
def test_simple_bbox_rpred(self):
"""
Tests simple recognition without tags.
"""
pred = rpred(self.model, self.overfit_line, {'boxes': [[0, 0, 2544, 156]], 'text_direction': 'horizontal'}, True)
record = next(pred)
self.assertEqual(record.prediction, 'ܡ ܘܡ ܗ ܡܕܐ ܐ ܐܐ ܡ ܗܗܐܐܐܕ')
def test_simple_bl_rpred(self):
"""
Tests simple recognition without tags.
"""
pred = rpred(self.model, self.overfit_line, {'boxes': [[0, 0, 2544, 156]], 'text_direction': 'horizontal'}, True)
record = next(pred)
self.assertEqual(record.prediction, 'ܡ ܘܡ ܗ ܡܕܐ ܐ ܐܐ ܡ ܗܗܐܐܐܕ')
def test_mm_rpred_bbox_missing_tags(self):
"""
Test that mm_rpred fails when tags are missing
"""
with raises(KrakenInputException):
pred = mm_rpred({'default': self.model},
self.overfit_line,
{'boxes': [[('default', [0, 0, 2544, 156])],
[('foobar', [0, 0, 2544, 156])]],
'text_direction': 'horizontal',
'script_detection': True},
True)
def test_mm_rpred_bl_missing_tags(self):
"""
Test that mm_rpred fails when tags are missing
"""
with raises(KrakenInputException):
pred = mm_rpred({'default': self.model},
self.overfit_line,
{'lines': [{'tags': {'type': 'default'},
'baseline': [[0,0], [10000, 0]],
'boundary': [[-1, -1], [-1, 10000], [10000, 10000], [10000, -1]]},
{'tags': {'type': 'foobar'},
'baseline': [[0,0], [10000, 0]],
'boundary': [[-1, -1], [-1, 10000], [10000, 10000], [10000, -1]]}],
'text_direction': 'horizontal',
'type': 'baselines'},
True)
def test_mm_rpred_bbox_ignore_tags(self):
"""
Tests mm_rpred recognition with ignore tags.
"""
pred = mm_rpred({'default': self.model},
self.overfit_line,
{'boxes': [[('foobar', [0, 0, 2544, 156])],
[('default', [0, 0, 2544, 156])]],
'text_direction': 'horizontal',
'script_detection': True},
True,
tags_ignore=['foobar'])
record = next(pred)
self.assertEqual(record.prediction, '')
record = next(pred)
self.assertEqual(record.prediction, 'ܡ ܘܡ ܗ ܡܕܐ ܐ ܐܐ ܡ ܗܗܐܐܐܕ')
def test_mm_rpred_bbox_default_tags(self):
"""
Tests recognition with default tag.
"""
pred = mm_rpred(defaultdict(lambda: self.model),
self.overfit_line,
{'boxes': [[('foobar', [0, 0, 2544, 156])],
[('default', [0, 0, 2544, 156])]],
'text_direction': 'horizontal',
'script_detection': True},
True)
record = next(pred)
self.assertEqual(record.prediction, 'ܡ ܘܡ ܗ ܡܕܐ ܐ ܐܐ ܡ ܗܗܐܐܐܕ')
record = next(pred)
self.assertEqual(record.prediction, 'ܡ ܘܡ ܗ ܡܕܐ ܐ ܐܐ ܡ ܗܗܐܐܐܕ')
def test_mm_rpred_bl_ignore_tags(self):
"""
Tests baseline recognition with ignore tags.
"""
pred = mm_rpred({'default': self.model},
self.overfit_line,
{'lines': [{'tags': {'type': 'foobar'},
'baseline': [[0, 10], [2543, 10]],
'boundary': [[0, 0], [2543, 0], [2543, 155], [0, 155]]},
{'tags': {'type': 'default'},
'baseline': [[0, 10], [2543, 10]],
'boundary': [[0, 0], [2543, 0], [2543, 155], [0, 155]]}],
'script_detection': True,
'type': 'baselines'},
True,
tags_ignore=['foobar'])
record = next(pred)
self.assertEqual(record.prediction, '')
record = next(pred)
self.assertEqual(record.prediction, '.ܗ ܣܗܐ ܕ ܣ ܗ ܕܗܗ ܟܕܗܣ ܠ ܐ .ܣܕܐܣ. ܗ ')
def test_mm_rpred_bl_default_tags(self):
"""
Tests baseline recognition with default tag.
"""
pred = mm_rpred(defaultdict(lambda: self.model),
self.overfit_line,
{'lines': [{'tags': {'type': 'foobar'},
'baseline': [[0, 10], [2543, 10]],
'boundary': [[0, 0], [2543, 0], [2543, 155], [0, 155]]},
{'tags': {'type': 'default'},
'baseline': [[0, 10], [2543, 10]],
'boundary': [[0, 0], [2543, 0], [2543, 155], [0, 155]]}],
'script_detection': True,
'type': 'baselines'},
True)
record = next(pred)
self.assertEqual(record.prediction, '.ܗ ܣܗܐ ܕ ܣ ܗ ܕܗܗ ܟܕܗܣ ܠ ܐ .ܣܕܐܣ. ܗ ')
record = next(pred)
self.assertEqual(record.prediction, '.ܗ ܣܗܐ ܕ ܣ ܗ ܕܗܗ ܟܕܗܣ ܠ ܐ .ܣܕܐܣ. ܗ ')
def test_mm_rpred_bl_nobidi(self):
"""
Tests baseline recognition without bidi reordering.
"""
pred = mm_rpred(defaultdict(lambda: self.model),
self.overfit_line,
{'lines': [{'tags': {'type': 'default'},
'baseline': [[0, 10], [2543, 10]],
'boundary': [[0, 0], [2543, 0], [2543, 155], [0, 155]]}],
'script_detection': True,
'type': 'baselines'},
bidi_reordering=False)
record = next(pred)
self.assertEqual(record.prediction, 'ܕܗ .ܣܐܗܗ.ܐ ܗܣ ܕ ܗܣ ܗ.ܗܝܣܗ ܣ ܗܢ ܪܗܗܕ ܐ ܗܠ')
def test_mm_rpred_bbox_nobidi(self):
"""
Tests bbox recognition without bidi reordering.
"""
pred = mm_rpred(defaultdict(lambda: self.model),
self.overfit_line,
{'boxes': [[('foobar', [0, 0, 2544, 156])],
[('default', [0, 0, 2544, 156])]],
'text_direction': 'horizontal',
'script_detection': True},
bidi_reordering=False)
record = next(pred)
self.assertEqual(record.prediction, 'ܕܗܣܐܕ ܪܝ .ܡܡ ܐܠܠ ܗܠ ܐܘܗ ܟܘܗܢ ܡܡ ܐܠ')
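
# Illustrative sketch, not part of the original suite: consuming the full
# rpred generator instead of a single next() call, mirroring the setup in
# TestRecognition above.
if __name__ == '__main__':
    model = load_any(resources / 'overfit.mlmodel')
    with Image.open(resources / '000236.png') as line:
        preds = rpred(model, line, {'boxes': [[0, 0, 2544, 156]],
                                    'text_direction': 'horizontal'}, True)
        for record in preds:
            print(record.prediction)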

# File: kraken-main/tests/test_models.py
# -*- coding: utf-8 -*-
import os
import pickle
import unittest
import tempfile
from pytest import raises
from pathlib import Path
import kraken.lib.lstm
from kraken.lib import models
from kraken.lib.exceptions import KrakenInvalidModelException
thisfile = Path(__file__).resolve().parent
resources = thisfile / 'resources'
class TestModels(unittest.TestCase):
"""
Testing model loading routines
"""
def setUp(self):
self.temp = tempfile.NamedTemporaryFile(delete=False)
def tearDown(self):
self.temp.close()
os.unlink(self.temp.name)
def test_load_invalid(self):
"""
Tests correct handling of invalid files.
"""
with raises(KrakenInvalidModelException):
models.load_any(self.temp.name)
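
# Illustrative sketch, not part of the original suite: loading a known-good
# model from the test resources (the one exercised by test_rpred.py) and
# reporting what load_any returned.
if __name__ == '__main__':
    net = models.load_any(resources / 'overfit.mlmodel')
    print(type(net).__name__)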

# File: kraken-main/tests/test_readingorder.py
# -*- coding: utf-8 -*-
import pytest
import unittest
from typing import Sequence, Tuple
import shapely.geometry as geom
import numpy as np
from kraken.lib.segmentation import is_in_region, reading_order, topsort
def polygon_slices(polygon: Sequence[Tuple[int, int]]) -> Tuple[slice, slice]:
"""Convert polygons to slices for reading_order"""
linestr = geom.LineString(polygon)
slices = (slice(linestr.bounds[1], linestr.bounds[3]),
slice(linestr.bounds[0], linestr.bounds[2]))
return slices
class TestReadingOrder(unittest.TestCase):
"""
Test the reading order algorithms.
"""
def test_is_in_region(self):
"""
A line should be in its rectangular bounding box.
"""
line = geom.LineString([(0, 0), (1, 1)])
polygon = geom.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
self.assertTrue(is_in_region(line, polygon))
def test_is_in_region2(self):
"""
A real baseline should be in its polygonization.
"""
line = geom.LineString([(268, 656), (888, 656)])
polygon = geom.Polygon([(268, 656), (265, 613), (885, 611), (888, 656), (885, 675), (265, 672)])
self.assertTrue(is_in_region(line, polygon))
def test_is_in_region3(self):
"""
A line that does not cross the box should not be in the region.
"""
line = geom.LineString([(2, 2), (1, 1)])
polygon = geom.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
self.assertFalse(is_in_region(line, polygon))
def test_order_simple_over_under(self):
"""
Two lines (as their polygonal boundaries) are already in order.
In this example, the boundaries are rectangles that align vertically,
have horizontal base lines and do not overlap or touch::
AAAA
BBBB
The reading order should be the same for left-to-right and right-to-left.
"""
polygon0 = [[10, 10], [10, 20], [100, 20], [100, 10], [10, 10]]
polygon1 = [[10, 30], [10, 40], [100, 40], [100, 30], [10, 30]]
order_lr = reading_order([polygon_slices(line) for line in [polygon0, polygon1]])
order_rl = reading_order([polygon_slices(line) for line in [polygon0, polygon1]], 'rl')
# line0 should come before line1, lines do not come before themselves
expected = np.array([[0, 1], [0, 0]])
self.assertTrue(np.array_equal(order_lr, expected), "Reading order is not as expected: {}".format(order_lr))
self.assertTrue(np.array_equal(order_rl, expected), "Reading order is not as expected: {}".format(order_rl))
def test_order_simple_over_under_touching(self):
"""
Two lines (as their polygonal boundaries) are already in order.
In this example, the boundaries are rectangles that align vertically,
have horizontal base lines and touch::
AAAA
BBBB
The reading order should be the same for left-to-right and right-to-left.
"""
polygon0 = [[10, 10], [10, 30], [100, 30], [100, 10], [10, 10]]
polygon1 = [[10, 30], [10, 40], [100, 40], [100, 30], [10, 30]]
order_lr = reading_order([polygon_slices(line) for line in [polygon0, polygon1]])
order_rl = reading_order([polygon_slices(line) for line in [polygon0, polygon1]], 'rl')
# line0 should come before line1, lines do not come before themselves
expected = np.array([[0, 1], [0, 0]])
self.assertTrue(np.array_equal(order_lr, expected), "Reading order is not as expected: {}".format(order_lr))
self.assertTrue(np.array_equal(order_rl, expected), "Reading order is not as expected: {}".format(order_rl))
def test_order_simple_left_right(self):
"""
Two lines (as their polygonal boundaries) are already in order.
In this example, the boundaries are rectangles that align horizontally,
have horizontal base lines and do not overlap or touch::
AAAA BBBB
"""
polygon0 = [[10, 10], [10, 20], [100, 20], [100, 10], [10, 10]]
polygon1 = [[150, 10], [150, 20], [250, 20], [250, 10], [150, 10]]
order = reading_order([polygon_slices(line) for line in [polygon0, polygon1]])
# line0 should come before line1, lines do not come before themselves
expected = np.array([[0, 1], [0, 0]])
self.assertTrue(np.array_equal(order, expected), "Reading order is not as expected: {}".format(order))
@pytest.mark.xfail
def test_order_simple_left_right_touching(self):
"""
Two lines (as their polygonal boundaries) are already in order.
In this example, the boundaries are rectangles that align horizontally,
have horizontal base lines and touch::
AAAABBBB
"""
polygon0 = [[10, 10], [10, 20], [100, 20], [100, 10], [10, 10]]
polygon1 = [[100, 10], [100, 20], [250, 20], [250, 10], [100, 10]]
order = reading_order([polygon_slices(line) for line in [polygon0, polygon1]])
# line0 should come before line1, lines do not come before themselves
expected = np.array([[0, 1], [0, 0]])
self.assertTrue(np.array_equal(order, expected), "Reading order is not as expected: {}".format(order))
def test_order_simple_right_left(self):
"""
Two lines (as their polygonal boundaries) are in reverse RTL-order.
In this example, the boundaries are rectangles that align horizontally,
have horizontal base lines and do not overlap or touch::
BBBB AAAA
"""
polygon0 = [[10, 10], [10, 20], [100, 20], [100, 10], [10, 10]]
polygon1 = [[150, 10], [150, 20], [250, 20], [250, 10], [150, 10]]
order = reading_order([polygon_slices(line) for line in [polygon0, polygon1]], 'rl')
# line1 should come before line0, lines do not come before themselves
expected = np.array([[0, 0], [1, 0]])
self.assertTrue(np.array_equal(order, expected), "Reading order is not as expected: {}".format(order))
@pytest.mark.xfail
def test_order_simple_right_left_touching(self):
"""
Two lines (as their polygonal boundaries) are in reverse RTL-order.
In this example, the boundaries are rectangles that align horizontally,
have horizontal base lines and touch::
BBBBAAAA
"""
polygon0 = [[10, 10], [10, 20], [100, 20], [100, 10], [10, 10]]
polygon1 = [[100, 10], [100, 20], [250, 20], [250, 10], [100, 10]]
order = reading_order([polygon_slices(line) for line in [polygon0, polygon1]])
# line1 should come before line0, lines do not come before themselves
expected = np.array([[0, 0], [1, 0]])
self.assertTrue(np.array_equal(order, expected), "Reading order is not as expected: {}".format(order))
def test_order_real_reverse(self):
"""
Real example: lines are in reverse order.
The reading order should be the same for left-to-right and right-to-left.
"""
polygon0 = [[474, 2712], [466, 2669], [1741, 2655], [1749, 2696], [1746, 2709], [474, 2725]]
polygon1 = [[493, 2409], [488, 2374], [1733, 2361], [1741, 2395], [1738, 2409], [493, 2422]]
order_lr = reading_order([polygon_slices(line) for line in [polygon0, polygon1]])
order_rl = reading_order([polygon_slices(line) for line in [polygon0, polygon1]], 'rl')
# line1 should come before line0, lines do not come before themselves
expected = np.array([[0, 0], [1, 0]])
self.assertTrue(np.array_equal(order_lr, expected), "Reading order is not as expected: {}".format(order_lr))
self.assertTrue(np.array_equal(order_rl, expected), "Reading order is not as expected: {}".format(order_rl))
def test_order_real_in_order(self):
"""
Real (modified) example: lines are in order.
The reading order should be the same for left-to-right and right-to-left.
"""
polygon0 = [[493, 2409], [488, 2374], [1733, 2361], [1741, 2395], [1738, 2409], [493, 2422]]
polygon1 = [[474, 2712], [466, 2669], [1741, 2655], [1749, 2696], [1746, 2709], [474, 2725]]
order_lr = reading_order([polygon_slices(line) for line in [polygon0, polygon1]])
order_rl = reading_order([polygon_slices(line) for line in [polygon0, polygon1]], 'rl')
# line0 should come before line1, lines do not come before themselves
expected = np.array([[0, 1], [0, 0]])
self.assertTrue(np.array_equal(order_lr, expected), "Reading order is not as expected: {}".format(order_lr))
self.assertTrue(np.array_equal(order_rl, expected), "Reading order is not as expected: {}".format(order_rl))
def test_topsort_ordered(self):
"""
Return list for three lines that are already in order.
"""
partial_sort = np.array([[1, 1, 1], [0, 1, 1], [0, 0, 1]])
expected = [0, 1, 2]
self.assertTrue(np.array_equal(topsort(partial_sort), expected))
def test_topsort_ordered_no_self(self):
"""
Return list for three lines that are already in order.
"""
partial_sort = np.array([[0, 1, 1], [0, 0, 1], [0, 0, 0]])
expected = [0, 1, 2]
self.assertTrue(np.array_equal(topsort(partial_sort), expected))
def test_topsort_unordered(self):
"""
Return list for three lines that are partially in order.
"""
partial_sort = np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]])
expected = [0, 2, 1]
self.assertTrue(np.array_equal(topsort(partial_sort), expected))
def test_topsort_unordered_no_self(self):
"""
Return list for three lines that are partially in order.
"""
partial_sort = np.array([[0, 1, 1], [0, 0, 0], [0, 1, 0]])
expected = [0, 2, 1]
self.assertTrue(np.array_equal(topsort(partial_sort), expected))
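
# Illustrative sketch, not part of the original suite: combining
# reading_order and topsort into a final line ordering. With line 1 lying
# above line 0, the expected result is [1, 0].
if __name__ == '__main__':
    lines = [[[10, 30], [10, 40], [100, 40], [100, 30]],   # lower line
             [[10, 10], [10, 20], [100, 20], [100, 10]]]   # upper line
    partial = reading_order([polygon_slices(line) for line in lines])
    print(topsort(partial))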

# File: kraken-main/tests/test_pageseg.py
# -*- coding: utf-8 -*-
import unittest
from PIL import Image
from pytest import raises
from pathlib import Path
from kraken.pageseg import segment
from kraken.lib.exceptions import KrakenInputException
thisfile = Path(__file__).resolve().parent
resources = thisfile / 'resources'
class TestPageSeg(unittest.TestCase):
"""
Tests of the page segmentation functionality
"""
def test_segment_color(self):
"""
Test correct handling of color input.
"""
with raises(KrakenInputException):
with Image.open(resources / 'input.jpg') as im:
segment(im)
def test_segment_bw(self):
"""
Tests segmentation of bi-level input.
"""
with Image.open(resources / 'bw.png') as im:
lines = segment(im)
# test if line count is roughly correct
self.assertAlmostEqual(len(lines['boxes']), 30, msg='Segmentation differs '
'wildly from true line count', delta=5)
# check if lines do not extend beyond image
for box in lines['boxes']:
self.assertLess(0, box[0], msg='Line x0 < 0')
self.assertLess(0, box[1], msg='Line y0 < 0')
self.assertGreater(im.size[0], box[2], msg='Line x1 > {}'.format(im.size[0]))
self.assertGreater(im.size[1], box[3], msg='Line y1 > {}'.format(im.size[1]))
| 1,433 | 33.142857 | 93 |
py
|
kraken
|
kraken-main/tests/test_serialization.py
|
# -*- coding: utf-8 -*-
import json
import unittest
import tempfile
import numpy as np
from lxml import etree
from io import StringIO
from pathlib import Path
from hocr_spec import HocrValidator
from collections import Counter
from kraken import rpred, serialization
from kraken.lib import xml
thisfile = Path(__file__).resolve().parent
resources = thisfile / 'resources'
def roundtrip(self, records, fp):
"""
Checks that the order of lines after serialization and deserialization is
equal to the records.
"""
with tempfile.NamedTemporaryFile() as out:
fp.seek(0)
out.write(fp.getvalue().encode('utf-8'))
doc = xml.parse_xml(out.name)['lines']
for orig_line, parsed_line in zip(records, doc):
self.assertSequenceEqual(np.array(orig_line.baseline).tolist(),
np.array(parsed_line['baseline']).tolist(),
msg='Baselines differ after serialization.')
def validate_hocr(self, fp):
fp.seek(0)
validator = HocrValidator('standard')
report = validator.validate(fp, parse_strict=True)
self.assertTrue(report.is_valid())
doc = etree.fromstring(fp.getvalue().encode('utf-8'))
ids = [x.get('id') for x in doc.findall('.//*[@id]')]
counts = Counter(ids)
self.assertEqual(counts.most_common(1)[0][1], 1, msg='Duplicate IDs in hOCR output')
def validate_page(self, fp):
doc = etree.fromstring(fp.getvalue().encode('utf-8'))
ids = [x.get('id') for x in doc.findall('.//*[@id]')]
counts = Counter(ids)
if len(counts):
self.assertEqual(counts.most_common(1)[0][1], 1, msg='Duplicate IDs in PageXML output')
with open(resources / 'pagecontent.xsd') as schema_fp:
page_schema = etree.XMLSchema(etree.parse(schema_fp))
page_schema.assertValid(doc)
def validate_alto(self, fp):
doc = etree.fromstring(fp.getvalue().encode('utf-8'))
ids = [x.get('ID') for x in doc.findall('.//*[@ID]')]
counts = Counter(ids)
self.assertEqual(counts.most_common(1)[0][1], 1, msg='Duplicate IDs in ALTO output')
with open(resources / 'alto-4-3.xsd') as schema_fp:
alto_schema = etree.XMLSchema(etree.parse(schema_fp))
alto_schema.assertValid(doc)
class TestSerializations(unittest.TestCase):
"""
Tests for output serialization
"""
def setUp(self):
        with open(resources / 'records.json', 'r') as fp:
self.box_records = [rpred.BBoxOCRRecord(**x) for x in json.load(fp)]
with open(resources / 'bl_records.json', 'r') as fp:
recs = json.load(fp)
self.bl_records = [rpred.BaselineOCRRecord(**bl) for bl in recs['lines']]
self.bl_regions = recs['regions']
self.metadata_steps = [{'category': 'preprocessing', 'description': 'PDF image extraction', 'settings': {}},
{'category': 'processing',
'description': 'Baseline and region segmentation',
'settings': {'model': 'foo.mlmodel', 'text_direction': 'horizontal-lr'}},
{'category': 'processing',
'description': 'Text line recognition',
'settings': {'text_direction': 'horizontal-lr',
'models': 'bar.mlmodel',
'pad': 16,
'bidi_reordering': True}}]
def test_box_vertical_hocr_serialization(self):
"""
Test vertical line hOCR serialization
"""
fp = StringIO()
fp.write(serialization.serialize(self.box_records, image_name='foo.png', writing_mode='vertical-lr', template='hocr'))
validate_hocr(self, fp)
def test_box_hocr_serialization(self):
"""
Test hOCR serialization
"""
fp = StringIO()
fp.write(serialization.serialize(self.box_records, image_name='foo.png', template='hocr'))
validate_hocr(self, fp)
def test_box_alto_serialization_validation(self):
"""
Validates output against ALTO schema
"""
fp = StringIO()
fp.write(serialization.serialize(self.box_records, image_name='foo.png', template='alto'))
validate_alto(self, fp)
def test_box_abbyyxml_serialization_validation(self):
"""
Validates output against abbyyXML schema
"""
fp = StringIO()
fp.write(serialization.serialize(self.box_records, image_name='foo.png', template='abbyyxml'))
doc = etree.fromstring(fp.getvalue().encode('utf-8'))
with open(resources / 'FineReader10-schema-v1.xml') as schema_fp:
abbyy_schema = etree.XMLSchema(etree.parse(schema_fp))
abbyy_schema.assertValid(doc)
def test_box_pagexml_serialization_validation(self):
"""
        Validates output against PageXML schema
"""
fp = StringIO()
fp.write(serialization.serialize(self.box_records, image_name='foo.png', template='pagexml'))
validate_page(self, fp)
def test_bl_alto_serialization_validation(self):
"""
Validates output against ALTO schema
"""
fp = StringIO()
fp.write(serialization.serialize(self.bl_records, image_name='foo.png', template='alto'))
validate_alto(self, fp)
roundtrip(self, self.bl_records, fp)
def test_bl_abbyyxml_serialization_validation(self):
"""
Validates output against abbyyXML schema
"""
fp = StringIO()
fp.write(serialization.serialize(self.bl_records, image_name='foo.png', template='abbyyxml'))
doc = etree.fromstring(fp.getvalue().encode('utf-8'))
with open(resources / 'FineReader10-schema-v1.xml') as schema_fp:
abbyy_schema = etree.XMLSchema(etree.parse(schema_fp))
abbyy_schema.assertValid(doc)
def test_bl_pagexml_serialization_validation(self):
"""
Validates output against PageXML schema
"""
fp = StringIO()
fp.write(serialization.serialize(self.bl_records, image_name='foo.png', template='pagexml'))
validate_page(self, fp)
roundtrip(self, self.bl_records, fp)
def test_bl_region_alto_serialization_validation(self):
"""
Validates output against ALTO schema
"""
fp = StringIO()
fp.write(serialization.serialize(self.bl_records, image_name='foo.png', template='alto', regions=self.bl_regions))
validate_alto(self, fp)
roundtrip(self, self.bl_records, fp)
def test_bl_region_abbyyxml_serialization_validation(self):
"""
Validates output against abbyyXML schema
"""
fp = StringIO()
fp.write(serialization.serialize(self.bl_records, image_name='foo.png', template='abbyyxml', regions=self.bl_regions))
doc = etree.fromstring(fp.getvalue().encode('utf-8'))
with open(resources / 'FineReader10-schema-v1.xml') as schema_fp:
abbyy_schema = etree.XMLSchema(etree.parse(schema_fp))
abbyy_schema.assertValid(doc)
def test_bl_region_pagexml_serialization_validation(self):
"""
Validates output against PageXML schema
"""
fp = StringIO()
fp.write(serialization.serialize(self.bl_records, image_name='foo.png', template='pagexml', regions=self.bl_regions))
validate_page(self, fp)
roundtrip(self, self.bl_records, fp)
def test_region_only_alto_serialization_validation(self):
"""
Validates output without baselines (but regions) against ALTO schema
"""
fp = StringIO()
fp.write(serialization.serialize([], image_name='foo.png', template='alto', regions=self.bl_regions))
validate_alto(self, fp)
def test_region_only_abbyyxml_serialization_validation(self):
"""
Validates output without baselines (but regions) against abbyyXML schema
"""
fp = StringIO()
fp.write(serialization.serialize([], image_name='foo.png', template='abbyyxml', regions=self.bl_regions))
doc = etree.fromstring(fp.getvalue().encode('utf-8'))
with open(resources / 'FineReader10-schema-v1.xml') as schema_fp:
abbyy_schema = etree.XMLSchema(etree.parse(schema_fp))
abbyy_schema.assertValid(doc)
def test_region_only_pagexml_serialization_validation(self):
"""
Validates output without baselines (but regions) against PageXML schema
"""
fp = StringIO()
fp.write(serialization.serialize([], image_name='foo.png', template='pagexml', regions=self.bl_regions))
validate_page(self, fp)
def test_serialize_segmentation_alto(self):
"""
Validates output of `serialize_segmentation` against ALTO schema
"""
fp = StringIO()
fp.write(serialization.serialize_segmentation({'boxes': []}, image_name='foo.png', template='alto'))
validate_alto(self, fp)
def test_serialize_segmentation_pagexml(self):
"""
        Validates output of `serialize_segmentation` against PageXML schema
"""
fp = StringIO()
fp.write(serialization.serialize_segmentation({'boxes': []}, image_name='foo.png', template='pagexml'))
validate_page(self, fp)
def test_serialize_segmentation_alto_steps(self):
"""
Validates output of `serialize_segmentation` with processing steps against ALTO schema
"""
fp = StringIO()
fp.write(serialization.serialize_segmentation({'boxes': []}, image_name='foo.png', template='alto', processing_steps=self.metadata_steps))
validate_alto(self, fp)
    def test_serialize_segmentation_pagexml_steps(self):
"""
Validates output of `serialize_segmentation` with processing steps against PageXML schema
"""
fp = StringIO()
fp.write(serialization.serialize_segmentation({'boxes': []}, image_name='foo.png', template='pagexml', processing_steps=self.metadata_steps))
validate_page(self, fp)
def test_bl_region_alto_serialization_validation_steps(self):
"""
Validates output with processing steps against ALTO schema
"""
fp = StringIO()
fp.write(serialization.serialize(self.bl_records, image_name='foo.png', template='alto', regions=self.bl_regions, processing_steps=self.metadata_steps))
validate_alto(self, fp)
roundtrip(self, self.bl_records, fp)
def test_bl_region_abbyyxml_serialization_validation_steps(self):
"""
Validates output with processing steps against abbyyXML schema
"""
fp = StringIO()
fp.write(serialization.serialize(self.bl_records, image_name='foo.png', template='abbyyxml', regions=self.bl_regions, processing_steps=self.metadata_steps))
doc = etree.fromstring(fp.getvalue().encode('utf-8'))
with open(resources / 'FineReader10-schema-v1.xml') as schema_fp:
abbyy_schema = etree.XMLSchema(etree.parse(schema_fp))
abbyy_schema.assertValid(doc)
def test_bl_region_pagexml_serialization_validation_steps(self):
"""
Validates output with processing steps against PageXML schema
"""
fp = StringIO()
fp.write(serialization.serialize(self.bl_records, image_name='foo.png', template='pagexml', regions=self.bl_regions, processing_steps=self.metadata_steps))
validate_page(self, fp)
roundtrip(self, self.bl_records, fp)
| 11,646 | 37.186885 | 164 |
py
|
kraken
|
kraken-main/tests/test_vgsl.py
|
# -*- coding: utf-8 -*-
import unittest
from pytest import raises
import os
import torch
import tempfile
from kraken.lib import vgsl, layers
class TestVGSL(unittest.TestCase):
"""
Testing VGSL module
"""
def test_helper_train(self):
"""
Tests train/eval mode helper methods
"""
rnn = vgsl.TorchVGSLModel('[1,1,0,48 Lbx10 Do O1c57]')
rnn.train()
self.assertTrue(torch.is_grad_enabled())
self.assertTrue(rnn.nn.training)
rnn.eval()
self.assertFalse(torch.is_grad_enabled())
self.assertFalse(rnn.nn.training)
@unittest.skip('works randomly on ci')
def test_helper_threads(self):
"""
Test openmp threads helper method.
"""
rnn = vgsl.TorchVGSLModel('[1,1,0,48 Lbx10 Do O1c57]')
rnn.set_num_threads(4)
self.assertEqual(torch.get_num_threads(), 4)
def test_save_model(self):
"""
Test model serialization.
"""
rnn = vgsl.TorchVGSLModel('[1,1,0,48 Lbx10 Do O1c57]')
with tempfile.TemporaryDirectory() as dir:
rnn.save_model(dir + '/foo.mlmodel')
self.assertTrue(os.path.exists(dir + '/foo.mlmodel'))
def test_append(self):
"""
Test appending one VGSL spec to another.
"""
rnn = vgsl.TorchVGSLModel('[1,1,0,48 Lbx10 Do O1c57]')
rnn.append(1, '[Cr1,1,2 Gn2 Cr3,3,4]')
self.assertEqual(rnn.spec, '[1,1,0,48 Lbx{L_0}10 Cr{C_1}1,1,2 Gn{Gn_2}2 Cr{C_3}3,3,4]')
def test_resize(self):
"""
Tests resizing of output layers.
"""
rnn = vgsl.TorchVGSLModel('[1,1,0,48 Lbx10 Do O1c57]')
rnn.resize_output(80)
self.assertEqual(rnn.nn[-1].lin.out_features, 80)
def test_del_resize(self):
"""
Tests resizing of output layers with entry deletion.
"""
rnn = vgsl.TorchVGSLModel('[1,1,0,48 Lbx10 Do O1c57]')
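        # the second argument gives the output positions to drop when the final
        # linear layer is resized to the new size (here 80 outputs)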
rnn.resize_output(80, [2, 4, 5, 6, 7, 12, 25])
self.assertEqual(rnn.nn[-1].lin.out_features, 80)
def test_nested_serial_model(self):
"""
Test the creation of a nested serial model.
"""
net = vgsl.TorchVGSLModel('[1,48,0,1 Cr4,2,1,4,2 ([Cr4,2,1,1,1 Do Cr3,3,2,1,1] [Cr4,2,1,1,1 Cr3,3,2,1,1 Do]) S1(1x0)1,3 Lbx2 Do0.5 Lbx2]')
self.assertIsInstance(net.nn[1], layers.MultiParamParallel)
for x in net.nn[1].children():
self.assertIsInstance(x, layers.MultiParamSequential)
self.assertEqual(len(x), 3)
def test_parallel_model_inequal(self):
"""
Test proper raising of ValueError when parallel layers do not have the same output shape.
"""
with raises(ValueError):
net = vgsl.TorchVGSLModel('[1,48,0,1 Cr4,2,1,4,2 [Cr4,2,1,1,1 (Cr4,2,1,4,2 Cr3,3,2,1,1) S1(1x0)1,3 Lbx2 Do0.5] Lbx2]')
def test_complex_serialization(self):
"""
Test proper serialization and deserialization of a complex model.
"""
net = vgsl.TorchVGSLModel('[1,48,0,1 Cr4,2,1,4,2 ([Cr4,2,1,1,1 Do Cr3,3,2,1,1] [Cr4,2,1,1,1 Cr3,3,2,1,1 Do]) S1(1x0)1,3 Lbx2 Do0.5 Lbx2]')
| 3,156 | 33.692308 | 146 |
py
|
kraken
|
kraken-main/tests/test_codec.py
|
# -*- coding: utf-8 -*-
import unittest
from pytest import raises
from torch import IntTensor
from kraken.lib import codec
from kraken.lib.exceptions import KrakenEncodeException, KrakenCodecException
class TestCodec(unittest.TestCase):
"""
Testing codec mapping routines
"""
def setUp(self):
# codec mapping one code point to one label
self.o2o_codec = codec.PytorchCodec('ab')
self.o2o_codec_strict = codec.PytorchCodec('ab', strict=True)
# codec mapping many code points to one label
        self.m2o_codec = codec.PytorchCodec(['aaa', 'aa', 'a', 'b'])
        self.m2o_codec_strict = codec.PytorchCodec(['aaa', 'aa', 'a', 'b'], strict=True)
# codec mapping one code point to many labels
self.o2m_codec = codec.PytorchCodec({'a': [10, 11, 12], 'b': [12, 45, 80]})
self.o2m_codec_strict = codec.PytorchCodec({'a': [10, 11, 12], 'b': [12, 45, 80]}, strict=True)
# codec mapping many code points to many labels
self.m2m_codec = codec.PytorchCodec({'aaa': [10, 11, 12], 'aa': [9, 9], 'a': [11], 'bb': [15], 'b': [12]})
self.m2m_codec_strict = codec.PytorchCodec({'aaa': [10, 11, 12], 'aa': [9, 9], 'a': [11], 'bb': [15], 'b': [12]}, strict=True)
self.invalid_c_sequence = 'aaababbcaaa'
self.valid_c_sequence = 'aaababbaaabbbb'
self.ada_sequence = 'cdaabae'
self.invalid_l_sequence = [(45, 78, 778, 0.3793492615638364),
(10, 203, 859, 0.9485075253700872),
(11, 70, 601, 0.7885297329523855),
(12, 251, 831, 0.7216817042926938),
(900, 72, 950, 0.27609823017048707)]
def test_o2o_encode(self):
"""
Test correct encoding of one-to-one code point sequence
"""
self.assertTrue(self.o2o_codec.encode(self.valid_c_sequence).eq(
IntTensor([1, 1, 1, 2, 1, 2, 2, 1, 1, 1, 2, 2, 2, 2])).all())
def test_m2o_encode(self):
"""
Test correct encoding of many-to-one code point sequence
"""
self.assertTrue(self.m2o_codec.encode(self.valid_c_sequence).eq(
IntTensor([3, 4, 1, 4, 4, 3, 4, 4, 4, 4])).all())
def test_o2m_encode(self):
"""
Test correct encoding of one-to-many code point sequence
"""
self.assertTrue(self.o2m_codec.encode(self.valid_c_sequence).eq(
IntTensor([10, 11, 12, 10, 11, 12, 10, 11, 12,
12, 45, 80, 10, 11, 12, 12, 45, 80, 12, 45,
80, 10, 11, 12, 10, 11, 12, 10, 11, 12, 12,
45, 80, 12, 45, 80, 12, 45, 80, 12, 45,
80])).all())
def test_m2m_encode(self):
"""
Test correct encoding of many-to-many code point sequence
"""
self.assertTrue(self.m2m_codec.encode(self.valid_c_sequence).eq(
IntTensor([10, 11, 12, 12, 11, 15, 10, 11, 12, 15, 15])).all())
def test_o2o_decode(self):
"""
Test correct decoding of one-to-one label sequence
"""
self.assertEqual(''.join(x[0] for x in self.o2o_codec.decode([(1, 288, 652, 0.8537325587315542),
(1, 120, 861, 0.4968470297302481),
(1, 372, 629, 0.008650773294205938),
(2, 406, 831, 0.15637985875540783),
(1, 3, 824, 0.26475146828232776),
(2, 228, 959, 0.3062689368044844),
(2, 472, 679, 0.8677848554329698),
(1, 482, 771, 0.6055591197109657),
(1, 452, 606, 0.40744265053745055),
(1, 166, 879, 0.7509269177978337),
(2, 92, 729, 0.34554103785480306),
(2, 227, 959, 0.3006394689033981),
(2, 341, 699, 0.07798704843315862),
(2, 142, 513, 0.9933850573241767)])),
'aaababbaaabbbb')
def test_m2o_decode(self):
"""
Test correct decoding of many-to-one label sequence
"""
self.assertEqual(''.join(x[0] for x in self.m2o_codec.decode([(3, 28, 967, 0.07761440833942468),
(4, 282, 565, 0.4946281412618093),
(1, 411, 853, 0.7767301050586806),
(4, 409, 501, 0.47915609540996495),
(4, 299, 637, 0.7755889399450564),
(3, 340, 834, 0.726656062406549),
(4, 296, 846, 0.2274859668684881),
(4, 238, 695, 0.32982930128257815),
(4, 187, 970, 0.43354272748701805),
(4, 376, 863, 0.24483897879550764)])),
'aaababbaaabbbb')
def test_o2m_decode(self):
"""
Test correct decoding of one-to-many label sequence
"""
self.assertEqual(''.join(x[0] for x in self.o2m_codec.decode([(10, 35, 959, 0.43819571289990644),
(11, 361, 904, 0.1801115018592916),
(12, 15, 616, 0.5987506334315549),
(10, 226, 577, 0.6178248939780698),
(11, 227, 814, 0.31531097360327787),
(12, 390, 826, 0.7706594984014595),
(10, 251, 579, 0.9442530315305507),
(11, 269, 870, 0.4475979925584944),
(12, 456, 609, 0.9396137478409995),
(12, 60, 757, 0.06416607235266458),
(45, 318, 918, 0.8129458423341515),
(80, 15, 914, 0.49773432435726517),
(10, 211, 648, 0.7919220961861382),
(11, 326, 804, 0.7852387442556333),
(12, 93, 978, 0.9376801123379804),
(12, 23, 698, 0.915543635886972),
(45, 71, 599, 0.8137750423628737),
(80, 167, 980, 0.6501035181890226),
(12, 259, 823, 0.3122860659712233),
(45, 312, 948, 0.20582589628806058),
(80, 430, 694, 0.3528792552966924),
(10, 470, 866, 0.0685524032330419),
(11, 459, 826, 0.39354887700146846),
(12, 392, 926, 0.4102018609185847),
(10, 271, 592, 0.1877915301623876),
(11, 206, 995, 0.21614062190981576),
(12, 466, 648, 0.3106914763314057),
(10, 368, 848, 0.28715379701274113),
(11, 252, 962, 0.5535299604896257),
(12, 387, 709, 0.844810014550603),
(12, 156, 916, 0.9803695305965802),
(45, 150, 555, 0.5969071330809561),
(80, 381, 922, 0.5608300913697513),
(12, 35, 762, 0.5227506455088722),
(45, 364, 931, 0.7205481732247938),
(80, 341, 580, 0.536934566913969),
(12, 79, 919, 0.5136066153481802),
(45, 377, 773, 0.6507467790760987),
(80, 497, 931, 0.7635100185309783),
(12, 76, 580, 0.9542477438586341),
(45, 37, 904, 0.4299813924853797),
(80, 425, 638, 0.6825047210425983)])),
'aaababbaaabbbb')
def test_m2m_decode(self):
"""
Test correct decoding of many-to-many label sequence
"""
self.assertEqual(''.join(x[0] for x in self.m2m_codec.decode([(10, 313, 788, 0.9379917930525369),
(11, 117, 793, 0.9974374577004185),
(12, 50, 707, 0.020074164253385374),
(12, 382, 669, 0.525910770170754),
(10, 458, 833, 0.4292373233167248),
(15, 45, 831, 0.5759709886686226),
(10, 465, 729, 0.8492104897235935),
(11, 78, 800, 0.24733538459309445),
(12, 375, 872, 0.26908722769105353),
(15, 296, 889, 0.44251812620463726),
(15, 237, 930, 0.5456105208117391)])),
'aaabbbaaabbbb')
def test_o2o_decode_invalid_nonstrict(self):
"""
Test correct handling of undecodable sequences (one-to-one decoder)
"""
self.assertEqual(self.o2o_codec.decode(self.invalid_l_sequence), [])
def test_m2o_decode_invalid_nonstrict(self):
"""
Test correct handling of undecodable sequences (many-to-one decoder)
"""
self.assertEqual(self.m2o_codec.decode(self.invalid_l_sequence), [])
def test_o2m_decode_invalid_nonstrict(self):
"""
Test correct handling of undecodable sequences (one-to-many decoder)
"""
self.assertEqual(self.o2m_codec.decode(self.invalid_l_sequence),
[('a', 203, 831, 0.8195729875383888)])
def test_m2m_decode_invalid_nonstrict(self):
"""
Test correct handling of undecodable sequences (many-to-many decoder)
"""
self.assertEqual(self.m2m_codec.decode(self.invalid_l_sequence),
[('a', 203, 831, 0.8195729875383888),
('a', 203, 831, 0.8195729875383888),
('a', 203, 831, 0.8195729875383888)])
def test_o2o_encode_invalid_nonstrict(self):
"""
Test correct handling of noisy character sequences (one-to-one encoder)
"""
self.assertTrue(self.o2o_codec.encode(self.invalid_c_sequence).eq(
IntTensor([1, 1, 1, 2, 1, 2, 2, 1, 1, 1])).all())
def test_m2o_encode_invalid_nonstrict(self):
"""
Test correct handling of noisy character sequences (many-to-one encoder)
"""
self.assertTrue(self.m2o_codec.encode(self.invalid_c_sequence).eq(
IntTensor([3, 4, 1, 4, 4, 3])).all())
def test_o2m_encode_invalid_nonstrict(self):
"""
Test correct handling of noisy character sequences (one-to-many encoder)
"""
self.assertTrue(self.o2m_codec.encode(self.invalid_c_sequence).eq(
IntTensor([10, 11, 12, 10, 11, 12, 10, 11, 12, 12, 45,
80, 10, 11, 12, 12, 45, 80, 12, 45, 80, 10,
11, 12, 10, 11, 12, 10, 11, 12])).all())
def test_m2m_encode_invalid_nonstrict(self):
"""
Test correct handling of noisy character sequences (many-to-many encoder)
"""
self.assertTrue(self.m2m_codec.encode(self.invalid_c_sequence).eq(
IntTensor([10, 11, 12, 12, 11, 15, 10, 11, 12])).all())
def test_o2o_decode_invalid(self):
"""
Test correct handling of undecodable sequences (one-to-one decoder) in strict mode
"""
with raises(KrakenEncodeException):
self.o2o_codec_strict.decode(self.invalid_l_sequence)
def test_m2o_decode_invalid(self):
"""
Test correct handling of undecodable sequences (many-to-one decoder) in strict mode
"""
with raises(KrakenEncodeException):
self.m2o_codec_strict.decode(self.invalid_l_sequence)
def test_o2m_decode_invalid(self):
"""
Test correct handling of undecodable sequences (one-to-many decoder) in strict mode
"""
with raises(KrakenEncodeException):
self.o2m_codec_strict.decode(self.invalid_l_sequence)
def test_m2m_decode_invalid(self):
"""
Test correct handling of undecodable sequences (many-to-many decoder) in strict mode
"""
with raises(KrakenEncodeException):
self.m2m_codec_strict.decode(self.invalid_l_sequence)
def test_o2o_encode_invalid(self):
"""
Test correct handling of unencodable sequences (one-to-one encoder) in strict mode
"""
with raises(KrakenEncodeException):
self.o2o_codec_strict.encode(self.invalid_c_sequence)
def test_m2o_encode_invalid(self):
"""
Test correct handling of unencodable sequences (many-to-one encoder) in strict mode
"""
with raises(KrakenEncodeException):
self.m2o_codec_strict.encode(self.invalid_c_sequence)
def test_o2m_encode_invalid(self):
"""
Test correct handling of unencodable sequences (one-to-many encoder) in strict mode
"""
with raises(KrakenEncodeException):
self.o2m_codec_strict.encode(self.invalid_c_sequence)
def test_m2m_encode_invalid(self):
"""
Test correct handling of unencodable sequences (many-to-many encoder) in strict mode
"""
with raises(KrakenEncodeException):
self.m2m_codec_strict.encode(self.invalid_c_sequence)
def test_codec_add_simple(self):
"""
        Test adding of new code points to the codec from a plain string.
"""
prev_len = len(self.o2o_codec)
codec = self.o2o_codec.add_labels('cde')
self.assertEqual(len(codec), prev_len + 3)
self.assertTrue(codec.encode(self.ada_sequence).eq(
IntTensor([3, 4, 1, 1, 2, 1, 5])).all())
def test_codec_add_list(self):
"""
        Test adding of new code points to the codec from a list of strings.
"""
prev_len = len(self.o2o_codec)
codec = self.o2o_codec.add_labels(['cd', 'e'])
self.assertEqual(len(codec), prev_len + 2)
self.assertTrue(codec.encode(self.ada_sequence).eq(
IntTensor([3, 1, 1, 2, 1, 4])).all())
def test_codec_add_dict(self):
"""
        Test adding of new code points to the codec from an explicit code point to label mapping.
"""
prev_len = len(self.o2o_codec)
codec = self.o2o_codec.add_labels({'cd': [3], 'e': [4]})
self.assertEqual(len(codec), prev_len + 2)
self.assertTrue(codec.encode(self.ada_sequence).eq(
IntTensor([3, 1, 1, 2, 1, 4])).all())
def test_codec_merge_both(self):
"""
Test merging of a codec adding and removing code points
"""
merge_codec = codec.PytorchCodec('acde')
new_codec, del_labels = self.o2o_codec.merge(merge_codec)
self.assertEqual(del_labels, {2})
self.assertEqual(new_codec.c2l, {'a': [1], 'c': [2], 'd': [3], 'e': [4]})
def test_codec_merge_add(self):
"""
Test merging of a codec adding and removing code points
"""
merge_codec = codec.PytorchCodec('abcde')
new_codec, del_labels = self.o2o_codec.merge(merge_codec)
self.assertEqual(del_labels, set())
self.assertEqual(new_codec.c2l, {'a': [1], 'b': [2], 'c': [3], 'd': [4], 'e': [5]})
def test_codec_merge_remove(self):
"""
Test merging of a codec removing code points
"""
merge_codec = codec.PytorchCodec('a')
new_codec, del_labels = self.o2o_codec.merge(merge_codec)
self.assertEqual(del_labels, {2})
self.assertEqual(new_codec.c2l, {'a': [1]})
| 18,763 | 53.231214 | 134 |
py
|
kraken
|
kraken-main/docs/conf.py
|
# -*- coding: utf-8 -*-
#
# kraken documentation build configuration file, created by
# sphinx-quickstart on Fri May 22 16:51:45 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from subprocess import Popen, PIPE
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('../kraken'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autodoc.typehints',
'autoapi.extension',
'sphinx.ext.napoleon',
'sphinx.ext.githubpages',
'sphinx_multiversion',
]
autodoc_typehints = 'description'
autoapi_type = 'python'
autoapi_dirs = ['../kraken']
autoapi_options = ['members',
'undoc-members',
#'private-members',
#'special-members',
'show-inheritance',
'show-module-summary',
#'imported-members',
]
autoapi_generate_api_docs = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kraken'
copyright = u'2015, mittagessen'
author = u'mittagessen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
pipe = Popen('git describe --tags --always main', stdout=PIPE, shell=True)
version = pipe.stdout.read().decode('utf-8').strip()
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'mittagessen',
'github_repo': 'kraken',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/kraken.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'custom.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebarintro.html', 'navigation.html', 'searchbox.html', 'versions.html'],
'**': ['localtoc.html', 'relations.html', 'searchbox.html', 'versions.html']
}
html_baseurl = 'https://kraken.re/'
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'krakendoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'kraken.tex', 'kraken Documentation',
'mittagessen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'kraken', 'kraken Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'kraken', 'kraken Documentation',
author, 'kraken', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
smv_branch_whitelist = r'main'
smv_tag_whitelist = r'^[2-9]\.\d+(\.0)?$'
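# e.g. tags '2.0', '3.12' and '2.0.0' match the whitelist above, while 1.x
# versions and patch releases like '2.0.1' are excluded from the version switcher.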
| 10,222 | 30.847352 | 92 |
py
|
SSL4MIS
|
SSL4MIS-master/code/test_urpc_util.py
|
import math
import h5py
import nibabel as nib
import numpy as np
import SimpleITK as sitk
import torch
import torch.nn.functional as F
from medpy import metric
from skimage.measure import label
from tqdm import tqdm
def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1):
w, h, d = image.shape
    # if the image is smaller than patch_size, pad it up to patch_size
add_pad = False
if w < patch_size[0]:
w_pad = patch_size[0]-w
add_pad = True
else:
w_pad = 0
if h < patch_size[1]:
h_pad = patch_size[1]-h
add_pad = True
else:
h_pad = 0
if d < patch_size[2]:
d_pad = patch_size[2]-d
add_pad = True
else:
d_pad = 0
wl_pad, wr_pad = w_pad//2, w_pad-w_pad//2
hl_pad, hr_pad = h_pad//2, h_pad-h_pad//2
dl_pad, dr_pad = d_pad//2, d_pad-d_pad//2
if add_pad:
image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad),
(dl_pad, dr_pad)], mode='constant', constant_values=0)
ww, hh, dd = image.shape
sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1
sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1
sz = math.ceil((dd - patch_size[2]) / stride_z) + 1
# print("{}, {}, {}".format(sx, sy, sz))
score_map = np.zeros((num_classes, ) + image.shape).astype(np.float32)
cnt = np.zeros(image.shape).astype(np.float32)
for x in range(0, sx):
xs = min(stride_xy*x, ww-patch_size[0])
for y in range(0, sy):
ys = min(stride_xy * y, hh-patch_size[1])
for z in range(0, sz):
zs = min(stride_z * z, dd-patch_size[2])
test_patch = image[xs:xs+patch_size[0],
ys:ys+patch_size[1], zs:zs+patch_size[2]]
test_patch = np.expand_dims(np.expand_dims(
test_patch, axis=0), axis=0).astype(np.float32)
test_patch = torch.from_numpy(test_patch).cuda()
with torch.no_grad():
y_main, y_aux1, y_aux2, y_aux3 = net(test_patch)
# ensemble
y_main = torch.softmax(y_main, dim=1)
y_aux1 = torch.softmax(y_aux1, dim=1)
y_aux2 = torch.softmax(y_aux2, dim=1)
y_aux3 = torch.softmax(y_aux3, dim=1)
y = y_main
# y = (y_main+y_aux1+y_aux2+y_aux3)
y = y.cpu().data.numpy()
y = y[0, :, :, :, :]
score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
= score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + y
cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
= cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + 1
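    # average overlapping patch predictions by the number of times each voxel
    # was covered by the sliding window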
score_map = score_map/np.expand_dims(cnt, axis=0)
label_map = np.argmax(score_map, axis=0)
if add_pad:
label_map = label_map[wl_pad:wl_pad+w,
hl_pad:hl_pad+h, dl_pad:dl_pad+d]
score_map = score_map[:, wl_pad:wl_pad +
w, hl_pad:hl_pad+h, dl_pad:dl_pad+d]
return label_map
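# Minimal usage sketch for test_single_case (hypothetical names; assumes `net` is a
# trained URPC-style network whose forward pass returns four prediction heads and
# `volume` is a normalized 3D numpy array):
#
#   net.eval()
#   label_map = test_single_case(net, volume, stride_xy=64, stride_z=64,
#                                patch_size=(96, 96, 96), num_classes=2)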
def cal_metric(gt, pred):
if pred.sum() > 0 and gt.sum() > 0:
dice = metric.binary.dc(pred, gt)
hd95 = metric.binary.hd95(pred, gt)
return np.array([dice, hd95])
else:
return np.zeros(2)
def test_all_case(net, base_dir, method="unet_3D", test_list="full_test.list", num_classes=4, patch_size=(48, 160, 160), stride_xy=32, stride_z=24, test_save_path=None):
with open(base_dir + '/{}'.format(test_list), 'r') as f:
image_list = f.readlines()
image_list = [base_dir + "/data/{}.h5".format(
item.replace('\n', '').split(",")[0]) for item in image_list]
total_metric = np.zeros((num_classes - 1, 4))
print("Testing begin")
with open(test_save_path + "/{}.txt".format(method), "a") as f:
for image_path in tqdm(image_list):
ids = image_path.split("/")[-1].replace(".h5", "")
h5f = h5py.File(image_path, 'r')
image = h5f['image'][:]
label = h5f['label'][:]
prediction = test_single_case(
net, image, stride_xy, stride_z, patch_size, num_classes=num_classes)
            single_metric = calculate_metric_percase(prediction == 1, label == 1)
            total_metric[0, :] += single_metric
            f.writelines("{},{},{},{},{}\n".format(
                ids, single_metric[0], single_metric[1], single_metric[2], single_metric[3]))
pred_itk = sitk.GetImageFromArray(prediction.astype(np.uint8))
pred_itk.SetSpacing((1.0, 1.0, 1.0))
sitk.WriteImage(pred_itk, test_save_path +
"/{}_pred.nii.gz".format(ids))
img_itk = sitk.GetImageFromArray(image)
img_itk.SetSpacing((1.0, 1.0, 1.0))
sitk.WriteImage(img_itk, test_save_path +
"/{}_img.nii.gz".format(ids))
lab_itk = sitk.GetImageFromArray(label.astype(np.uint8))
lab_itk.SetSpacing((1.0, 1.0, 1.0))
sitk.WriteImage(lab_itk, test_save_path +
"/{}_lab.nii.gz".format(ids))
f.writelines("Mean metrics,{},{},{},{}".format(total_metric[0, 0] / len(image_list), total_metric[0, 1] / len(
image_list), total_metric[0, 2] / len(image_list), total_metric[0, 3] / len(image_list)))
f.close()
print("Testing end")
return total_metric / len(image_list)
def cal_dice(prediction, label, num=2):
total_dice = np.zeros(num-1)
for i in range(1, num):
prediction_tmp = (prediction == i)
label_tmp = (label == i)
        prediction_tmp = prediction_tmp.astype(float)
        label_tmp = label_tmp.astype(float)
dice = 2 * np.sum(prediction_tmp * label_tmp) / \
(np.sum(prediction_tmp) + np.sum(label_tmp))
total_dice[i - 1] += dice
return total_dice
def calculate_metric_percase(pred, gt):
if pred.sum() > 0 and gt.sum() > 0:
dice = metric.binary.dc(pred, gt)
ravd = abs(metric.binary.ravd(pred, gt))
hd = metric.binary.hd95(pred, gt)
asd = metric.binary.asd(pred, gt)
return np.array([dice, ravd, hd, asd])
else:
return np.zeros(4)
| 6,418 | 38.623457 | 169 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_interpolation_consistency_training_2D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='ACDC/Interpolation_Consistency_Training', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=int, nargs=2, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=300,
help='labeled data')
parser.add_argument('--ict_alpha', type=float, default=0.2,
                    help='ict_alpha')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patients_num):
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    return ref_dict[str(patients_num)]
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
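# For reference: ramps.sigmoid_rampup follows the usual Laine & Aila schedule
# exp(-5 * (1 - t)^2) with t = epoch / rampup_length clipped to [0, 1]. A minimal
# sketch, assuming that standard definition (not copied from utils/ramps.py):
#
#   def sigmoid_rampup(current, rampup_length):
#       if rampup_length == 0:
#           return 1.0
#       current = np.clip(current, 0.0, rampup_length)
#       phase = 1.0 - current / rampup_length
#       return float(np.exp(-5.0 * phase * phase))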
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
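# Worked example of the warm-up: at global_step 0 the effective alpha is
# min(1 - 1/1, 0.99) = 0.0, so the EMA weights start as a copy of the student;
# at step 9 it is 0.9, and from step 99 onward the schedule sits at the
# configured decay of 0.99.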
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
def create_model(ema=False):
# Network definition
model = net_factory(net_type=args.model, in_chns=1,
class_num=num_classes)
if ema:
for param in model.parameters():
param.detach_()
return model
model = create_model()
ema_model = create_model(ema=True)
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
RandomGenerator(args.patch_size)
]))
db_val = BaseDataSets(base_dir=args.root_path, split="val")
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(
total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
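    # TwoStreamBatchSampler builds every batch from labeled_bs labeled slices plus
    # (batch_size - labeled_bs) unlabeled slices, so slicing by args.labeled_bs
    # below cleanly separates the two populations.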
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model.train()
valloader = DataLoader(db_val, batch_size=1, shuffle=False,
num_workers=1)
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
unlabeled_volume_batch = volume_batch[args.labeled_bs:]
labeled_volume_batch = volume_batch[:args.labeled_bs]
# ICT mix factors
ict_mix_factors = np.random.beta(
args.ict_alpha, args.ict_alpha, size=(args.labeled_bs//2, 1, 1, 1))
ict_mix_factors = torch.tensor(
ict_mix_factors, dtype=torch.float).cuda()
unlabeled_volume_batch_0 = unlabeled_volume_batch[0:args.labeled_bs//2, ...]
unlabeled_volume_batch_1 = unlabeled_volume_batch[args.labeled_bs//2:, ...]
# Mix images
batch_ux_mixed = unlabeled_volume_batch_0 * \
(1.0 - ict_mix_factors) + \
unlabeled_volume_batch_1 * ict_mix_factors
input_volume_batch = torch.cat(
[labeled_volume_batch, batch_ux_mixed], dim=0)
outputs = model(input_volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
with torch.no_grad():
ema_output_ux0 = torch.softmax(
ema_model(unlabeled_volume_batch_0), dim=1)
ema_output_ux1 = torch.softmax(
ema_model(unlabeled_volume_batch_1), dim=1)
batch_pred_mixed = ema_output_ux0 * \
(1.0 - ict_mix_factors) + ema_output_ux1 * ict_mix_factors
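            # ICT objective (Verma et al.): the student's prediction on the mixed
            # input should match the same convex mix of the teacher's predictions
            # on the two unmixed unlabeled sub-batches.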
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs].long())
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = 0.5 * (loss_dice + loss_ce)
consistency_weight = get_current_consistency_weight(iter_num//150)
consistency_loss = torch.mean(
(outputs_soft[args.labeled_bs:] - batch_pred_mixed) ** 2)
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
update_ema_variables(model, ema_model, args.ema_decay, iter_num)
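            # polynomial ("poly") learning-rate decay with power 0.9, reaching 0
            # at max_iterations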
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
if iter_num % 20 == 0:
image = volume_batch[1, 0:1, :, :]
writer.add_image('train/Image', image, iter_num)
outputs = torch.argmax(torch.softmax(
outputs, dim=1), dim=1, keepdim=True)
writer.add_image('train/Prediction',
outputs[1, ...] * 50, iter_num)
image = batch_ux_mixed[1, 0:1, :, :]
writer.add_image('train/Mixed_Unlabeled',
image, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image('train/GroundTruth', labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance = np.mean(metric_list, axis=0)[0]
mean_hd95 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/val_mean_dice', performance, iter_num)
writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
if performance > best_performance:
best_performance = performance
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
logging.info(
'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}_labeled/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 12,984 | 41.996689 | 108 |
py
|
SSL4MIS
|
SSL4MIS-master/code/test_3D.py
|
import argparse
import os
import shutil
from glob import glob
import torch
from networks.unet_3D import unet_3D
from test_3D_util import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='BraTS2019/Interpolation_Consistency_Training_25', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_3D', help='model_name')
def Inference(FLAGS):
snapshot_path = "../model/{}/{}".format(FLAGS.exp, FLAGS.model)
num_classes = 2
test_save_path = "../model/{}/Prediction".format(FLAGS.exp)
if os.path.exists(test_save_path):
shutil.rmtree(test_save_path)
os.makedirs(test_save_path)
net = unet_3D(n_classes=num_classes, in_channels=1).cuda()
save_mode_path = os.path.join(
snapshot_path, '{}_best_model.pth'.format(FLAGS.model))
net.load_state_dict(torch.load(save_mode_path))
print("init weight from {}".format(save_mode_path))
net.eval()
avg_metric = test_all_case(net, base_dir=FLAGS.root_path, method=FLAGS.model, test_list="test.txt", num_classes=num_classes,
patch_size=(96, 96, 96), stride_xy=64, stride_z=64, test_save_path=test_save_path)
return avg_metric
if __name__ == '__main__':
FLAGS = parser.parse_args()
metric = Inference(FLAGS)
print(metric)
| 1,495 | 34.619048 | 128 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_adversarial_network_2D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.discriminator import FCDiscriminator
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='ACDC/Adversarial_Network', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--DAN_lr', type=float, default=0.0001,
help='DAN learning rate')
parser.add_argument('--patch_size', type=int, nargs=2, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=3,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patients_num):
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    return ref_dict[str(patients_num)]
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
model = net_factory(net_type=args.model, in_chns=1, class_num=num_classes)
DAN = FCDiscriminator(num_classes=num_classes)
DAN = DAN.cuda()
db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
RandomGenerator(args.patch_size)
]))
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(
total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)
db_val = BaseDataSets(base_dir=args.root_path, split="val")
valloader = DataLoader(db_val, batch_size=1, shuffle=False,
num_workers=1)
model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
DAN_optimizer = optim.Adam(
DAN.parameters(), lr=args.DAN_lr, betas=(0.9, 0.99))
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
DAN_target = torch.tensor([0] * args.batch_size).cuda()
DAN_target[:args.labeled_bs] = 1
model.train()
DAN.eval()
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs].long())
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = 0.5 * (loss_dice + loss_ce)
consistency_weight = get_current_consistency_weight(iter_num//150)
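            # Adversarial signal: with the discriminator frozen, the segmenter is
            # rewarded when DAN classifies its unlabeled predictions as "labeled"
            # (target 1), pulling unlabeled outputs toward the labeled distribution.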
DAN_outputs = DAN(
outputs_soft[args.labeled_bs:], volume_batch[args.labeled_bs:])
consistency_loss = F.cross_entropy(
DAN_outputs, (DAN_target[:args.labeled_bs]).long())
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
model.eval()
DAN.train()
with torch.no_grad():
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
DAN_outputs = DAN(outputs_soft, volume_batch)
DAN_loss = F.cross_entropy(DAN_outputs, DAN_target.long())
DAN_optimizer.zero_grad()
DAN_loss.backward()
DAN_optimizer.step()
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
if iter_num % 20 == 0:
image = volume_batch[1, 0:1, :, :]
writer.add_image('train/Image', image, iter_num)
outputs = torch.argmax(torch.softmax(
outputs, dim=1), dim=1, keepdim=True)
writer.add_image('train/Prediction',
outputs[1, ...] * 50, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image('train/GroundTruth', labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance = np.mean(metric_list, axis=0)[0]
mean_hd95 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/val_mean_dice', performance, iter_num)
writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
if performance > best_performance:
best_performance = performance
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
logging.info(
'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}_labeled/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 11,787 | 40.507042 | 108 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_mean_teacher_3D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='BraTs2019_Mean_Teacher', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=int, nargs=3, default=[96, 96, 96],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=25,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
base_lr = args.base_lr
train_data_path = args.root_path
batch_size = args.batch_size
max_iterations = args.max_iterations
num_classes = 2
def create_model(ema=False):
# Network definition
net = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes)
model = net.cuda()
if ema:
for param in model.parameters():
param.detach_()
return model
model = create_model()
ema_model = create_model(ema=True)
db_train = BraTS2019(base_dir=train_data_path,
split='train',
num=None,
transform=transforms.Compose([
RandomRotFlip(),
RandomCrop(args.patch_size),
ToTensor(),
]))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
labeled_idxs = list(range(0, args.labeled_num))
unlabeled_idxs = list(range(args.labeled_num, 250))
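    # Two-stream batching: every batch holds labeled_bs labeled samples first
    # and batch_size - labeled_bs unlabeled ones, which the [:labeled_bs] /
    # [labeled_bs:] slices in the training loop below rely on.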
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model.train()
ema_model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(2)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
unlabeled_volume_batch = volume_batch[args.labeled_bs:]
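            # Mean Teacher input perturbation: clamped Gaussian noise gives the
            # teacher a slightly different view of the unlabeled volumes.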
noise = torch.clamp(torch.randn_like(
unlabeled_volume_batch) * 0.1, -0.2, 0.2)
ema_inputs = unlabeled_volume_batch + noise
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
with torch.no_grad():
ema_output = ema_model(ema_inputs)
ema_output_soft = torch.softmax(ema_output, dim=1)
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs])
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = 0.5 * (loss_dice + loss_ce)
consistency_weight = get_current_consistency_weight(iter_num//150)
consistency_loss = torch.mean(
(outputs_soft[args.labeled_bs:] - ema_output_soft)**2)
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
update_ema_variables(model, ema_model, args.ema_decay, iter_num)
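            # Poly learning-rate decay, stepped per iteration:
            # lr = base_lr * (1 - iter / max_iter) ** 0.9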
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
writer.add_scalar('loss/loss', loss, iter_num)
if iter_num % 20 == 0:
image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=True)
writer.add_image('train/Image', grid_image, iter_num)
image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Predicted_label',
grid_image, iter_num)
image = label_batch[0, :, :, 20:61:10].unsqueeze(
0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Groundtruth_label',
grid_image, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
avg_metric = test_all_case(
model, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
stride_xy=64, stride_z=64)
if avg_metric[:, 0].mean() > best_performance:
best_performance = avg_metric[:, 0].mean()
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
writer.add_scalar('info/val_dice_score',
avg_metric[0, 0], iter_num)
writer.add_scalar('info/val_hd95',
avg_metric[0, 1], iter_num)
logging.info(
'iteration %d : dice_score : %f hd95 : %f' % (iter_num, avg_metric[0, 0].mean(), avg_metric[0, 1].mean()))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 11,142 | 40.890977 | 126 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_interpolation_consistency_training_3D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='BraTS2019_Interpolation_Consistency_Training', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum iteration number to train')
parser.add_argument('--batch_size', type=int, default=4,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=int, nargs=3, default=[96, 96, 96],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=14,
help='labeled data')
parser.add_argument('--total_labeled_num', type=int, default=140,
help='total labeled data')
parser.add_argument('--ict_alpha', type=int, default=0.2,
help='ict_alpha')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
base_lr = args.base_lr
train_data_path = args.root_path
batch_size = args.batch_size
max_iterations = args.max_iterations
num_classes = 2
def create_model(ema=False):
# Network definition
net = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes)
model = net.cuda()
if ema:
for param in model.parameters():
param.detach_()
return model
model = create_model()
ema_model = create_model(ema=True)
db_train = BraTS2019(base_dir=train_data_path,
split='train',
num=None,
transform=transforms.Compose([
RandomRotFlip(),
RandomCrop(args.patch_size),
ToTensor(),
]))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
labeled_idxs = list(range(0, args.labeled_num))
unlabeled_idxs = list(range(args.labeled_num, args.total_labeled_num))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model.train()
ema_model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(2)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
labeled_volume_batch = volume_batch[:args.labeled_bs]
unlabeled_volume_batch = volume_batch[args.labeled_bs:]
# ICT mix factors
ict_mix_factors = np.random.beta(
args.ict_alpha, args.ict_alpha, size=(args.labeled_bs//2, 1, 1, 1, 1))
ict_mix_factors = torch.tensor(
ict_mix_factors, dtype=torch.float).cuda()
unlabeled_volume_batch_0 = unlabeled_volume_batch[0:1, ...]
unlabeled_volume_batch_1 = unlabeled_volume_batch[1:2, ...]
# Mix images
batch_ux_mixed = unlabeled_volume_batch_0 * \
(1.0 - ict_mix_factors) + \
unlabeled_volume_batch_1 * ict_mix_factors
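            # Interpolation Consistency Training: with lam ~ Beta(alpha, alpha),
            # the student's prediction on mix(u0, u1) is later pushed towards
            # mix(teacher(u0), teacher(u1)) by the consistency loss below.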
input_volume_batch = torch.cat(
[labeled_volume_batch, batch_ux_mixed], dim=0)
outputs = model(input_volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
with torch.no_grad():
ema_output_ux0 = torch.softmax(
ema_model(unlabeled_volume_batch_0), dim=1)
ema_output_ux1 = torch.softmax(
ema_model(unlabeled_volume_batch_1), dim=1)
batch_pred_mixed = ema_output_ux0 * \
(1.0 - ict_mix_factors) + ema_output_ux1 * ict_mix_factors
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs])
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = 0.5 * (loss_dice + loss_ce)
consistency_weight = get_current_consistency_weight(iter_num//150)
consistency_loss = torch.mean(
(outputs_soft[args.labeled_bs:] - batch_pred_mixed)**2)
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
update_ema_variables(model, ema_model, args.ema_decay, iter_num)
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
writer.add_scalar('loss/loss', loss, iter_num)
if iter_num % 20 == 0:
image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=True)
writer.add_image('train/Image', grid_image, iter_num)
image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Predicted_label',
grid_image, iter_num)
image = label_batch[0, :, :, 20:61:10].unsqueeze(
0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Groundtruth_label',
grid_image, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
avg_metric = test_all_case(
model, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
stride_xy=32, stride_z=32)
if avg_metric[:, 0].mean() > best_performance:
best_performance = avg_metric[:, 0].mean()
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
writer.add_scalar('info/val_dice_score',
avg_metric[0, 0], iter_num)
writer.add_scalar('info/val_hd95',
avg_metric[0, 1], iter_num)
logging.info(
'iteration %d : dice_score : %f hd95 : %f' % (iter_num, avg_metric[0, 0].mean(), avg_metric[0, 1].mean()))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}_labeled/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 12,251 | 41.989474 | 126 |
py
|
SSL4MIS
|
SSL4MIS-master/code/val_3D.py
|
import math
from glob import glob
import h5py
import nibabel as nib
import numpy as np
import SimpleITK as sitk
import torch
import torch.nn.functional as F
from medpy import metric
from tqdm import tqdm
def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1):
w, h, d = image.shape
# if the size of image is less than patch_size, then padding it
add_pad = False
if w < patch_size[0]:
w_pad = patch_size[0]-w
add_pad = True
else:
w_pad = 0
if h < patch_size[1]:
h_pad = patch_size[1]-h
add_pad = True
else:
h_pad = 0
if d < patch_size[2]:
d_pad = patch_size[2]-d
add_pad = True
else:
d_pad = 0
wl_pad, wr_pad = w_pad//2, w_pad-w_pad//2
hl_pad, hr_pad = h_pad//2, h_pad-h_pad//2
dl_pad, dr_pad = d_pad//2, d_pad-d_pad//2
if add_pad:
image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad),
(dl_pad, dr_pad)], mode='constant', constant_values=0)
ww, hh, dd = image.shape
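    # Sliding-window inference: ceil((dim - patch) / stride) + 1 windows per
    # axis, with the last window clamped to the volume border via min() below.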
sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1
sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1
sz = math.ceil((dd - patch_size[2]) / stride_z) + 1
# print("{}, {}, {}".format(sx, sy, sz))
score_map = np.zeros((num_classes, ) + image.shape).astype(np.float32)
cnt = np.zeros(image.shape).astype(np.float32)
for x in range(0, sx):
xs = min(stride_xy*x, ww-patch_size[0])
for y in range(0, sy):
ys = min(stride_xy * y, hh-patch_size[1])
for z in range(0, sz):
zs = min(stride_z * z, dd-patch_size[2])
test_patch = image[xs:xs+patch_size[0],
ys:ys+patch_size[1], zs:zs+patch_size[2]]
test_patch = np.expand_dims(np.expand_dims(
test_patch, axis=0), axis=0).astype(np.float32)
test_patch = torch.from_numpy(test_patch).cuda()
with torch.no_grad():
y1 = net(test_patch)
# ensemble
y = torch.softmax(y1, dim=1)
y = y.cpu().data.numpy()
y = y[0, :, :, :, :]
                score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] += y
                cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] += 1
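    # Average the overlapping window predictions: per-voxel softmax scores are
    # summed over every window covering the voxel, then divided by the count.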
score_map = score_map/np.expand_dims(cnt, axis=0)
label_map = np.argmax(score_map, axis=0)
if add_pad:
label_map = label_map[wl_pad:wl_pad+w,
hl_pad:hl_pad+h, dl_pad:dl_pad+d]
score_map = score_map[:, wl_pad:wl_pad +
w, hl_pad:hl_pad+h, dl_pad:dl_pad+d]
return label_map
def cal_metric(gt, pred):
if pred.sum() > 0 and gt.sum() > 0:
dice = metric.binary.dc(pred, gt)
hd95 = metric.binary.hd95(pred, gt)
return np.array([dice, hd95])
else:
return np.zeros(2)
def test_all_case(net, base_dir, test_list="full_test.list", num_classes=4, patch_size=(48, 160, 160), stride_xy=32, stride_z=24):
with open(base_dir + '/{}'.format(test_list), 'r') as f:
image_list = f.readlines()
image_list = [base_dir + "/data/{}.h5".format(
item.replace('\n', '').split(",")[0]) for item in image_list]
total_metric = np.zeros((num_classes-1, 2))
print("Validation begin")
for image_path in tqdm(image_list):
h5f = h5py.File(image_path, 'r')
image = h5f['image'][:]
label = h5f['label'][:]
prediction = test_single_case(
net, image, stride_xy, stride_z, patch_size, num_classes=num_classes)
for i in range(1, num_classes):
total_metric[i-1, :] += cal_metric(label == i, prediction == i)
print("Validation end")
return total_metric / len(image_list)
| 4,073 | 36.722222 | 130 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_fully_supervised_2D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import BaseDataSets, RandomGenerator
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume, test_single_volume_ds
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='ACDC/Fully_Supervised', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum iteration number to train')
parser.add_argument('--batch_size', type=int, default=24,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=int, nargs=2, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--labeled_num', type=int, default=50,
help='labeled data')
args = parser.parse_args()
def patients_to_slices(dataset, patients_num):
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        print("Error")
    return ref_dict[str(patients_num)]
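# patients_to_slices maps a patient budget to the matching number of training
# slices: supervision is specified per patient, while the dataloader indexes
# individual 2D slices.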
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
model = net_factory(net_type=args.model, in_chns=1, class_num=num_classes)
db_train = BaseDataSets(base_dir=args.root_path, split="train", num=labeled_slice, transform=transforms.Compose([
RandomGenerator(args.patch_size)
]))
db_val = BaseDataSets(base_dir=args.root_path, split="val")
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True,
num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)
valloader = DataLoader(db_val, batch_size=1, shuffle=False,
num_workers=1)
model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
            loss_ce = ce_loss(outputs, label_batch.long())
loss_dice = dice_loss(outputs_soft, label_batch.unsqueeze(1))
loss = 0.5 * (loss_dice + loss_ce)
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
if iter_num % 20 == 0:
image = volume_batch[1, 0:1, :, :]
writer.add_image('train/Image', image, iter_num)
outputs = torch.argmax(torch.softmax(
outputs, dim=1), dim=1, keepdim=True)
writer.add_image('train/Prediction',
outputs[1, ...] * 50, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image('train/GroundTruth', labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance = np.mean(metric_list, axis=0)[0]
mean_hd95 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/val_mean_dice', performance, iter_num)
writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
if performance > best_performance:
best_performance = performance
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
logging.info(
'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}_labeled/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 8,999 | 40.09589 | 117 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_regularized_dropout_2D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='ACDC/Regularized_Dropout', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum iteration number to train')
parser.add_argument('--batch_size', type=int, default=24,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=int, nargs=2, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=136,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=4.0, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def kaiming_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def xavier_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def patients_to_slices(dataset, patients_num):
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        print("Error")
    return ref_dict[str(patients_num)]
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
def create_model(ema=False):
# Network definition
model = net_factory(net_type=args.model, in_chns=1,
class_num=num_classes)
if ema:
for param in model.parameters():
param.detach_()
return model
model1 = create_model()
model2 = create_model()
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
RandomGenerator(args.patch_size)
]))
db_val = BaseDataSets(base_dir=args.root_path, split="val")
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(
total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model1.train()
model2.train()
valloader = DataLoader(db_val, batch_size=1, shuffle=False,
num_workers=1)
optimizer1 = optim.SGD(model1.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
optimizer2 = optim.SGD(model2.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance1 = 0.0
best_performance2 = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
outputs1 = model1(volume_batch)
outputs_soft1 = torch.softmax(outputs1, dim=1)
outputs2 = model2(volume_batch)
outputs_soft2 = torch.softmax(outputs2, dim=1)
consistency_weight = get_current_consistency_weight(iter_num // 150)
model1_loss = 0.5 * (ce_loss(outputs1[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + dice_loss(
outputs_soft1[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
model2_loss = 0.5 * (ce_loss(outputs2[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + dice_loss(
outputs_soft2[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
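            # R-Drop-style regularisation: compute_kl_loss is assumed to be a
            # symmetrised KL divergence between the two networks' predictions
            # on the unlabeled part of the batch, scaled by the ramped-up weight.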
r_drop_loss = losses.compute_kl_loss(outputs1[args.labeled_bs:], outputs2[args.labeled_bs:])
loss = model1_loss + model2_loss + consistency_weight * r_drop_loss
optimizer1.zero_grad()
optimizer2.zero_grad()
loss.backward()
optimizer1.step()
optimizer2.step()
iter_num = iter_num + 1
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer1.param_groups:
param_group['lr'] = lr_
for param_group in optimizer2.param_groups:
param_group['lr'] = lr_
writer.add_scalar('lr', lr_, iter_num)
writer.add_scalar(
'consistency_weight/consistency_weight', consistency_weight, iter_num)
writer.add_scalar('loss/model1_loss',
model1_loss, iter_num)
writer.add_scalar('loss/model2_loss',
model2_loss, iter_num)
writer.add_scalar('loss/r_drop_loss',
r_drop_loss, iter_num)
logging.info('iteration %d : model1 loss : %f model2 loss : %f r_drop_loss: %f' % (iter_num, model1_loss.item(), model2_loss.item(), r_drop_loss.item()))
if iter_num % 50 == 0:
image = volume_batch[1, 0:1, :, :]
writer.add_image('train/Image', image, iter_num)
outputs = torch.argmax(torch.softmax(
outputs1, dim=1), dim=1, keepdim=True)
writer.add_image('train/model1_Prediction',
outputs[1, ...] * 50, iter_num)
outputs = torch.argmax(torch.softmax(
outputs2, dim=1), dim=1, keepdim=True)
writer.add_image('train/model2_Prediction',
outputs[1, ...] * 50, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image('train/GroundTruth', labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model1.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model1, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/model1_val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/model1_val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance1 = np.mean(metric_list, axis=0)[0]
mean_hd951 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/model1_val_mean_dice', performance1, iter_num)
writer.add_scalar('info/model1_val_mean_hd95', mean_hd951, iter_num)
if performance1 > best_performance1:
best_performance1 = performance1
save_mode_path = os.path.join(snapshot_path,
'model1_iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance1, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model1.pth'.format(args.model))
torch.save(model1.state_dict(), save_mode_path)
torch.save(model1.state_dict(), save_best)
logging.info(
'iteration %d : model1_mean_dice : %f model1_mean_hd95 : %f' % (iter_num, performance1, mean_hd951))
model1.train()
model2.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model2, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/model2_val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/model2_val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance2 = np.mean(metric_list, axis=0)[0]
mean_hd952 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/model2_val_mean_dice', performance2, iter_num)
writer.add_scalar('info/model2_val_mean_hd95', mean_hd952, iter_num)
if performance2 > best_performance2:
best_performance2 = performance2
save_mode_path = os.path.join(snapshot_path,
'model2_iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance2, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model2.pth'.format(args.model))
torch.save(model2.state_dict(), save_mode_path)
torch.save(model2.state_dict(), save_best)
logging.info(
'iteration %d : model2_mean_dice : %f model2_mean_hd95 : %f' % (iter_num, performance2, mean_hd952))
model2.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'model1_iter_' + str(iter_num) + '.pth')
torch.save(model1.state_dict(), save_mode_path)
logging.info("save model1 to {}".format(save_mode_path))
save_mode_path = os.path.join(
snapshot_path, 'model2_iter_' + str(iter_num) + '.pth')
torch.save(model2.state_dict(), save_mode_path)
logging.info("save model2 to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
time1 = time.time()
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 14,926 | 42.141618 | 165 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_cross_pseudo_supervision_3D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='BraTs2019_Cross_Pseudo_Supervision', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum iteration number to train')
parser.add_argument('--batch_size', type=int, default=4,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=int, nargs=3, default=[96, 96, 96],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=25,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def kaiming_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def xavier_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def train(args, snapshot_path):
base_lr = args.base_lr
train_data_path = args.root_path
batch_size = args.batch_size
max_iterations = args.max_iterations
num_classes = 2
net1 = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes).cuda()
net2 = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes).cuda()
model1 = kaiming_normal_init_weight(net1)
model2 = xavier_normal_init_weight(net2)
model1.train()
model2.train()
db_train = BraTS2019(base_dir=train_data_path,
split='train',
num=None,
transform=transforms.Compose([
RandomRotFlip(),
RandomCrop(args.patch_size),
ToTensor(),
]))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
labeled_idxs = list(range(0, args.labeled_num))
unlabeled_idxs = list(range(args.labeled_num, 250))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size - args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
optimizer1 = optim.SGD(model1.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
optimizer2 = optim.SGD(model2.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
best_performance1 = 0.0
best_performance2 = 0.0
iter_num = 0
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
max_epoch = max_iterations // len(trainloader) + 1
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
outputs1 = model1(volume_batch)
outputs_soft1 = torch.softmax(outputs1, dim=1)
outputs2 = model2(volume_batch)
outputs_soft2 = torch.softmax(outputs2, dim=1)
consistency_weight = get_current_consistency_weight(iter_num // 150)
            loss1 = 0.5 * (ce_loss(outputs1[:args.labeled_bs],
                                   label_batch[:args.labeled_bs].long()) + dice_loss(
                outputs_soft1[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
            loss2 = 0.5 * (ce_loss(outputs2[:args.labeled_bs],
                                   label_batch[:args.labeled_bs].long()) + dice_loss(
                outputs_soft2[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
pseudo_outputs1 = torch.argmax(outputs_soft1[args.labeled_bs:].detach(), dim=1, keepdim=False)
pseudo_outputs2 = torch.argmax(outputs_soft2[args.labeled_bs:].detach(), dim=1, keepdim=False)
pseudo_supervision1 = ce_loss(outputs1[args.labeled_bs:], pseudo_outputs2)
pseudo_supervision2 = ce_loss(outputs2[args.labeled_bs:], pseudo_outputs1)
model1_loss = loss1 + consistency_weight * pseudo_supervision1
model2_loss = loss2 + consistency_weight * pseudo_supervision2
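            # Cross Pseudo Supervision: on the unlabeled half of the batch each
            # network is trained with cross-entropy against the other network's
            # hard argmax pseudo labels (detached, so no gradient reaches the
            # network that produced them).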
loss = model1_loss + model2_loss
optimizer1.zero_grad()
optimizer2.zero_grad()
loss.backward()
optimizer1.step()
optimizer2.step()
iter_num = iter_num + 1
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group1 in optimizer1.param_groups:
param_group1['lr'] = lr_
for param_group2 in optimizer2.param_groups:
param_group2['lr'] = lr_
writer.add_scalar('lr', lr_, iter_num)
writer.add_scalar(
'consistency_weight/consistency_weight', consistency_weight, iter_num)
writer.add_scalar('loss/model1_loss',
model1_loss, iter_num)
writer.add_scalar('loss/model2_loss',
model2_loss, iter_num)
logging.info(
'iteration %d : model1 loss : %f model2 loss : %f' % (iter_num, model1_loss.item(), model2_loss.item()))
if iter_num % 50 == 0:
image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=True)
writer.add_image('train/Image', grid_image, iter_num)
image = outputs_soft1[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Model1_Predicted_label',
grid_image, iter_num)
image = outputs_soft2[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Model2_Predicted_label',
grid_image, iter_num)
image = label_batch[0, :, :, 20:61:10].unsqueeze(
0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Groundtruth_label',
grid_image, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model1.eval()
avg_metric1 = test_all_case(
model1, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
stride_xy=64, stride_z=64)
if avg_metric1[:, 0].mean() > best_performance1:
best_performance1 = avg_metric1[:, 0].mean()
save_mode_path = os.path.join(snapshot_path,
'model1_iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance1, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model1.pth'.format(args.model))
torch.save(model1.state_dict(), save_mode_path)
torch.save(model1.state_dict(), save_best)
writer.add_scalar('info/model1_val_dice_score',
avg_metric1[0, 0], iter_num)
writer.add_scalar('info/model1_val_hd95',
avg_metric1[0, 1], iter_num)
logging.info(
'iteration %d : model1_dice_score : %f model1_hd95 : %f' % (
iter_num, avg_metric1[0, 0].mean(), avg_metric1[0, 1].mean()))
model1.train()
model2.eval()
avg_metric2 = test_all_case(
model2, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
stride_xy=64, stride_z=64)
if avg_metric2[:, 0].mean() > best_performance2:
best_performance2 = avg_metric2[:, 0].mean()
save_mode_path = os.path.join(snapshot_path,
'model2_iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance2, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model2.pth'.format(args.model))
torch.save(model2.state_dict(), save_mode_path)
torch.save(model2.state_dict(), save_best)
writer.add_scalar('info/model2_val_dice_score',
avg_metric2[0, 0], iter_num)
writer.add_scalar('info/model2_val_hd95',
avg_metric2[0, 1], iter_num)
logging.info(
'iteration %d : model2_dice_score : %f model2_hd95 : %f' % (
iter_num, avg_metric2[0, 0].mean(), avg_metric2[0, 1].mean()))
model2.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'model1_iter_' + str(iter_num) + '.pth')
torch.save(model1.state_dict(), save_mode_path)
logging.info("save model1 to {}".format(save_mode_path))
save_mode_path = os.path.join(
snapshot_path, 'model2_iter_' + str(iter_num) + '.pth')
torch.save(model2.state_dict(), save_mode_path)
logging.info("save model2 to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
time1 = time.time()
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 14,030 | 42.574534 | 120 |
py
|
SSL4MIS
|
SSL4MIS-master/code/val_urpc_util.py
|
import math
from glob import glob
import h5py
import nibabel as nib
import numpy as np
import SimpleITK as sitk
import torch
import torch.nn.functional as F
from medpy import metric
from tqdm import tqdm
def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1):
w, h, d = image.shape
# if the size of image is less than patch_size, then padding it
add_pad = False
if w < patch_size[0]:
w_pad = patch_size[0]-w
add_pad = True
else:
w_pad = 0
if h < patch_size[1]:
h_pad = patch_size[1]-h
add_pad = True
else:
h_pad = 0
if d < patch_size[2]:
d_pad = patch_size[2]-d
add_pad = True
else:
d_pad = 0
wl_pad, wr_pad = w_pad//2, w_pad-w_pad//2
hl_pad, hr_pad = h_pad//2, h_pad-h_pad//2
dl_pad, dr_pad = d_pad//2, d_pad-d_pad//2
if add_pad:
image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad),
(dl_pad, dr_pad)], mode='constant', constant_values=0)
ww, hh, dd = image.shape
sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1
sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1
sz = math.ceil((dd - patch_size[2]) / stride_z) + 1
# print("{}, {}, {}".format(sx, sy, sz))
score_map = np.zeros((num_classes, ) + image.shape).astype(np.float32)
cnt = np.zeros(image.shape).astype(np.float32)
for x in range(0, sx):
xs = min(stride_xy*x, ww-patch_size[0])
for y in range(0, sy):
ys = min(stride_xy * y, hh-patch_size[1])
for z in range(0, sz):
zs = min(stride_z * z, dd-patch_size[2])
test_patch = image[xs:xs+patch_size[0],
ys:ys+patch_size[1], zs:zs+patch_size[2]]
test_patch = np.expand_dims(np.expand_dims(
test_patch, axis=0), axis=0).astype(np.float32)
test_patch = torch.from_numpy(test_patch).cuda()
with torch.no_grad():
                    # URPC-style nets return predictions at four scales; only
                    # the first (assumed full-resolution) head is used here.
                    y1, _, _, _ = net(test_patch)
y = torch.softmax(y1, dim=1)
y = y.cpu().data.numpy()
y = y[0, :, :, :, :]
                score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] += y
                cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] += 1
score_map = score_map/np.expand_dims(cnt, axis=0)
label_map = np.argmax(score_map, axis=0)
if add_pad:
label_map = label_map[wl_pad:wl_pad+w,
hl_pad:hl_pad+h, dl_pad:dl_pad+d]
score_map = score_map[:, wl_pad:wl_pad +
w, hl_pad:hl_pad+h, dl_pad:dl_pad+d]
return label_map
def cal_metric(gt, pred):
if pred.sum() > 0 and gt.sum() > 0:
dice = metric.binary.dc(pred, gt)
hd95 = metric.binary.hd95(pred, gt)
return np.array([dice, hd95])
else:
return np.zeros(2)
def test_all_case(net, base_dir, test_list="val.list", num_classes=4, patch_size=(48, 160, 160), stride_xy=32, stride_z=24):
with open(base_dir + '/{}'.format(test_list), 'r') as f:
image_list = f.readlines()
image_list = [base_dir + "/data/{}.h5".format(
item.replace('\n', '').split(",")[0]) for item in image_list]
total_metric = np.zeros((num_classes-1, 2))
print("Validation begin")
for image_path in tqdm(image_list):
h5f = h5py.File(image_path, 'r')
image = h5f['image'][:]
label = h5f['label'][:]
prediction = test_single_case(
net, image, stride_xy, stride_z, patch_size, num_classes=num_classes)
for i in range(1, num_classes):
total_metric[i-1, :] += cal_metric(label == i, prediction == i)
print("Validation end")
return total_metric / len(image_list)
| 4,076 | 36.75 | 124 |
py
|
SSL4MIS
|
SSL4MIS-master/code/test_2D_fully.py
|
import argparse
import os
import shutil
import h5py
import nibabel as nib
import numpy as np
import SimpleITK as sitk
import torch
from medpy import metric
from scipy.ndimage import zoom
from tqdm import tqdm
# from networks.efficientunet import UNet
from networks.net_factory import net_factory
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='ACDC/Fully_Supervised', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
parser.add_argument('--labeled_num', type=int, default=3,
help='labeled data')
def calculate_metric_percase(pred, gt):
pred[pred > 0] = 1
gt[gt > 0] = 1
dice = metric.binary.dc(pred, gt)
asd = metric.binary.asd(pred, gt)
hd95 = metric.binary.hd95(pred, gt)
return dice, hd95, asd
def test_single_volume(case, net, test_save_path, FLAGS):
h5f = h5py.File(FLAGS.root_path + "/data/{}.h5".format(case), 'r')
image = h5f['image'][:]
label = h5f['label'][:]
prediction = np.zeros_like(label)
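    # Slice-wise 2D inference: each slice is resampled to 256x256 for the
    # network (order=0, i.e. nearest neighbour) and the prediction is
    # resampled back to the original in-plane size.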
    for ind in range(image.shape[0]):
        slice_2d = image[ind, :, :]
        x, y = slice_2d.shape[0], slice_2d.shape[1]
        slice_2d = zoom(slice_2d, (256 / x, 256 / y), order=0)
        net_input = torch.from_numpy(slice_2d).unsqueeze(
            0).unsqueeze(0).float().cuda()
        net.eval()
        with torch.no_grad():
            if FLAGS.model == "unet_urds":
                out_main, _, _, _ = net(net_input)
            else:
                out_main = net(net_input)
            out = torch.argmax(torch.softmax(
                out_main, dim=1), dim=1).squeeze(0)
            out = out.cpu().detach().numpy()
            pred = zoom(out, (x / 256, y / 256), order=0)
            prediction[ind] = pred
first_metric = calculate_metric_percase(prediction == 1, label == 1)
second_metric = calculate_metric_percase(prediction == 2, label == 2)
third_metric = calculate_metric_percase(prediction == 3, label == 3)
img_itk = sitk.GetImageFromArray(image.astype(np.float32))
img_itk.SetSpacing((1, 1, 10))
prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32))
prd_itk.SetSpacing((1, 1, 10))
lab_itk = sitk.GetImageFromArray(label.astype(np.float32))
lab_itk.SetSpacing((1, 1, 10))
sitk.WriteImage(prd_itk, test_save_path + case + "_pred.nii.gz")
sitk.WriteImage(img_itk, test_save_path + case + "_img.nii.gz")
sitk.WriteImage(lab_itk, test_save_path + case + "_gt.nii.gz")
return first_metric, second_metric, third_metric
def Inference(FLAGS):
with open(FLAGS.root_path + '/test.list', 'r') as f:
image_list = f.readlines()
image_list = sorted([item.replace('\n', '').split(".")[0]
for item in image_list])
snapshot_path = "../model/{}_{}_labeled/{}".format(
FLAGS.exp, FLAGS.labeled_num, FLAGS.model)
test_save_path = "../model/{}_{}_labeled/{}_predictions/".format(
FLAGS.exp, FLAGS.labeled_num, FLAGS.model)
if os.path.exists(test_save_path):
shutil.rmtree(test_save_path)
os.makedirs(test_save_path)
net = net_factory(net_type=FLAGS.model, in_chns=1,
class_num=FLAGS.num_classes)
save_mode_path = os.path.join(
snapshot_path, '{}_best_model.pth'.format(FLAGS.model))
net.load_state_dict(torch.load(save_mode_path))
print("init weight from {}".format(save_mode_path))
net.eval()
first_total = 0.0
second_total = 0.0
third_total = 0.0
for case in tqdm(image_list):
first_metric, second_metric, third_metric = test_single_volume(
case, net, test_save_path, FLAGS)
first_total += np.asarray(first_metric)
second_total += np.asarray(second_metric)
third_total += np.asarray(third_metric)
avg_metric = [first_total / len(image_list), second_total /
len(image_list), third_total / len(image_list)]
return avg_metric
if __name__ == '__main__':
FLAGS = parser.parse_args()
    avg_metric = Inference(FLAGS)
    print(avg_metric)
    print((avg_metric[0] + avg_metric[1] + avg_metric[2]) / 3)
| 4,413 | 36.40678 | 76 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_deep_co_training_2D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.discriminator import FCDiscriminator
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='ACDC/Deep_Co_Training', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum iteration number to train')
parser.add_argument('--batch_size', type=int, default=24,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=int, nargs=2, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=3,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patients_num):
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        print("Error")
    return ref_dict[str(patients_num)]
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
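# A sketch of the assumed ramp-up in utils/ramps.py (after Laine & Aila, 2017):
# sigmoid_rampup(t, T) = exp(-5 * (1 - t / T) ** 2) for t < T and 1.0 afterwards,
# so with consistency=0.1 and consistency_rampup=200 the weight climbs smoothly
# from 0.1 * exp(-5) ~= 6.7e-4 at t=0 up to 0.1 once t reaches 200.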
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
model = net_factory(net_type=args.model, in_chns=1, class_num=num_classes)
db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
RandomGenerator(args.patch_size)
]))
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(
total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
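    # Assumed TwoStreamBatchSampler behavior: every batch concatenates
    # args.labeled_bs indices drawn from labeled_idxs with
    # batch_size - args.labeled_bs indices drawn from unlabeled_idxs, so the
    # supervised and consistency terms below can slice one and the same batch.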
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)
db_val = BaseDataSets(base_dir=args.root_path, split="val")
valloader = DataLoader(db_val, batch_size=1, shuffle=False,
num_workers=1)
model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
unlabeled_volume_batch = volume_batch[args.labeled_bs:]
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
            rot_times = random.randrange(0, 4)
            rotated_unlabeled_volume_batch = torch.rot90(unlabeled_volume_batch, rot_times, [2, 3])
unlabeled_rot_outputs = model(rotated_unlabeled_volume_batch)
unlabeled_rot_outputs_soft = torch.softmax(unlabeled_rot_outputs, dim=1)
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs].long())
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = 0.5 * (loss_dice + loss_ce)
consistency_weight = get_current_consistency_weight(iter_num//150)
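            # Rotation-consistency term: predictions on the rotated unlabeled crop
            # should equal the rotated predictions on the original crop. Each MSE
            # term detaches one side (a stop-gradient), so both branches receive a
            # gradient while neither chases a moving target.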
            consistency_loss = 0.5 * (
                torch.mean((unlabeled_rot_outputs_soft.detach() -
                            torch.rot90(outputs_soft[args.labeled_bs:], rot_times, [2, 3])) ** 2) +
                torch.mean((unlabeled_rot_outputs_soft -
                            torch.rot90(outputs_soft[args.labeled_bs:].detach(), rot_times, [2, 3])) ** 2))
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
if iter_num % 20 == 0:
image = volume_batch[1, 0:1, :, :]
writer.add_image('train/Image', image, iter_num)
outputs = torch.argmax(torch.softmax(
outputs, dim=1), dim=1, keepdim=True)
writer.add_image('train/Prediction',
outputs[1, ...] * 50, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image('train/GroundTruth', labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance = np.mean(metric_list, axis=0)[0]
mean_hd95 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/val_mean_dice', performance, iter_num)
writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
if performance > best_performance:
best_performance = performance
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
logging.info(
'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}_labeled/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 11,350 | 41.354478 | 274 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_entropy_minimization_3D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='BraTs2019_Entropy_Minimization', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=25,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
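# In formula form: ema = a * ema + (1 - a) * param with a = min(1 - 1/(t + 1), alpha),
# i.e. a plain running average over the first few steps and an exponential moving
# average with decay alpha (0.99 here) once t grows large.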
def train(args, snapshot_path):
base_lr = args.base_lr
train_data_path = args.root_path
batch_size = args.batch_size
max_iterations = args.max_iterations
num_classes = 2
net = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes)
model = net.cuda()
db_train = BraTS2019(base_dir=train_data_path,
split='train',
num=None,
transform=transforms.Compose([
RandomRotFlip(),
RandomCrop(args.patch_size),
ToTensor(),
]))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
labeled_idxs = list(range(0, args.labeled_num))
unlabeled_idxs = list(range(args.labeled_num, 250))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
unlabeled_volume_batch = volume_batch[args.labeled_bs:]
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
loss_ce = ce_loss(outputs[:args.labeled_bs],
label_batch[:args.labeled_bs])
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = 0.5 * (loss_dice + loss_ce)
consistency_weight = get_current_consistency_weight(iter_num//150)
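            # Entropy minimization on every prediction in the batch. The assumed
            # implementation of losses.entropy_loss is the mean per-voxel entropy
            # -sum_c p_c * log(p_c), normalized by log(C) so it lies in [0, 1].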
consistency_loss = losses.entropy_loss(outputs_soft, C=2)
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
if iter_num % 20 == 0:
image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=True)
writer.add_image('train/Image', grid_image, iter_num)
image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Predicted_label',
grid_image, iter_num)
image = label_batch[0, :, :, 20:61:10].unsqueeze(
0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Groundtruth_label',
grid_image, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
avg_metric = test_all_case(
model, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
stride_xy=64, stride_z=64)
if avg_metric[:, 0].mean() > best_performance:
best_performance = avg_metric[:, 0].mean()
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
writer.add_scalar('info/val_dice_score',
avg_metric[0, 0], iter_num)
writer.add_scalar('info/val_hd95',
avg_metric[0, 1], iter_num)
logging.info(
'iteration %d : dice_score : %f hd95 : %f' % (iter_num, avg_metric[0, 0].mean(), avg_metric[0, 1].mean()))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 10,388 | 41.060729 | 126 |
py
|
SSL4MIS
|
SSL4MIS-master/code/config.py
|
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import os
import yaml
from yacs.config import CfgNode as CN
_C = CN()
# Base config files
_C.BASE = ['']
# -----------------------------------------------------------------------------
# Data settings
# -----------------------------------------------------------------------------
_C.DATA = CN()
# Batch size for a single GPU, could be overwritten by command line argument
_C.DATA.BATCH_SIZE = 128
# Path to dataset, could be overwritten by command line argument
_C.DATA.DATA_PATH = ''
# Dataset name
_C.DATA.DATASET = 'imagenet'
# Input image size
_C.DATA.IMG_SIZE = 224
# Interpolation to resize image (random, bilinear, bicubic)
_C.DATA.INTERPOLATION = 'bicubic'
# Use zipped dataset instead of folder dataset
# could be overwritten by command line argument
_C.DATA.ZIP_MODE = False
# Cache Data in Memory, could be overwritten by command line argument
_C.DATA.CACHE_MODE = 'part'
# Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.
_C.DATA.PIN_MEMORY = True
# Number of data loading threads
_C.DATA.NUM_WORKERS = 8
# -----------------------------------------------------------------------------
# Model settings
# -----------------------------------------------------------------------------
_C.MODEL = CN()
# Model type
_C.MODEL.TYPE = 'swin'
# Model name
_C.MODEL.NAME = 'swin_tiny_patch4_window7_224'
# Checkpoint to resume, could be overwritten by command line argument
_C.MODEL.PRETRAIN_CKPT = './pretrained_ckpt/swin_tiny_patch4_window7_224.pth'
_C.MODEL.RESUME = ''
# Number of classes, overwritten in data preparation
_C.MODEL.NUM_CLASSES = 1000
# Dropout rate
_C.MODEL.DROP_RATE = 0.0
# Drop path rate
_C.MODEL.DROP_PATH_RATE = 0.1
# Label Smoothing
_C.MODEL.LABEL_SMOOTHING = 0.1
# Swin Transformer parameters
_C.MODEL.SWIN = CN()
_C.MODEL.SWIN.PATCH_SIZE = 4
_C.MODEL.SWIN.IN_CHANS = 3
_C.MODEL.SWIN.EMBED_DIM = 96
_C.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]
_C.MODEL.SWIN.DECODER_DEPTHS = [2, 2, 6, 2]
_C.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]
_C.MODEL.SWIN.WINDOW_SIZE = 7
_C.MODEL.SWIN.MLP_RATIO = 4.
_C.MODEL.SWIN.QKV_BIAS = True
_C.MODEL.SWIN.QK_SCALE = None
_C.MODEL.SWIN.APE = False
_C.MODEL.SWIN.PATCH_NORM = True
_C.MODEL.SWIN.FINAL_UPSAMPLE = "expand_first"
# -----------------------------------------------------------------------------
# Training settings
# -----------------------------------------------------------------------------
_C.TRAIN = CN()
_C.TRAIN.START_EPOCH = 0
_C.TRAIN.EPOCHS = 300
_C.TRAIN.WARMUP_EPOCHS = 20
_C.TRAIN.WEIGHT_DECAY = 0.05
_C.TRAIN.BASE_LR = 5e-4
_C.TRAIN.WARMUP_LR = 5e-7
_C.TRAIN.MIN_LR = 5e-6
# Clip gradient norm
_C.TRAIN.CLIP_GRAD = 5.0
# Auto resume from latest checkpoint
_C.TRAIN.AUTO_RESUME = True
# Gradient accumulation steps
# could be overwritten by command line argument
_C.TRAIN.ACCUMULATION_STEPS = 0
# Whether to use gradient checkpointing to save memory
# could be overwritten by command line argument
_C.TRAIN.USE_CHECKPOINT = False
# LR scheduler
_C.TRAIN.LR_SCHEDULER = CN()
_C.TRAIN.LR_SCHEDULER.NAME = 'cosine'
# Epoch interval to decay LR, used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30
# LR decay rate, used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1
# Optimizer
_C.TRAIN.OPTIMIZER = CN()
_C.TRAIN.OPTIMIZER.NAME = 'adamw'
# Optimizer Epsilon
_C.TRAIN.OPTIMIZER.EPS = 1e-8
# Optimizer Betas
_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999)
# SGD momentum
_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
# -----------------------------------------------------------------------------
# Augmentation settings
# -----------------------------------------------------------------------------
_C.AUG = CN()
# Color jitter factor
_C.AUG.COLOR_JITTER = 0.4
# Use AutoAugment policy. "v0" or "original"
_C.AUG.AUTO_AUGMENT = 'rand-m9-mstd0.5-inc1'
# Random erase prob
_C.AUG.REPROB = 0.25
# Random erase mode
_C.AUG.REMODE = 'pixel'
# Random erase count
_C.AUG.RECOUNT = 1
# Mixup alpha, mixup enabled if > 0
_C.AUG.MIXUP = 0.8
# Cutmix alpha, cutmix enabled if > 0
_C.AUG.CUTMIX = 1.0
# Cutmix min/max ratio, overrides alpha and enables cutmix if set
_C.AUG.CUTMIX_MINMAX = None
# Probability of performing mixup or cutmix when either/both is enabled
_C.AUG.MIXUP_PROB = 1.0
# Probability of switching to cutmix when both mixup and cutmix enabled
_C.AUG.MIXUP_SWITCH_PROB = 0.5
# How to apply mixup/cutmix params. Per "batch", "pair", or "elem"
_C.AUG.MIXUP_MODE = 'batch'
# -----------------------------------------------------------------------------
# Testing settings
# -----------------------------------------------------------------------------
_C.TEST = CN()
# Whether to use center crop when testing
_C.TEST.CROP = True
# -----------------------------------------------------------------------------
# Misc
# -----------------------------------------------------------------------------
# Mixed precision opt level, if O0, no amp is used ('O0', 'O1', 'O2')
# overwritten by command line argument
_C.AMP_OPT_LEVEL = ''
# Path to output folder, overwritten by command line argument
_C.OUTPUT = ''
# Tag of experiment, overwritten by command line argument
_C.TAG = 'default'
# Frequency to save checkpoint
_C.SAVE_FREQ = 1
# Frequency to logging info
_C.PRINT_FREQ = 10
# Fixed random seed
_C.SEED = 0
# Perform evaluation only, overwritten by command line argument
_C.EVAL_MODE = False
# Test throughput only, overwritten by command line argument
_C.THROUGHPUT_MODE = False
# local rank for DistributedDataParallel, given by command line argument
_C.LOCAL_RANK = 0
def _update_config_from_file(config, cfg_file):
config.defrost()
with open(cfg_file, 'r') as f:
yaml_cfg = yaml.load(f, Loader=yaml.FullLoader)
for cfg in yaml_cfg.setdefault('BASE', ['']):
if cfg:
_update_config_from_file(
config, os.path.join(os.path.dirname(cfg_file), cfg)
)
print('=> merge config from {}'.format(cfg_file))
config.merge_from_file(cfg_file)
config.freeze()
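# Config files can inherit: a YAML file may list parents under BASE, and each
# parent is merged recursively (relative to the child's directory) before the
# child's own keys override it. A minimal sketch with hypothetical file names:
#   base.yaml:  MODEL: {TYPE: swin}
#   tiny.yaml:  BASE: ['base.yaml']
#               MODEL: {NAME: swin_tiny_patch4_window7_224}
# Loading tiny.yaml yields MODEL.TYPE == 'swin' plus the child's MODEL.NAME.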
def update_config(config, args):
_update_config_from_file(config, args.cfg)
config.defrost()
if args.opts:
config.merge_from_list(args.opts)
# merge from specific arguments
if args.batch_size:
config.DATA.BATCH_SIZE = args.batch_size
if args.zip:
config.DATA.ZIP_MODE = True
if args.cache_mode:
config.DATA.CACHE_MODE = args.cache_mode
if args.resume:
config.MODEL.RESUME = args.resume
if args.accumulation_steps:
config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps
if args.use_checkpoint:
config.TRAIN.USE_CHECKPOINT = True
if args.amp_opt_level:
config.AMP_OPT_LEVEL = args.amp_opt_level
if args.tag:
config.TAG = args.tag
if args.eval:
config.EVAL_MODE = True
if args.throughput:
config.THROUGHPUT_MODE = True
config.freeze()
def get_config(args):
"""Get a yacs CfgNode object with default values."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
config = _C.clone()
update_config(config, args)
return config
| 7,353 | 30.973913 | 79 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_cross_consistency_training_2D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import BaseDataSets, RandomGenerator, TwoStreamBatchSampler
from utils import losses, metrics, ramps
from val_2D import test_single_volume_ds
from networks.net_factory import net_factory
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='ACDC/Cross_Consistency_Training', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_cct', help='model_name')
parser.add_argument('--max_iterations', type=int,
default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[256, 256],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=7,
help='labeled data')
# costs
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patients_num):
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        print("Error")
    return ref_dict[str(patients_num)]
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
model = net_factory(net_type=args.model, in_chns=1,
class_num=num_classes)
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
RandomGenerator(args.patch_size)
]))
db_val = BaseDataSets(base_dir=args.root_path, split="val")
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(
total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size - args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model.train()
valloader = DataLoader(db_val, batch_size=1, shuffle=False,
num_workers=1)
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
kl_distance = nn.KLDivLoss(reduction='none')
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
outputs, outputs_aux1, outputs_aux2, outputs_aux3 = model(
volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
outputs_aux1_soft = torch.softmax(outputs_aux1, dim=1)
outputs_aux2_soft = torch.softmax(outputs_aux2, dim=1)
outputs_aux3_soft = torch.softmax(outputs_aux3, dim=1)
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs].long())
            loss_ce_aux1 = ce_loss(outputs_aux1[:args.labeled_bs],
                                   label_batch[:args.labeled_bs].long())
            loss_ce_aux2 = ce_loss(outputs_aux2[:args.labeled_bs],
                                   label_batch[:args.labeled_bs].long())
            loss_ce_aux3 = ce_loss(outputs_aux3[:args.labeled_bs],
                                   label_batch[:args.labeled_bs].long())
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
loss_dice_aux1 = dice_loss(
outputs_aux1_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
loss_dice_aux2 = dice_loss(
outputs_aux2_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
loss_dice_aux3 = dice_loss(
outputs_aux3_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = (loss_ce + loss_ce_aux1 + loss_ce_aux2 + loss_ce_aux3 +
loss_dice + loss_dice_aux1 + loss_dice_aux2 + loss_dice_aux3) / 8
consistency_weight = get_current_consistency_weight(iter_num // 150)
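            # Cross-consistency term: each auxiliary decoder is pulled toward the
            # main decoder's softmax on the unlabeled half of the batch via plain
            # MSE, then the three auxiliary penalties are averaged.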
consistency_loss_aux1 = torch.mean(
(outputs_soft[args.labeled_bs:] - outputs_aux1_soft[args.labeled_bs:]) ** 2)
consistency_loss_aux2 = torch.mean(
(outputs_soft[args.labeled_bs:] - outputs_aux2_soft[args.labeled_bs:]) ** 2)
consistency_loss_aux3 = torch.mean(
(outputs_soft[args.labeled_bs:] - outputs_aux3_soft[args.labeled_bs:]) ** 2)
consistency_loss = (consistency_loss_aux1 + consistency_loss_aux2 + consistency_loss_aux3) / 3
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
if iter_num % 20 == 0:
image = volume_batch[1, 0:1, :, :]
writer.add_image('train/Image', image, iter_num)
outputs = torch.argmax(torch.softmax(
outputs, dim=1), dim=1, keepdim=True)
writer.add_image('train/Prediction',
outputs[1, ...] * 50, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image('train/GroundTruth', labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume_ds(
sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes - 1):
writer.add_scalar('info/val_{}_dice'.format(class_i + 1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/val_{}_hd95'.format(class_i + 1),
metric_list[class_i, 1], iter_num)
performance = np.mean(metric_list, axis=0)[0]
mean_hd95 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/val_mean_dice', performance, iter_num)
writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
if performance > best_performance:
best_performance = performance
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
logging.info(
'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}_labeled/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 12,274 | 43.154676 | 108 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_fixmatch_cta.py
|
import argparse
import logging
import os
import re
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.distributed as dist
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.distributions import Categorical
from torchvision import transforms
from torchvision.utils import make_grid, save_image
from tqdm import tqdm
import augmentations
from PIL import Image
from dataloaders import utils
from dataloaders.dataset import (
BaseDataSets,
CTATransform,
RandomGenerator,
TwoStreamBatchSampler,
WeakStrongAugment,
)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps, util
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
parser.add_argument("--root_path", type=str, default="../data/ACDC", help="Name of Experiment")
parser.add_argument("--exp", type=str, default="ACDC/FixMatch", help="experiment_name")
parser.add_argument("--model", type=str, default="unet", help="model_name")
parser.add_argument("--max_iterations", type=int, default=30000, help="maximum epoch number to train")
parser.add_argument("--batch_size", type=int, default=24, help="batch_size per gpu")
parser.add_argument("--deterministic", type=int, default=1, help="whether use deterministic training")
parser.add_argument("--base_lr", type=float, default=0.01, help="segmentation network learning rate")
parser.add_argument("--patch_size", type=list, default=[256, 256], help="patch size of network input")
parser.add_argument("--seed", type=int, default=1337, help="random seed")
parser.add_argument("--num_classes", type=int, default=4, help="output channel of network")
parser.add_argument("--load", default=False, action="store_true", help="restore previous checkpoint")
parser.add_argument(
"--conf_thresh",
type=float,
default=0.8,
help="confidence threshold for using pseudo-labels",
)
parser.add_argument("--labeled_bs", type=int, default=12, help="labeled_batch_size per gpu")
# parser.add_argument('--labeled_num', type=int, default=136,
parser.add_argument("--labeled_num", type=int, default=7, help="labeled data")
# costs
parser.add_argument("--ema_decay", type=float, default=0.99, help="ema_decay")
parser.add_argument("--consistency_type", type=str, default="mse", help="consistency_type")
parser.add_argument("--consistency", type=float, default=0.1, help="consistency")
parser.add_argument("--consistency_rampup", type=float, default=200.0, help="consistency_rampup")
args = parser.parse_args()
def kaiming_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def xavier_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def patients_to_slices(dataset, patients_num):
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {
            "3": 68,
            "7": 136,
            "14": 256,
            "21": 396,
            "28": 512,
            "35": 664,
            "140": 1312,
        }
    elif "Prostate" in dataset:
        ref_dict = {
            "2": 27,
            "4": 53,
            "8": 120,
            "12": 179,
            "16": 256,
            "21": 312,
            "42": 623,
        }
    else:
        print("Error")
    return ref_dict[str(patients_num)]
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# teacher network: ema_model
# student network: model
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
def create_model(ema=False):
# Network definition
model = net_factory(net_type=args.model, in_chns=1, class_num=num_classes)
if ema:
for param in model.parameters():
param.detach_()
return model
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
def refresh_policies(db_train, cta):
db_train.ops_weak = cta.policy(probe=False, weak=True)
db_train.ops_strong = cta.policy(probe=False, weak=False)
logging.info(f"\nWeak Policy: {db_train.ops_weak}")
logging.info(f"Strong Policy: {db_train.ops_strong}")
cta = augmentations.ctaugment.CTAugment()
transform = CTATransform(args.patch_size, cta)
# sample initial weak and strong augmentation policies (CTAugment)
ops_weak = cta.policy(probe=False, weak=True)
ops_strong = cta.policy(probe=False, weak=False)
db_train = BaseDataSets(
base_dir=args.root_path,
split="train",
num=None,
transform=transform,
ops_weak=ops_weak,
ops_strong=ops_strong,
)
db_val = BaseDataSets(base_dir=args.root_path, split="val")
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(labeled_idxs, unlabeled_idxs, batch_size, batch_size - args.labeled_bs)
model = create_model()
ema_model = create_model(ema=True)
iter_num = 0
start_epoch = 0
# instantiate optimizers
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
# if restoring previous models:
if args.load:
try:
# check if there is previous progress to be restored:
logging.info(f"Snapshot path: {snapshot_path}")
iter_num = []
for filename in os.listdir(snapshot_path):
if "model_iter" in filename:
basename, extension = os.path.splitext(filename)
iter_num.append(int(basename.split("_")[2]))
iter_num = max(iter_num)
for filename in os.listdir(snapshot_path):
if "model_iter" in filename and str(iter_num) in filename:
model_checkpoint = filename
except Exception as e:
logging.warning(f"Error finding previous checkpoints: {e}")
try:
logging.info(f"Restoring model checkpoint: {model_checkpoint}")
model, optimizer, start_epoch, performance = util.load_checkpoint(
snapshot_path + "/" + model_checkpoint, model, optimizer
)
logging.info(f"Models restored from iteration {iter_num}")
except Exception as e:
logging.warning(f"Unable to restore model checkpoint: {e}, using new model")
trainloader = DataLoader(
db_train,
batch_sampler=batch_sampler,
num_workers=4,
pin_memory=True,
worker_init_fn=worker_init_fn,
)
valloader = DataLoader(db_val, batch_size=1, shuffle=False, num_workers=1)
model.train()
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + "/log")
logging.info("{} iterations per epoch".format(len(trainloader)))
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iter_num = int(iter_num)
iterator = tqdm(range(start_epoch, max_epoch), ncols=70)
for epoch_num in iterator:
# track mean error for entire epoch
epoch_errors = []
# refresh augmentation policies with each new epoch
refresh_policies(db_train, cta)
for i_batch, sampled_batch in enumerate(trainloader):
weak_batch, strong_batch, label_batch = (
sampled_batch["image_weak"],
sampled_batch["image_strong"],
sampled_batch["label_aug"],
)
weak_batch, strong_batch, label_batch = (
weak_batch.cuda(),
strong_batch.cuda(),
label_batch.cuda(),
)
            # handle unfavorable cropping: skip batches that are almost all
            # background (24 * 256 * 256 assumes the default batch_size and
            # patch_size; adjust if those flags change)
            non_zero_ratio = torch.count_nonzero(label_batch) / (24 * 256 * 256)
if non_zero_ratio <= 0.02:
logging.info("Refreshing policy...")
refresh_policies(db_train, cta)
continue
# model preds
outputs_weak = model(weak_batch)
outputs_weak_soft = torch.softmax(outputs_weak, dim=1)
outputs_strong = model(strong_batch)
outputs_strong_soft = torch.softmax(outputs_strong, dim=1)
# getting pseudo labels
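            # FixMatch-style targets: the EMA teacher predicts on the weakly
            # augmented batch and its per-pixel argmax becomes the hard label for
            # the student's strongly augmented predictions further below.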
with torch.no_grad():
ema_outputs_soft = torch.softmax(ema_model(weak_batch), dim=1)
pseudo_outputs = torch.argmax(
ema_outputs_soft.detach(),
dim=1,
keepdim=False,
)
consistency_weight = get_current_consistency_weight(iter_num // 150)
# supervised loss (weak preds against ground truth)
            sup_loss = ce_loss(outputs_weak[: args.labeled_bs], label_batch[: args.labeled_bs].long(),) + dice_loss(
outputs_weak_soft[: args.labeled_bs],
label_batch[: args.labeled_bs].unsqueeze(1),
)
# unsupervised loss (strong preds against pseudo label)
unsup_loss = ce_loss(outputs_strong[args.labeled_bs :], pseudo_outputs[args.labeled_bs :]) + dice_loss(
outputs_strong_soft[args.labeled_bs :],
pseudo_outputs[args.labeled_bs :].unsqueeze(1),
)
loss = sup_loss + consistency_weight * unsup_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
update_ema_variables(model, ema_model, args.ema_decay, iter_num)
iter_num = iter_num + 1
# track batch-level error, used to update augmentation policy
epoch_errors.append(0.5 * loss.item())
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group["lr"] = lr_
writer.add_scalar("lr", lr_, iter_num)
writer.add_scalar("consistency_weight/consistency_weight", consistency_weight, iter_num)
writer.add_scalar("loss/model_loss", loss, iter_num)
logging.info("iteration %d : model loss : %f" % (iter_num, loss.item()))
if iter_num % 50 == 0:
# show weakly augmented image
image = weak_batch[1, 0:1, :, :]
writer.add_image("train/Image", image, iter_num)
# show strongly augmented image
image_strong = strong_batch[1, 0:1, :, :]
writer.add_image("train/StrongImage", image_strong, iter_num)
# show model prediction (strong augment)
outputs_strong = torch.argmax(outputs_strong_soft, dim=1, keepdim=True)
writer.add_image("train/model_Prediction", outputs_strong[1, ...] * 50, iter_num)
# show ground truth label
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image("train/GroundTruth", labs, iter_num)
# show generated pseudo label
pseudo_labs = pseudo_outputs[1, ...].unsqueeze(0) * 50
writer.add_image("train/PseudoLabel", pseudo_labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
ema_model.eval()
metric_list = 0.0
with torch.no_grad():
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"],
sampled_batch["label"],
ema_model,
classes=num_classes,
)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes - 1):
writer.add_scalar(
"info/model_val_{}_dice".format(class_i + 1),
metric_list[class_i, 0],
iter_num,
)
writer.add_scalar(
"info/model_val_{}_hd95".format(class_i + 1),
metric_list[class_i, 1],
iter_num,
)
performance = np.mean(metric_list, axis=0)[0]
mean_hd95 = np.mean(metric_list, axis=0)[1]
writer.add_scalar("info/model_val_mean_dice", performance, iter_num)
writer.add_scalar("info/model_val_mean_hd95", mean_hd95, iter_num)
if performance > best_performance:
best_performance = performance
save_mode_path = os.path.join(
snapshot_path,
"model_iter_{}_dice_{}.pth".format(iter_num, round(best_performance, 4)),
)
save_best = os.path.join(snapshot_path, "{}_best_model.pth".format(args.model))
util.save_checkpoint(epoch_num, model, optimizer, loss, save_mode_path)
util.save_checkpoint(epoch_num, model, optimizer, loss, save_best)
logging.info(
"iteration %d : model_mean_dice : %f model_mean_hd95 : %f" % (iter_num, performance, mean_hd95)
)
model.train()
ema_model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(snapshot_path, "model_iter_" + str(iter_num) + ".pth")
util.save_checkpoint(epoch_num, model, optimizer, loss, save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
# update policy parameter bins for sampling
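        # Assumed CTAugment semantics: update_rates moves each sampled policy's
        # bin weights toward the score 1 - 0.5 * mean epoch error, so policies
        # that kept the training loss low get sampled more often later on.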
mean_epoch_error = np.mean(epoch_errors)
cta.update_rates(db_train.ops_weak, 1.0 - 0.5 * mean_epoch_error)
cta.update_rates(db_train.ops_strong, 1.0 - 0.5 * mean_epoch_error)
writer.close()
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}/{}".format(args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + "/code"):
shutil.rmtree(snapshot_path + "/code")
shutil.copytree(".", snapshot_path + "/code", shutil.ignore_patterns([".git", "__pycache__"]))
logging.basicConfig(
filename=snapshot_path + "/log.txt",
level=logging.INFO,
format="[%(asctime)s.%(msecs)03d] %(message)s",
datefmt="%H:%M:%S",
)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 16,647 | 38.079812 | 119 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_fully_supervised_3D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='BraTs2019_Fully_Supervised', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=2,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--labeled_num', type=int, default=25,
help='labeled data')
args = parser.parse_args()
def train(args, snapshot_path):
base_lr = args.base_lr
train_data_path = args.root_path
batch_size = args.batch_size
max_iterations = args.max_iterations
num_classes = 2
model = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes)
db_train = BraTS2019(base_dir=train_data_path,
split='train',
num=args.labeled_num,
transform=transforms.Compose([
RandomRotFlip(),
RandomCrop(args.patch_size),
ToTensor(),
]))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True,
num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)
model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(2)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
loss_ce = ce_loss(outputs, label_batch)
loss_dice = dice_loss(outputs_soft, label_batch.unsqueeze(1))
loss = 0.5 * (loss_dice + loss_ce)
optimizer.zero_grad()
loss.backward()
optimizer.step()
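            # Poly learning-rate decay: lr = base_lr * (1 - iter/max_iter) ** 0.9.
            # With base_lr=0.01 and max_iterations=30000 this gives ~0.0054 at
            # iteration 15000 and approaches 0 at the final iteration.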
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
writer.add_scalar('loss/loss', loss, iter_num)
if iter_num % 20 == 0:
image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=True)
writer.add_image('train/Image', grid_image, iter_num)
image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Predicted_label',
grid_image, iter_num)
image = label_batch[0, :, :, 20:61:10].unsqueeze(
0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Groundtruth_label',
grid_image, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
avg_metric = test_all_case(
model, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
stride_xy=64, stride_z=64)
if avg_metric[:, 0].mean() > best_performance:
best_performance = avg_metric[:, 0].mean()
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
writer.add_scalar('info/val_dice_score',
avg_metric[0, 0], iter_num)
writer.add_scalar('info/val_hd95',
avg_metric[0, 1], iter_num)
logging.info(
'iteration %d : dice_score : %f hd95 : %f' % (iter_num, avg_metric[0, 0].mean(), avg_metric[0, 1].mean()))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}/{}".format(args.exp, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 8,444 | 40.397059 | 126 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_entropy_minimization_2D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.discriminator import FCDiscriminator
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='ACDC/Entropy_Minimization', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[256, 256],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=3,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patients_num):
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        print("Error")
    return ref_dict[str(patients_num)]
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
model = net_factory(net_type=args.model, in_chns=1, class_num=num_classes)
db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
RandomGenerator(args.patch_size)
]))
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(
total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)
db_val = BaseDataSets(base_dir=args.root_path, split="val")
valloader = DataLoader(db_val, batch_size=1, shuffle=False,
num_workers=1)
model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
unlabeled_volume_batch = volume_batch[args.labeled_bs:]
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs].long())
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = 0.5 * (loss_dice + loss_ce)
consistency_weight = get_current_consistency_weight(iter_num//150)
consistency_loss = losses.entropy_loss(outputs_soft, C=4)
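            # losses.entropy_loss is assumed to implement entropy minimization on the
            # softmax maps, roughly the pixel-wise mean of -sum_c p_c * log(p_c) / log(C),
            # pushing unlabeled predictions towards confident, low-entropy outputs.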
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
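            # poly learning-rate decay: with base_lr=0.01 this yields about 0.0054 at
            # the halfway point ((1 - 0.5) ** 0.9 is about 0.536) and reaches 0 at
            # max_iterations.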
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
if iter_num % 20 == 0:
image = volume_batch[1, 0:1, :, :]
writer.add_image('train/Image', image, iter_num)
outputs = torch.argmax(torch.softmax(
outputs, dim=1), dim=1, keepdim=True)
writer.add_image('train/Prediction',
outputs[1, ...] * 50, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image('train/GroundTruth', labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance = np.mean(metric_list, axis=0)[0]
mean_hd95 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/val_mean_dice', performance, iter_num)
writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
if performance > best_performance:
best_performance = performance
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
logging.info(
'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}_labeled/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 10,840 | 40.857143 | 108 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_fixmatch_standard_augs.py
|
import argparse
import logging
import os
import re
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.distributed as dist
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.distributions import Categorical
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (
BaseDataSets,
RandomGenerator,
TwoStreamBatchSampler,
WeakStrongAugment,
)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps, util
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
parser.add_argument("--root_path", type=str, default="../data/ACDC", help="Name of Experiment")
parser.add_argument("--exp", type=str, default="ACDC/FixMatch_standard_augs", help="experiment_name")
parser.add_argument("--model", type=str, default="unet", help="model_name")
parser.add_argument("--max_iterations", type=int, default=30000, help="maximum epoch number to train")
parser.add_argument("--batch_size", type=int, default=24, help="batch_size per gpu")
parser.add_argument("--deterministic", type=int, default=1, help="whether use deterministic training")
parser.add_argument("--base_lr", type=float, default=0.01, help="segmentation network learning rate")
parser.add_argument("--patch_size", type=list, default=[256, 256], help="patch size of network input")
parser.add_argument("--seed", type=int, default=1337, help="random seed")
parser.add_argument("--num_classes", type=int, default=4, help="output channel of network")
parser.add_argument("--load", default=False, action="store_true", help="restore previous checkpoint")
parser.add_argument(
"--conf_thresh",
type=float,
default=0.8,
help="confidence threshold for using pseudo-labels",
)
parser.add_argument("--labeled_bs", type=int, default=12, help="labeled_batch_size per gpu")
# parser.add_argument('--labeled_num', type=int, default=136,
parser.add_argument("--labeled_num", type=int, default=7, help="labeled data")
# costs
parser.add_argument("--ema_decay", type=float, default=0.99, help="ema_decay")
parser.add_argument("--consistency_type", type=str, default="mse", help="consistency_type")
parser.add_argument("--consistency", type=float, default=0.1, help="consistency")
parser.add_argument("--consistency_rampup", type=float, default=200.0, help="consistency_rampup")
args = parser.parse_args()
def kaiming_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def xavier_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def patients_to_slices(dataset, patients_num):
ref_dict = None
if "ACDC" in dataset:
ref_dict = {
"3": 68,
"7": 136,
"14": 256,
"21": 396,
"28": 512,
"35": 664,
"140": 1312,
}
elif "Prostate":
ref_dict = {
"2": 27,
"4": 53,
"8": 120,
"12": 179,
"16": 256,
"21": 312,
"42": 623,
}
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    return ref_dict[str(patients_num)]
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# teacher network: ema_model
# student network: model
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
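# Numeric example of the warm-up above: at global_step 0 the effective alpha is
# min(1 - 1/1, 0.99) = 0, so the teacher starts as a copy of the student; from step 99
# onward alpha stays at ema_decay and the update is ema = alpha * ema + (1 - alpha) * param.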
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
def create_model(ema=False):
model = net_factory(net_type=args.model, in_chns=1, class_num=num_classes)
if ema:
for param in model.parameters():
param.detach_()
return model
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
def get_comp_loss(weak, strong):
"""get complementary loss and adaptive sample weight.
Compares least likely prediction (from strong augment) with argmin of weak augment.
Args:
weak (batch): weakly augmented batch
strong (batch): strongly augmented batch
Returns:
comp_loss, as_weight
"""
il_output = torch.reshape(
strong,
(
args.batch_size,
args.num_classes,
args.patch_size[0] * args.patch_size[1],
),
)
# calculate entropy for image-level preds (tensor of length labeled_bs)
as_weight = 1 - (Categorical(probs=il_output).entropy() / np.log(args.patch_size[0] * args.patch_size[1]))
# batch level average of entropy
as_weight = torch.mean(as_weight)
# complementary loss
comp_labels = torch.argmin(weak.detach(), dim=1, keepdim=False)
comp_loss = as_weight * ce_loss(
torch.add(torch.negative(strong), 1),
comp_labels,
)
return comp_loss, as_weight
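    # Usage (see the training loop below): comp_loss, as_weight = get_comp_loss(
    #     weak=outputs_weak_soft, strong=outputs_strong_soft)
    # The entropy-based as_weight in [0, 1] down-weights the complementary
    # ("not this class") loss whenever the batch predictions are uncertain.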
    def normalize(tensor):
        # per-pixel min-max normalization across the class dimension, mapping each
        # softmax vector onto [0, 1]
        min_val = tensor.min(1, keepdim=True)[0]
        max_val = tensor.max(1, keepdim=True)[0]
        return (tensor - min_val) / (max_val - min_val)
db_train = BaseDataSets(
base_dir=args.root_path,
split="train",
num=None,
transform=transforms.Compose([WeakStrongAugment(args.patch_size)]),
)
db_val = BaseDataSets(base_dir=args.root_path, split="val")
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(labeled_idxs, unlabeled_idxs, batch_size, batch_size - args.labeled_bs)
model = create_model()
# create model for ema (this model produces pseudo-labels)
ema_model = create_model(ema=True)
iter_num = 0
start_epoch = 0
# instantiate optimizers
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
# if restoring previous models:
if args.load:
try:
# check if there is previous progress to be restored:
logging.info(f"Snapshot path: {snapshot_path}")
iter_num = []
for filename in os.listdir(snapshot_path):
if "model_iter" in filename:
basename, extension = os.path.splitext(filename)
iter_num.append(int(basename.split("_")[2]))
iter_num = max(iter_num)
for filename in os.listdir(snapshot_path):
if "model_iter" in filename and str(iter_num) in filename:
model_checkpoint = filename
except Exception as e:
logging.warning(f"Error finding previous checkpoints: {e}")
try:
logging.info(f"Restoring model checkpoint: {model_checkpoint}")
model, optimizer, start_epoch, performance = util.load_checkpoint(
snapshot_path + "/" + model_checkpoint, model, optimizer
)
logging.info(f"Models restored from iteration {iter_num}")
except Exception as e:
logging.warning(f"Unable to restore model checkpoint: {e}, using new model")
trainloader = DataLoader(
db_train,
batch_sampler=batch_sampler,
num_workers=4,
pin_memory=True,
worker_init_fn=worker_init_fn,
)
valloader = DataLoader(db_val, batch_size=1, shuffle=False, num_workers=1)
# set to train
model.train()
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + "/log")
logging.info("{} iterations per epoch".format(len(trainloader)))
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iter_num = int(iter_num)
iterator = tqdm(range(start_epoch, max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
weak_batch, strong_batch, label_batch = (
sampled_batch["image_weak"],
sampled_batch["image_strong"],
sampled_batch["label_aug"],
)
weak_batch, strong_batch, label_batch = (
weak_batch.cuda(),
strong_batch.cuda(),
label_batch.cuda(),
)
# outputs for model
outputs_weak = model(weak_batch)
outputs_weak_soft = torch.softmax(outputs_weak, dim=1)
outputs_strong = model(strong_batch)
outputs_strong_soft = torch.softmax(outputs_strong, dim=1)
# minmax normalization for softmax outputs before applying mask
pseudo_mask = (normalize(outputs_weak_soft) > args.conf_thresh).float()
outputs_weak_masked = outputs_weak_soft * pseudo_mask
pseudo_outputs = torch.argmax(outputs_weak_masked[args.labeled_bs :].detach(), dim=1, keepdim=False)
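            # Toy illustration (values assumed): a pixel with softmax [0.7, 0.2, 0.05, 0.05]
            # min-max normalizes to [1.0, 0.23, 0.0, 0.0]; with conf_thresh=0.8 only the
            # argmax class survives the mask and supplies the pseudo-label for the strong view.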
consistency_weight = get_current_consistency_weight(iter_num // 150)
# supervised loss
            sup_loss = ce_loss(outputs_weak[: args.labeled_bs], label_batch[: args.labeled_bs].long()) + dice_loss(
outputs_weak_soft[: args.labeled_bs],
label_batch[: args.labeled_bs].unsqueeze(1),
)
# complementary loss and adaptive sample weight for negative learning
comp_loss, as_weight = get_comp_loss(weak=outputs_weak_soft, strong=outputs_strong_soft)
# unsupervised loss
unsup_loss = (
ce_loss(outputs_strong[args.labeled_bs :], pseudo_outputs)
+ dice_loss(outputs_strong_soft[args.labeled_bs :], pseudo_outputs.unsqueeze(1))
+ as_weight * comp_loss
)
loss = sup_loss + consistency_weight * unsup_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
# update ema model
update_ema_variables(model, ema_model, args.ema_decay, iter_num)
# update learning rate
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group["lr"] = lr_
iter_num = iter_num + 1
writer.add_scalar("lr", lr_, iter_num)
writer.add_scalar("consistency_weight/consistency_weight", consistency_weight, iter_num)
writer.add_scalar("loss/model_loss", loss, iter_num)
logging.info("iteration %d : model loss : %f" % (iter_num, loss.item()))
if iter_num % 50 == 0:
image = weak_batch[1, 0:1, :, :]
writer.add_image("train/Image", image, iter_num)
outputs_weak = torch.argmax(torch.softmax(outputs_weak, dim=1), dim=1, keepdim=True)
writer.add_image("train/model_Prediction", outputs_weak[1, ...] * 50, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image("train/GroundTruth", labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"],
sampled_batch["label"],
model,
classes=num_classes,
)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes - 1):
writer.add_scalar(
"info/model_val_{}_dice".format(class_i + 1),
metric_list[class_i, 0],
iter_num,
)
writer.add_scalar(
"info/model_val_{}_hd95".format(class_i + 1),
metric_list[class_i, 1],
iter_num,
)
performance = np.mean(metric_list, axis=0)[0]
mean_hd95 = np.mean(metric_list, axis=0)[1]
writer.add_scalar("info/model_val_mean_dice", performance, iter_num)
writer.add_scalar("info/model_val_mean_hd95", mean_hd95, iter_num)
if performance > best_performance:
best_performance = performance
save_mode_path = os.path.join(
snapshot_path,
"model_iter_{}_dice_{}.pth".format(iter_num, round(best_performance, 4)),
)
save_best = os.path.join(snapshot_path, "{}_best_model.pth".format(args.model))
util.save_checkpoint(epoch_num, model, optimizer, loss, save_mode_path)
util.save_checkpoint(epoch_num, model, optimizer, loss, save_best)
logging.info(
"iteration %d : model_mean_dice : %f model_mean_hd95 : %f" % (iter_num, performance, mean_hd95)
)
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(snapshot_path, "model_iter_" + str(iter_num) + ".pth")
util.save_checkpoint(epoch_num, model, optimizer, loss, save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
time1 = time.time()
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}/{}".format(args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + "/code"):
shutil.rmtree(snapshot_path + "/code")
shutil.copytree(".", snapshot_path + "/code", shutil.ignore_patterns([".git", "__pycache__"]))
logging.basicConfig(
filename=snapshot_path + "/log.txt",
level=logging.INFO,
format="[%(asctime)s.%(msecs)03d] %(message)s",
datefmt="%H:%M:%S",
)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 16,133 | 37.141844 | 119 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_cross_teaching_between_cnn_transformer_2D.py
|
# -*- coding: utf-8 -*-
# Author: Xiangde Luo
# Date: 16 Dec. 2021
# Implementation for Semi-Supervised Medical Image Segmentation via Cross Teaching between CNN and Transformer.
# Reference:
# @article{luo2021ctbct,
# title={Semi-Supervised Medical Image Segmentation via Cross Teaching between CNN and Transformer},
# author={Luo, Xiangde and Hu, Minhao and Song, Tao and Wang, Guotai and Zhang, Shaoting},
# journal={arXiv preprint arXiv:2112.04894},
# year={2021}}
# In the original paper, we don't use the validation set to select checkpoints; we use the last iteration for inference for all methods.
# In addition, we combine the validation set and test set to report the results.
# We found that the random data split has some bias (the validation set is very tough and the test set is very easy).
# Actually, this setting is also a fair comparison.
# download pre-trained model to "code/pretrained_ckpt" folder, link:https://drive.google.com/drive/folders/1UC3XOoezeum0uck4KBVGa8osahs6rKUY
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm
from config import get_config
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.net_factory import net_factory
from networks.vision_transformer import SwinUnet as ViT_seg
from utils import losses, metrics, ramps
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='path of the dataset root')
parser.add_argument('--exp', type=str,
default='ACDC/Cross_Teaching_Between_CNN_Transformer', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum iteration number to train')
parser.add_argument('--batch_size', type=int, default=16,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether to use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[224, 224],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
parser.add_argument(
'--cfg', type=str, default="../code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
parser.add_argument('--zip', action='store_true',
help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int,
help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true',
help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true',
help='Test throughput only')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=8,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=7,
                    help='number of labeled patients')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
config = get_config(args)
def kaiming_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def xavier_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def patients_to_slices(dataset, patients_num):
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    return ref_dict[str(patients_num)]
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
def create_model(ema=False):
# Network definition
model = net_factory(net_type=args.model, in_chns=1,
class_num=num_classes)
if ema:
for param in model.parameters():
param.detach_()
return model
model1 = create_model()
model2 = ViT_seg(config, img_size=args.patch_size,
num_classes=args.num_classes).cuda()
model2.load_from(config)
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
RandomGenerator(args.patch_size)
]))
db_val = BaseDataSets(base_dir=args.root_path, split="val")
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(
total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model1.train()
model2.train()
valloader = DataLoader(db_val, batch_size=1, shuffle=False,
num_workers=1)
optimizer1 = optim.SGD(model1.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
optimizer2 = optim.SGD(model2.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance1 = 0.0
best_performance2 = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
outputs1 = model1(volume_batch)
outputs_soft1 = torch.softmax(outputs1, dim=1)
outputs2 = model2(volume_batch)
outputs_soft2 = torch.softmax(outputs2, dim=1)
consistency_weight = get_current_consistency_weight(
iter_num // 150)
loss1 = 0.5 * (ce_loss(outputs1[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + dice_loss(
outputs_soft1[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
loss2 = 0.5 * (ce_loss(outputs2[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + dice_loss(
outputs_soft2[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
pseudo_outputs1 = torch.argmax(
outputs_soft1[args.labeled_bs:].detach(), dim=1, keepdim=False)
pseudo_outputs2 = torch.argmax(
outputs_soft2[args.labeled_bs:].detach(), dim=1, keepdim=False)
pseudo_supervision1 = dice_loss(
outputs_soft1[args.labeled_bs:], pseudo_outputs2.unsqueeze(1))
pseudo_supervision2 = dice_loss(
outputs_soft2[args.labeled_bs:], pseudo_outputs1.unsqueeze(1))
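            # Cross teaching: each network is supervised on the unlabeled half of the batch
            # by the detached argmax pseudo-labels of the other network (CNN <-> Transformer),
            # so pseudo_supervision1 only back-propagates into model1 and vice versa.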
model1_loss = loss1 + consistency_weight * pseudo_supervision1
model2_loss = loss2 + consistency_weight * pseudo_supervision2
loss = model1_loss + model2_loss
optimizer1.zero_grad()
optimizer2.zero_grad()
loss.backward()
optimizer1.step()
optimizer2.step()
iter_num = iter_num + 1
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer1.param_groups:
param_group['lr'] = lr_
for param_group in optimizer2.param_groups:
param_group['lr'] = lr_
writer.add_scalar('lr', lr_, iter_num)
writer.add_scalar(
'consistency_weight/consistency_weight', consistency_weight, iter_num)
writer.add_scalar('loss/model1_loss',
model1_loss, iter_num)
writer.add_scalar('loss/model2_loss',
model2_loss, iter_num)
logging.info('iteration %d : model1 loss : %f model2 loss : %f' % (
iter_num, model1_loss.item(), model2_loss.item()))
if iter_num % 50 == 0:
image = volume_batch[1, 0:1, :, :]
writer.add_image('train/Image', image, iter_num)
outputs = torch.argmax(torch.softmax(
outputs1, dim=1), dim=1, keepdim=True)
writer.add_image('train/model1_Prediction',
outputs[1, ...] * 50, iter_num)
outputs = torch.argmax(torch.softmax(
outputs2, dim=1), dim=1, keepdim=True)
writer.add_image('train/model2_Prediction',
outputs[1, ...] * 50, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image('train/GroundTruth', labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model1.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model1, classes=num_classes, patch_size=args.patch_size)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/model1_val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/model1_val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance1 = np.mean(metric_list, axis=0)[0]
mean_hd951 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/model1_val_mean_dice',
performance1, iter_num)
writer.add_scalar('info/model1_val_mean_hd95',
mean_hd951, iter_num)
if performance1 > best_performance1:
best_performance1 = performance1
save_mode_path = os.path.join(snapshot_path,
'model1_iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance1, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model1.pth'.format(args.model))
torch.save(model1.state_dict(), save_mode_path)
torch.save(model1.state_dict(), save_best)
logging.info(
'iteration %d : model1_mean_dice : %f model1_mean_hd95 : %f' % (iter_num, performance1, mean_hd951))
model1.train()
model2.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model2, classes=num_classes, patch_size=args.patch_size)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/model2_val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/model2_val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance2 = np.mean(metric_list, axis=0)[0]
mean_hd952 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/model2_val_mean_dice',
performance2, iter_num)
writer.add_scalar('info/model2_val_mean_hd95',
mean_hd952, iter_num)
if performance2 > best_performance2:
best_performance2 = performance2
save_mode_path = os.path.join(snapshot_path,
'model2_iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance2, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model2.pth'.format(args.model))
torch.save(model2.state_dict(), save_mode_path)
torch.save(model2.state_dict(), save_best)
logging.info(
'iteration %d : model2_mean_dice : %f model2_mean_hd95 : %f' % (iter_num, performance2, mean_hd952))
model2.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'model1_iter_' + str(iter_num) + '.pth')
torch.save(model1.state_dict(), save_mode_path)
logging.info("save model1 to {}".format(save_mode_path))
save_mode_path = os.path.join(
snapshot_path, 'model2_iter_' + str(iter_num) + '.pth')
torch.save(model2.state_dict(), save_mode_path)
logging.info("save model2 to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
time1 = time.time()
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 18,123 | 43.530713 | 142 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_adversarial_network_3D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.discriminator import FC3DDiscriminator
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/BraTS2019', help='path of the dataset root')
parser.add_argument('--exp', type=str,
default='BraTs2019_Adversarial_Network', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum iteration number to train')
parser.add_argument('--batch_size', type=int, default=4,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether to use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--DAN_lr', type=float, default=0.0001,
help='DAN learning rate')
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=25,
                    help='number of labeled cases')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
num_classes = 2
base_lr = args.base_lr
train_data_path = args.root_path
batch_size = args.batch_size
max_iterations = args.max_iterations
net = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes)
model = net.cuda()
DAN = FC3DDiscriminator(num_classes=num_classes)
DAN = DAN.cuda()
db_train = BraTS2019(base_dir=train_data_path,
split='train',
num=None,
transform=transforms.Compose([
RandomRotFlip(),
RandomCrop(args.patch_size),
ToTensor(),
]))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
labeled_idxs = list(range(0, args.labeled_num))
unlabeled_idxs = list(range(args.labeled_num, 250))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
DAN_optimizer = optim.Adam(
DAN.parameters(), lr=args.DAN_lr, betas=(0.9, 0.99))
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(2)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
DAN_target = torch.tensor([1, 1, 0, 0]).cuda()
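            # Discriminator targets for the default batch layout (batch_size=4,
            # labeled_bs=2): the first two samples are labeled (target 1), the last
            # two are unlabeled (target 0).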
model.train()
DAN.eval()
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
loss_ce = ce_loss(outputs[:args.labeled_bs],
label_batch[:args.labeled_bs])
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = 0.5 * (loss_dice + loss_ce)
consistency_weight = get_current_consistency_weight(iter_num//150)
DAN_outputs = DAN(
outputs_soft[args.labeled_bs:], volume_batch[args.labeled_bs:])
consistency_loss = F.cross_entropy(
DAN_outputs, (DAN_target[:args.labeled_bs]).long())
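            # Adversarial consistency: the segmenter is updated so that DAN classifies its
            # unlabeled predictions as labeled (targets of ones), mirroring the generator
            # step of a GAN.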
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
model.eval()
DAN.train()
with torch.no_grad():
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
DAN_outputs = DAN(outputs_soft, volume_batch)
DAN_loss = F.cross_entropy(DAN_outputs, DAN_target.long())
DAN_optimizer.zero_grad()
DAN_loss.backward()
DAN_optimizer.step()
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
if iter_num % 20 == 0:
image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=True)
writer.add_image('train/Image', grid_image, iter_num)
image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Predicted_label',
grid_image, iter_num)
image = label_batch[0, :, :, 20:61:10].unsqueeze(
0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Groundtruth_label',
grid_image, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
avg_metric = test_all_case(
model, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
stride_xy=64, stride_z=64)
if avg_metric[:, 0].mean() > best_performance:
best_performance = avg_metric[:, 0].mean()
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
writer.add_scalar('info/val_dice_score',
avg_metric[0, 0], iter_num)
writer.add_scalar('info/val_hd95',
avg_metric[0, 1], iter_num)
logging.info(
'iteration %d : dice_score : %f hd95 : %f' % (iter_num, avg_metric[0, 0].mean(), avg_metric[0, 1].mean()))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 11,326 | 40.643382 | 126 |
py
|
SSL4MIS
|
SSL4MIS-master/code/val_2D.py
|
import numpy as np
import torch
from medpy import metric
from scipy.ndimage import zoom
def calculate_metric_percase(pred, gt):
pred[pred > 0] = 1
gt[gt > 0] = 1
if pred.sum() > 0:
dice = metric.binary.dc(pred, gt)
hd95 = metric.binary.hd95(pred, gt)
return dice, hd95
else:
return 0, 0
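# Example (hypothetical masks): identical binary masks give Dice = 1.0 and HD95 = 0.0;
# an all-background prediction is scored (0, 0) by the pred.sum() guard above instead
# of letting medpy raise on an empty foreground.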
def test_single_volume(image, label, net, classes, patch_size=[256, 256]):
image, label = image.squeeze(0).cpu().detach(
).numpy(), label.squeeze(0).cpu().detach().numpy()
prediction = np.zeros_like(label)
for ind in range(image.shape[0]):
slice = image[ind, :, :]
x, y = slice.shape[0], slice.shape[1]
slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=0)
input = torch.from_numpy(slice).unsqueeze(
0).unsqueeze(0).float().cuda()
net.eval()
with torch.no_grad():
out = torch.argmax(torch.softmax(
net(input), dim=1), dim=1).squeeze(0)
out = out.cpu().detach().numpy()
pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
prediction[ind] = pred
metric_list = []
for i in range(1, classes):
metric_list.append(calculate_metric_percase(
prediction == i, label == i))
return metric_list
def test_single_volume_ds(image, label, net, classes, patch_size=[256, 256]):
image, label = image.squeeze(0).cpu().detach(
).numpy(), label.squeeze(0).cpu().detach().numpy()
prediction = np.zeros_like(label)
for ind in range(image.shape[0]):
slice = image[ind, :, :]
x, y = slice.shape[0], slice.shape[1]
slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=0)
input = torch.from_numpy(slice).unsqueeze(
0).unsqueeze(0).float().cuda()
net.eval()
with torch.no_grad():
output_main, _, _, _ = net(input)
out = torch.argmax(torch.softmax(
output_main, dim=1), dim=1).squeeze(0)
out = out.cpu().detach().numpy()
pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
prediction[ind] = pred
metric_list = []
for i in range(1, classes):
metric_list.append(calculate_metric_percase(
prediction == i, label == i))
return metric_list
| 2,359 | 35.307692 | 77 |
py
|
SSL4MIS
|
SSL4MIS-master/code/test_urpc.py
|
import argparse
import os
import shutil
from glob import glob
import numpy
import torch
from networks.unet_3D_dv_semi import unet_3D_dv_semi
from networks.unet_3D import unet_3D
from test_urpc_util import test_all_case
def net_factory(net_type="unet_3D", num_classes=3, in_channels=1):
if net_type == "unet_3D":
net = unet_3D(n_classes=num_classes, in_channels=in_channels).cuda()
elif net_type == "unet_3D_dv_semi":
net = unet_3D_dv_semi(n_classes=num_classes,
in_channels=in_channels).cuda()
else:
net = None
return net
def Inference(FLAGS):
snapshot_path = "../model/{}/{}".format(FLAGS.exp, FLAGS.model)
num_classes = 2
test_save_path = "../model/{}/Prediction".format(FLAGS.exp)
if os.path.exists(test_save_path):
shutil.rmtree(test_save_path)
os.makedirs(test_save_path)
net = net_factory(FLAGS.model, num_classes, in_channels=1)
save_mode_path = os.path.join(
snapshot_path, '{}_best_model.pth'.format(FLAGS.model))
net.load_state_dict(torch.load(save_mode_path))
print("init weight from {}".format(save_mode_path))
net.eval()
avg_metric = test_all_case(net, base_dir=FLAGS.root_path, method=FLAGS.model, test_list="test.txt", num_classes=num_classes,
patch_size=(96, 96, 96), stride_xy=64, stride_z=64, test_save_path=test_save_path)
return avg_metric
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                        default='../data/BraTS2019', help='path of the dataset root')
parser.add_argument('--exp', type=str,
default="BraTS2019/Uncertainty_Rectified_Pyramid_Consistency_25_labeled", help='experiment_name')
parser.add_argument('--model', type=str,
default="unet_3D_dv_semi", help='model_name')
FLAGS = parser.parse_args()
metric = Inference(FLAGS)
print(metric)
| 1,990 | 34.553571 | 128 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_regularized_dropout_3D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/BraTS2019', help='path of the dataset root')
parser.add_argument('--exp', type=str,
default='Regularized_Dropout', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum iteration number to train')
parser.add_argument('--batch_size', type=int, default=4,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether to use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[64, 128, 128],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=25,
                    help='number of labeled cases')
parser.add_argument('--total_num', type=int, default=50,
help='total data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=4.0, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def kaiming_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def xavier_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def train(args, snapshot_path):
base_lr = args.base_lr
train_data_path = args.root_path
batch_size = args.batch_size
max_iterations = args.max_iterations
num_classes = 2
net1 = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes).cuda()
net2 = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes).cuda()
model1 = kaiming_normal_init_weight(net1)
model2 = xavier_normal_init_weight(net2)
model1.train()
model2.train()
db_train = BraTS2019(base_dir=train_data_path,
split='train',
num=None,
transform=transforms.Compose([
RandomRotFlip(),
RandomCrop(args.patch_size),
ToTensor(),
]))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
labeled_idxs = list(range(0, args.labeled_num))
unlabeled_idxs = list(range(args.labeled_num, args.total_num))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size - args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
optimizer1 = optim.SGD(model1.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
optimizer2 = optim.SGD(model2.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
best_performance1 = 0.0
best_performance2 = 0.0
iter_num = 0
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
max_epoch = max_iterations // len(trainloader) + 1
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
outputs1 = model1(volume_batch)
outputs_soft1 = torch.softmax(outputs1, dim=1)
outputs2 = model2(volume_batch)
outputs_soft2 = torch.softmax(outputs2, dim=1)
consistency_weight = get_current_consistency_weight(iter_num // 150)
            model1_loss = 0.5 * (ce_loss(outputs1[:args.labeled_bs],
                                         label_batch[:args.labeled_bs].long()) + dice_loss(
                outputs_soft1[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
            model2_loss = 0.5 * (ce_loss(outputs2[:args.labeled_bs],
                                         label_batch[:args.labeled_bs].long()) + dice_loss(
                outputs_soft2[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
r_drop_loss = losses.compute_kl_loss(outputs1[args.labeled_bs:], outputs2[args.labeled_bs:])
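            # losses.compute_kl_loss is assumed to be the symmetric R-Drop objective,
            # roughly 0.5 * (KL(p1 || p2) + KL(p2 || p1)) between the unlabeled logits of
            # the two differently-initialized networks.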
loss = model1_loss + model2_loss + consistency_weight * r_drop_loss
optimizer1.zero_grad()
optimizer2.zero_grad()
loss.backward()
optimizer1.step()
optimizer2.step()
iter_num = iter_num + 1
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group1 in optimizer1.param_groups:
param_group1['lr'] = lr_
for param_group2 in optimizer2.param_groups:
param_group2['lr'] = lr_
writer.add_scalar('lr', lr_, iter_num)
writer.add_scalar(
'consistency_weight/consistency_weight', consistency_weight, iter_num)
writer.add_scalar('loss/model1_loss',
model1_loss, iter_num)
writer.add_scalar('loss/model2_loss',
model2_loss, iter_num)
writer.add_scalar('loss/r_drop_loss',
r_drop_loss, iter_num)
logging.info('iteration %d : model1 loss : %f model2 loss : %f r_drop_loss: %f' % (
iter_num, model1_loss.item(), model2_loss.item(), r_drop_loss.item()))
if iter_num % 50 == 0:
image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=True)
writer.add_image('train/Image', grid_image, iter_num)
image = outputs_soft1[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Model1_Predicted_label',
grid_image, iter_num)
image = outputs_soft2[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Model2_Predicted_label',
grid_image, iter_num)
image = label_batch[0, :, :, 20:61:10].unsqueeze(
0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Groundtruth_label',
grid_image, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model1.eval()
avg_metric1 = test_all_case(
model1, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
stride_xy=64, stride_z=64)
if avg_metric1[:, 0].mean() > best_performance1:
best_performance1 = avg_metric1[:, 0].mean()
save_mode_path = os.path.join(snapshot_path,
'model1_iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance1, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model1.pth'.format(args.model))
torch.save(model1.state_dict(), save_mode_path)
torch.save(model1.state_dict(), save_best)
writer.add_scalar('info/model1_val_dice_score',
avg_metric1[0, 0], iter_num)
writer.add_scalar('info/model1_val_hd95',
avg_metric1[0, 1], iter_num)
logging.info(
'iteration %d : model1_dice_score : %f model1_hd95 : %f' % (
iter_num, avg_metric1[0, 0].mean(), avg_metric1[0, 1].mean()))
model1.train()
model2.eval()
avg_metric2 = test_all_case(
model2, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
stride_xy=64, stride_z=64)
if avg_metric2[:, 0].mean() > best_performance2:
best_performance2 = avg_metric2[:, 0].mean()
save_mode_path = os.path.join(snapshot_path,
'model2_iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance2, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model2.pth'.format(args.model))
torch.save(model2.state_dict(), save_mode_path)
torch.save(model2.state_dict(), save_best)
writer.add_scalar('info/model2_val_dice_score',
avg_metric2[0, 0], iter_num)
writer.add_scalar('info/model2_val_hd95',
avg_metric2[0, 1], iter_num)
logging.info(
'iteration %d : model2_dice_score : %f model2_hd95 : %f' % (
iter_num, avg_metric2[0, 0].mean(), avg_metric2[0, 1].mean()))
model2.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'model1_iter_' + str(iter_num) + '.pth')
torch.save(model1.state_dict(), save_mode_path)
logging.info("save model1 to {}".format(save_mode_path))
save_mode_path = os.path.join(
snapshot_path, 'model2_iter_' + str(iter_num) + '.pth')
torch.save(model2.state_dict(), save_mode_path)
logging.info("save model2 to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
time1 = time.time()
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 13,874 | 42.359375 | 107 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_uncertainty_rectified_pyramid_consistency_2D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import BaseDataSets, RandomGenerator, TwoStreamBatchSampler
from utils import losses, metrics, ramps
from val_2D import test_single_volume_ds
from networks.net_factory import net_factory
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='path of the dataset root')
parser.add_argument('--exp', type=str,
default='ACDC/Uncertainty_Rectified_Pyramid_Consistency', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_urpc', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum iteration number to train')
parser.add_argument('--batch_size', type=int, default=24,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether to use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[256, 256],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=7,
help='labeled data')
# costs
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patients_num):
ref_dict = None
if "ACDC" in dataset:
ref_dict = {"3": 68, "7": 136,
"14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
elif "Prostate":
ref_dict = {"2": 27, "4": 53, "8": 120,
"12": 179, "16": 256, "21": 312, "42": 623}
else:
print("Error")
    return ref_dict[str(patients_num)]
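# The dict above maps a labeled-patient budget (e.g. 7 ACDC patients) to the
# number of 2D training slices those patients contribute, so the
# labeled/unlabeled index split in train() can be expressed in slice units.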
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
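# A minimal reference sketch of the ramp-up assumed above (the real
# implementation lives in utils/ramps.py, not shown here; the function name
# below is hypothetical and added for illustration only):
def _sigmoid_rampup_sketch(current, rampup_length):
    # w(t) = exp(-5 * (1 - t/T)^2) for t in [0, T], so the consistency weight
    # grows smoothly from ~0.007 to 1 over `rampup_length` pseudo-epochs.
    if rampup_length == 0:
        return 1.0
    phase = 1.0 - np.clip(current, 0.0, rampup_length) / rampup_length
    return float(np.exp(-5.0 * phase * phase))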
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
model = net_factory(net_type=args.model, in_chns=1,
class_num=num_classes)
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
RandomGenerator(args.patch_size)
]))
db_val = BaseDataSets(base_dir=args.root_path, split="val")
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(
total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model.train()
valloader = DataLoader(db_val, batch_size=1, shuffle=False,
num_workers=1)
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
kl_distance = nn.KLDivLoss(reduction='none')
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
outputs, outputs_aux1, outputs_aux2, outputs_aux3 = model(
volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
outputs_aux1_soft = torch.softmax(outputs_aux1, dim=1)
outputs_aux2_soft = torch.softmax(outputs_aux2, dim=1)
outputs_aux3_soft = torch.softmax(outputs_aux3, dim=1)
loss_ce = ce_loss(outputs[:args.labeled_bs],
label_batch[:args.labeled_bs][:].long())
loss_ce_aux1 = ce_loss(outputs_aux1[:args.labeled_bs],
label_batch[:args.labeled_bs][:].long())
loss_ce_aux2 = ce_loss(outputs_aux2[:args.labeled_bs],
label_batch[:args.labeled_bs][:].long())
loss_ce_aux3 = ce_loss(outputs_aux3[:args.labeled_bs],
label_batch[:args.labeled_bs][:].long())
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
loss_dice_aux1 = dice_loss(
outputs_aux1_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
loss_dice_aux2 = dice_loss(
outputs_aux2_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
loss_dice_aux3 = dice_loss(
outputs_aux3_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = (loss_ce+loss_ce_aux1+loss_ce_aux2+loss_ce_aux3 +
loss_dice+loss_dice_aux1+loss_dice_aux2+loss_dice_aux3)/8
preds = (outputs_soft+outputs_aux1_soft +
outputs_aux2_soft+outputs_aux3_soft)/4
variance_main = torch.sum(kl_distance(
torch.log(outputs_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
exp_variance_main = torch.exp(-variance_main)
variance_aux1 = torch.sum(kl_distance(
torch.log(outputs_aux1_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
exp_variance_aux1 = torch.exp(-variance_aux1)
variance_aux2 = torch.sum(kl_distance(
torch.log(outputs_aux2_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
exp_variance_aux2 = torch.exp(-variance_aux2)
variance_aux3 = torch.sum(kl_distance(
torch.log(outputs_aux3_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
exp_variance_aux3 = torch.exp(-variance_aux3)
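            # Uncertainty-rectified consistency (URPC): for each scale s the
            # per-pixel variance is v_s = KL(p_avg || p_s) summed over channels,
            # the rectifying weight is exp(-v_s), and each loss below is
            # mean(exp(-v_s) * (p_avg - p_s)^2) / mean(exp(-v_s)) + mean(v_s),
            # so confident (low-variance) pixels dominate the consistency term.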
consistency_weight = get_current_consistency_weight(iter_num//150)
consistency_dist_main = (
preds[args.labeled_bs:] - outputs_soft[args.labeled_bs:]) ** 2
consistency_loss_main = torch.mean(
consistency_dist_main * exp_variance_main) / (torch.mean(exp_variance_main) + 1e-8) + torch.mean(variance_main)
consistency_dist_aux1 = (
preds[args.labeled_bs:] - outputs_aux1_soft[args.labeled_bs:]) ** 2
consistency_loss_aux1 = torch.mean(
consistency_dist_aux1 * exp_variance_aux1) / (torch.mean(exp_variance_aux1) + 1e-8) + torch.mean(variance_aux1)
consistency_dist_aux2 = (
preds[args.labeled_bs:] - outputs_aux2_soft[args.labeled_bs:]) ** 2
consistency_loss_aux2 = torch.mean(
consistency_dist_aux2 * exp_variance_aux2) / (torch.mean(exp_variance_aux2) + 1e-8) + torch.mean(variance_aux2)
consistency_dist_aux3 = (
preds[args.labeled_bs:] - outputs_aux3_soft[args.labeled_bs:]) ** 2
consistency_loss_aux3 = torch.mean(
consistency_dist_aux3 * exp_variance_aux3) / (torch.mean(exp_variance_aux3) + 1e-8) + torch.mean(variance_aux3)
consistency_loss = (consistency_loss_main + consistency_loss_aux1 +
consistency_loss_aux2 + consistency_loss_aux3) / 4
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
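            # Polynomial ("poly") learning-rate decay: lr = base_lr * (1 - i/I)**0.9,
            # annealing smoothly to 0 at i = max_iterations.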
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
if iter_num % 20 == 0:
image = volume_batch[1, 0:1, :, :]
writer.add_image('train/Image', image, iter_num)
outputs = torch.argmax(torch.softmax(
outputs, dim=1), dim=1, keepdim=True)
writer.add_image('train/Prediction',
outputs[1, ...] * 50, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image('train/GroundTruth', labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume_ds(
sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance = np.mean(metric_list, axis=0)[0]
mean_hd95 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/val_mean_dice', performance, iter_num)
writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
if performance > best_performance:
best_performance = performance
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
logging.info(
'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}_labeled/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 14,087 | 44.153846 | 127 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_uncertainty_rectified_pyramid_consistency_3D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.unet_3D_dv_semi import unet_3D_dv_semi
from utils import losses, metrics, ramps
from val_urpc_util import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/GTV', help='path of the dataset')
parser.add_argument('--exp', type=str,
default='GTV/uncertainty_rectified_pyramid_consistency', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_3D_dv_semi', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=60000, help='maximum number of iterations to train')
parser.add_argument('--batch_size', type=int, default=4,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether to use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.1,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=18,
help='labeled data')
parser.add_argument('--total_labeled_num', type=int, default=180,
help='total labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=400.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
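# EMA update used above: alpha_t = min(1 - 1/(t+1), alpha), then for every pair
# of parameters theta_ema <- alpha_t * theta_ema + (1 - alpha_t) * theta. The
# min() makes the teacher a plain running average early in training before it
# settles into an exponential average with decay `alpha`.
# (Defined for parity with the mean-teacher scripts; the URPC training loop
# below does not use it.)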
def train(args, snapshot_path):
num_classes = 3
base_lr = args.base_lr
train_data_path = args.root_path
batch_size = args.batch_size
max_iterations = args.max_iterations
net = unet_3D_dv_semi(n_classes=num_classes, in_channels=1)
model = net.cuda()
db_train = BraTS2019(base_dir=train_data_path,
split='train',
num=None,
transform=transforms.Compose([
RandomRotFlip(),
RandomCrop(args.patch_size),
ToTensor(),
]))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
labeled_idxs = list(range(0, args.labeled_num))
unlabeled_idxs = list(range(args.labeled_num, args.total_labeled_num))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
kl_distance = nn.KLDivLoss(reduction='none')
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
unlabeled_volume_batch = volume_batch[args.labeled_bs:]
            outputs_aux1, outputs_aux2, outputs_aux3, outputs_aux4 = model(
volume_batch)
outputs_aux1_soft = torch.softmax(outputs_aux1, dim=1)
outputs_aux2_soft = torch.softmax(outputs_aux2, dim=1)
outputs_aux3_soft = torch.softmax(outputs_aux3, dim=1)
outputs_aux4_soft = torch.softmax(outputs_aux4, dim=1)
loss_ce_aux1 = ce_loss(outputs_aux1[:args.labeled_bs],
label_batch[:args.labeled_bs])
loss_ce_aux2 = ce_loss(outputs_aux2[:args.labeled_bs],
label_batch[:args.labeled_bs])
loss_ce_aux3 = ce_loss(outputs_aux3[:args.labeled_bs],
label_batch[:args.labeled_bs])
loss_ce_aux4 = ce_loss(outputs_aux4[:args.labeled_bs],
label_batch[:args.labeled_bs])
loss_dice_aux1 = dice_loss(
outputs_aux1_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
loss_dice_aux2 = dice_loss(
outputs_aux2_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
loss_dice_aux3 = dice_loss(
outputs_aux3_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
loss_dice_aux4 = dice_loss(
outputs_aux4_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = (loss_ce_aux1+loss_ce_aux2+loss_ce_aux3+loss_ce_aux4 +
loss_dice_aux1+loss_dice_aux2+loss_dice_aux3+loss_dice_aux4)/8
preds = (outputs_aux1_soft +
outputs_aux2_soft+outputs_aux3_soft+outputs_aux4_soft)/4
variance_aux1 = torch.sum(kl_distance(
torch.log(outputs_aux1_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
exp_variance_aux1 = torch.exp(-variance_aux1)
variance_aux2 = torch.sum(kl_distance(
torch.log(outputs_aux2_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
exp_variance_aux2 = torch.exp(-variance_aux2)
variance_aux3 = torch.sum(kl_distance(
torch.log(outputs_aux3_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
exp_variance_aux3 = torch.exp(-variance_aux3)
variance_aux4 = torch.sum(kl_distance(
torch.log(outputs_aux4_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
exp_variance_aux4 = torch.exp(-variance_aux4)
consistency_weight = get_current_consistency_weight(iter_num//150)
consistency_dist_aux1 = (
preds[args.labeled_bs:] - outputs_aux1_soft[args.labeled_bs:]) ** 2
consistency_loss_aux1 = torch.mean(
consistency_dist_aux1 * exp_variance_aux1) / (torch.mean(exp_variance_aux1) + 1e-8) + torch.mean(variance_aux1)
consistency_dist_aux2 = (
preds[args.labeled_bs:] - outputs_aux2_soft[args.labeled_bs:]) ** 2
consistency_loss_aux2 = torch.mean(
consistency_dist_aux2 * exp_variance_aux2) / (torch.mean(exp_variance_aux2) + 1e-8) + torch.mean(variance_aux2)
consistency_dist_aux3 = (
preds[args.labeled_bs:] - outputs_aux3_soft[args.labeled_bs:]) ** 2
consistency_loss_aux3 = torch.mean(
consistency_dist_aux3 * exp_variance_aux3) / (torch.mean(exp_variance_aux3) + 1e-8) + torch.mean(variance_aux3)
consistency_dist_aux4 = (
preds[args.labeled_bs:] - outputs_aux4_soft[args.labeled_bs:]) ** 2
consistency_loss_aux4 = torch.mean(
consistency_dist_aux4 * exp_variance_aux4) / (torch.mean(exp_variance_aux4) + 1e-8) + torch.mean(variance_aux4)
consistency_loss = (consistency_loss_aux1 +
consistency_loss_aux2 + consistency_loss_aux3 + consistency_loss_aux4) / 4
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/supervised_loss',
supervised_loss, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, supervised_loss: %f' %
(iter_num, loss.item(), supervised_loss.item()))
if iter_num % 20 == 0:
image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=True)
writer.add_image('train/Image', grid_image, iter_num)
image = torch.argmax(outputs_aux1_soft, dim=1, keepdim=True)[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1) * 100
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Predicted_label',
grid_image, iter_num)
image = label_batch[0, :, :, 20:61:10].unsqueeze(
0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1) * 100
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Groundtruth_label',
grid_image, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
avg_metric = test_all_case(
model, args.root_path, test_list="val.txt", num_classes=num_classes, patch_size=args.patch_size,
stride_xy=64, stride_z=64)
if avg_metric[:, 0].mean() > best_performance:
best_performance = avg_metric[:, 0].mean()
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
for cls in range(1, num_classes):
writer.add_scalar('info/val_cls_{}_dice_score'.format(cls),
avg_metric[cls - 1, 0], iter_num)
writer.add_scalar('info/val_cls_{}_hd95'.format(cls),
avg_metric[cls - 1, 1], iter_num)
writer.add_scalar('info/val_mean_dice_score',
avg_metric[:, 0].mean(), iter_num)
writer.add_scalar('info/val_mean_hd95',
avg_metric[:, 1].mean(), iter_num)
logging.info(
'iteration %d : dice_score : %f hd95 : %f' % (
iter_num, avg_metric[:, 0].mean(), avg_metric[:, 1].mean()))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 14,470 | 45.085987 | 127 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_cross_pseudo_supervision_2D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='path of the dataset')
parser.add_argument('--exp', type=str,
default='ACDC/Cross_Pseudo_Supervision', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum number of iterations to train')
parser.add_argument('--batch_size', type=int, default=24,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether to use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[256, 256],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=7,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def kaiming_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def xavier_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
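# These two initializers are a common way to de-correlate the two CPS networks;
# in this script model1/model2 below come out of net_factory with its default
# initialization instead. A hypothetical usage would be:
#   model1 = kaiming_normal_init_weight(create_model())
#   model2 = xavier_normal_init_weight(create_model())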
def patients_to_slices(dataset, patients_num):
ref_dict = None
if "ACDC" in dataset:
ref_dict = {"3": 68, "7": 136,
"14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
elif "Prostate":
ref_dict = {"2": 27, "4": 53, "8": 120,
"12": 179, "16": 256, "21": 312, "42": 623}
else:
print("Error")
    return ref_dict[str(patients_num)]
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
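# Note: update_ema_variables is defined for parity with the mean-teacher
# scripts; cross pseudo supervision below trains two independent networks and
# never calls this EMA update.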
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
def create_model(ema=False):
# Network definition
model = net_factory(net_type=args.model, in_chns=1,
class_num=num_classes)
if ema:
for param in model.parameters():
param.detach_()
return model
model1 = create_model()
model2 = create_model()
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
RandomGenerator(args.patch_size)
]))
db_val = BaseDataSets(base_dir=args.root_path, split="val")
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(
total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model1.train()
model2.train()
valloader = DataLoader(db_val, batch_size=1, shuffle=False,
num_workers=1)
optimizer1 = optim.SGD(model1.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
optimizer2 = optim.SGD(model2.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance1 = 0.0
best_performance2 = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
outputs1 = model1(volume_batch)
outputs_soft1 = torch.softmax(outputs1, dim=1)
outputs2 = model2(volume_batch)
outputs_soft2 = torch.softmax(outputs2, dim=1)
consistency_weight = get_current_consistency_weight(iter_num // 150)
loss1 = 0.5 * (ce_loss(outputs1[:args.labeled_bs], label_batch[:][:args.labeled_bs].long()) + dice_loss(
outputs_soft1[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
loss2 = 0.5 * (ce_loss(outputs2[:args.labeled_bs], label_batch[:][:args.labeled_bs].long()) + dice_loss(
outputs_soft2[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
pseudo_outputs1 = torch.argmax(outputs_soft1[args.labeled_bs:].detach(), dim=1, keepdim=False)
pseudo_outputs2 = torch.argmax(outputs_soft2[args.labeled_bs:].detach(), dim=1, keepdim=False)
pseudo_supervision1 = ce_loss(outputs1[args.labeled_bs:], pseudo_outputs2)
pseudo_supervision2 = ce_loss(outputs2[args.labeled_bs:], pseudo_outputs1)
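            # Cross pseudo supervision: each network is trained on the other's
            # hard (argmax, detached) pseudo labels over the unlabeled half of
            # the batch, so no gradient flows through the label-producing net.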
model1_loss = loss1 + consistency_weight * pseudo_supervision1
model2_loss = loss2 + consistency_weight * pseudo_supervision2
loss = model1_loss + model2_loss
optimizer1.zero_grad()
optimizer2.zero_grad()
loss.backward()
optimizer1.step()
optimizer2.step()
iter_num = iter_num + 1
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer1.param_groups:
param_group['lr'] = lr_
for param_group in optimizer2.param_groups:
param_group['lr'] = lr_
writer.add_scalar('lr', lr_, iter_num)
writer.add_scalar(
'consistency_weight/consistency_weight', consistency_weight, iter_num)
writer.add_scalar('loss/model1_loss',
model1_loss, iter_num)
writer.add_scalar('loss/model2_loss',
model2_loss, iter_num)
logging.info('iteration %d : model1 loss : %f model2 loss : %f' % (iter_num, model1_loss.item(), model2_loss.item()))
if iter_num % 50 == 0:
image = volume_batch[1, 0:1, :, :]
writer.add_image('train/Image', image, iter_num)
outputs = torch.argmax(torch.softmax(
outputs1, dim=1), dim=1, keepdim=True)
writer.add_image('train/model1_Prediction',
outputs[1, ...] * 50, iter_num)
outputs = torch.argmax(torch.softmax(
outputs2, dim=1), dim=1, keepdim=True)
writer.add_image('train/model2_Prediction',
outputs[1, ...] * 50, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image('train/GroundTruth', labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model1.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model1, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/model1_val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/model1_val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance1 = np.mean(metric_list, axis=0)[0]
mean_hd951 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/model1_val_mean_dice', performance1, iter_num)
writer.add_scalar('info/model1_val_mean_hd95', mean_hd951, iter_num)
if performance1 > best_performance1:
best_performance1 = performance1
save_mode_path = os.path.join(snapshot_path,
'model1_iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance1, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model1.pth'.format(args.model))
torch.save(model1.state_dict(), save_mode_path)
torch.save(model1.state_dict(), save_best)
logging.info(
'iteration %d : model1_mean_dice : %f model1_mean_hd95 : %f' % (iter_num, performance1, mean_hd951))
model1.train()
model2.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model2, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/model2_val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/model2_val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance2 = np.mean(metric_list, axis=0)[0]
mean_hd952 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/model2_val_mean_dice', performance2, iter_num)
writer.add_scalar('info/model2_val_mean_hd95', mean_hd952, iter_num)
if performance2 > best_performance2:
best_performance2 = performance2
save_mode_path = os.path.join(snapshot_path,
'model2_iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance2, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model2.pth'.format(args.model))
torch.save(model2.state_dict(), save_mode_path)
torch.save(model2.state_dict(), save_best)
logging.info(
'iteration %d : model2_mean_dice : %f model2_mean_hd95 : %f' % (iter_num, performance2, mean_hd952))
model2.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'model1_iter_' + str(iter_num) + '.pth')
torch.save(model1.state_dict(), save_mode_path)
logging.info("save model1 to {}".format(save_mode_path))
save_mode_path = os.path.join(
snapshot_path, 'model2_iter_' + str(iter_num) + '.pth')
torch.save(model2.state_dict(), save_mode_path)
logging.info("save model2 to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
time1 = time.time()
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 15,182 | 42.38 | 129 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_uncertainty_aware_mean_teacher_3D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/BraTS2019', help='path of the dataset')
parser.add_argument('--exp', type=str,
default='BraTs2019_Mean_Teacher', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum number of iterations to train')
parser.add_argument('--batch_size', type=int, default=4,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether to use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=25,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
base_lr = args.base_lr
train_data_path = args.root_path
batch_size = args.batch_size
max_iterations = args.max_iterations
num_classes = 2
def create_model(ema=False):
# Network definition
net = net_factory_3d(net_type=args.model,
in_chns=1, class_num=num_classes)
model = net.cuda()
if ema:
for param in model.parameters():
param.detach_()
return model
model = create_model()
ema_model = create_model(ema=True)
db_train = BraTS2019(base_dir=train_data_path,
split='train',
num=None,
transform=transforms.Compose([
RandomRotFlip(),
RandomCrop(args.patch_size),
ToTensor(),
]))
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
labeled_idxs = list(range(0, args.labeled_num))
unlabeled_idxs = list(range(args.labeled_num, 250))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model.train()
ema_model.train()
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(2)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
unlabeled_volume_batch = volume_batch[args.labeled_bs:]
noise = torch.clamp(torch.randn_like(
unlabeled_volume_batch) * 0.1, -0.2, 0.2)
ema_inputs = unlabeled_volume_batch + noise
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
with torch.no_grad():
ema_output = ema_model(ema_inputs)
T = 8
_, _, d, w, h = unlabeled_volume_batch.shape
volume_batch_r = unlabeled_volume_batch.repeat(2, 1, 1, 1, 1)
stride = volume_batch_r.shape[0] // 2
preds = torch.zeros([stride * T, 2, d, w, h]).cuda()
for i in range(T//2):
ema_inputs = volume_batch_r + \
torch.clamp(torch.randn_like(
volume_batch_r) * 0.1, -0.2, 0.2)
with torch.no_grad():
preds[2 * stride * i:2 * stride *
(i + 1)] = ema_model(ema_inputs)
preds = torch.softmax(preds, dim=1)
preds = preds.reshape(T, stride, 2, d, w, h)
preds = torch.mean(preds, dim=0)
uncertainty = -1.0 * \
torch.sum(preds*torch.log(preds + 1e-6), dim=1, keepdim=True)
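            # Predictive entropy of the teacher: u = -sum_c p_bar_c * log(p_bar_c + 1e-6),
            # where p_bar is the mean softmax over the T noisy forward passes
            # above. High u marks voxels the teacher is unsure about.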
loss_ce = ce_loss(outputs[:args.labeled_bs],
label_batch[:args.labeled_bs])
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = 0.5 * (loss_dice + loss_ce)
consistency_weight = get_current_consistency_weight(iter_num//150)
consistency_dist = losses.softmax_mse_loss(
                outputs[args.labeled_bs:], ema_output)  # unreduced, shape (batch, 2, 96, 96, 96)
threshold = (0.75+0.25*ramps.sigmoid_rampup(iter_num,
max_iterations))*np.log(2)
mask = (uncertainty < threshold).float()
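            # The entropy threshold ramps from 0.75*ln(2) to ln(2) (the maximum
            # entropy of a 2-class softmax), so ever more voxels pass the mask
            # as training progresses.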
consistency_loss = torch.sum(
mask*consistency_dist)/(2*torch.sum(mask)+1e-16)
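            # softmax_mse_loss is unreduced here (see the shape comment above),
            # while `mask` has a single channel, so 2*torch.sum(mask) averages
            # the masked squared error over both voxels and the 2 class channels.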
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
update_ema_variables(model, ema_model, args.ema_decay, iter_num)
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
writer.add_scalar('loss/loss', loss, iter_num)
if iter_num % 20 == 0:
image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=True)
writer.add_image('train/Image', grid_image, iter_num)
image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Predicted_label',
grid_image, iter_num)
image = label_batch[0, :, :, 20:61:10].unsqueeze(
0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Groundtruth_label',
grid_image, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
avg_metric = test_all_case(
model, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
stride_xy=64, stride_z=64)
if avg_metric[:, 0].mean() > best_performance:
best_performance = avg_metric[:, 0].mean()
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
writer.add_scalar('info/val_dice_score',
avg_metric[0, 0], iter_num)
writer.add_scalar('info/val_hd95',
avg_metric[0, 1], iter_num)
logging.info(
'iteration %d : dice_score : %f hd95 : %f' % (iter_num, avg_metric[0, 0].mean(), avg_metric[0, 1].mean()))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 12,302 | 41.570934 | 126 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_uncertainty_aware_mean_teacher_2D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='path of the dataset')
parser.add_argument('--exp', type=str,
default='ACDC/Uncertainty_Aware_Mean_Teacher', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum number of iterations to train')
parser.add_argument('--batch_size', type=int, default=24,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether to use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[256, 256],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=7,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patients_num):
ref_dict = None
if "ACDC" in dataset:
ref_dict = {"3": 68, "7": 136,
"14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
elif "Prostate":
ref_dict = {"2": 27, "4": 53, "8": 120,
"12": 179, "16": 256, "21": 312, "42": 623}
else:
print("Error")
    return ref_dict[str(patients_num)]
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
def create_model(ema=False):
# Network definition
model = net_factory(net_type=args.model, in_chns=1,
class_num=num_classes)
if ema:
for param in model.parameters():
param.detach_()
return model
model = create_model()
ema_model = create_model(ema=True)
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
RandomGenerator(args.patch_size)
]))
db_val = BaseDataSets(base_dir=args.root_path, split="val")
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(
total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model.train()
valloader = DataLoader(db_val, batch_size=1, shuffle=False,
num_workers=1)
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
unlabeled_volume_batch = volume_batch[args.labeled_bs:]
noise = torch.clamp(torch.randn_like(
unlabeled_volume_batch) * 0.1, -0.2, 0.2)
ema_inputs = unlabeled_volume_batch + noise
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
with torch.no_grad():
ema_output = ema_model(ema_inputs)
T = 8
_, _, w, h = unlabeled_volume_batch.shape
volume_batch_r = unlabeled_volume_batch.repeat(2, 1, 1, 1)
stride = volume_batch_r.shape[0] // 2
preds = torch.zeros([stride * T, num_classes, w, h]).cuda()
for i in range(T//2):
ema_inputs = volume_batch_r + \
torch.clamp(torch.randn_like(
volume_batch_r) * 0.1, -0.2, 0.2)
with torch.no_grad():
preds[2 * stride * i:2 * stride *
(i + 1)] = ema_model(ema_inputs)
preds = F.softmax(preds, dim=1)
preds = preds.reshape(T, stride, num_classes, w, h)
preds = torch.mean(preds, dim=0)
uncertainty = -1.0 * \
torch.sum(preds*torch.log(preds + 1e-6), dim=1, keepdim=True)
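            # The unlabeled sub-batch is duplicated (repeat(2, ...)) so each of
            # the T//2 loop iterations produces two noisy teacher passes, i.e.
            # T = 8 softmax samples per pixel averaged into `preds` above.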
loss_ce = ce_loss(outputs[:args.labeled_bs],
label_batch[:args.labeled_bs][:].long())
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = 0.5 * (loss_dice + loss_ce)
consistency_weight = get_current_consistency_weight(iter_num//150)
consistency_dist = losses.softmax_mse_loss(
                outputs[args.labeled_bs:], ema_output)  # (batch, num_classes, 256, 256)
threshold = (0.75+0.25*ramps.sigmoid_rampup(iter_num,
max_iterations))*np.log(2)
mask = (uncertainty < threshold).float()
consistency_loss = torch.sum(
mask*consistency_dist)/(2*torch.sum(mask)+1e-16)
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
update_ema_variables(model, ema_model, args.ema_decay, iter_num)
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
if iter_num % 20 == 0:
image = volume_batch[1, 0:1, :, :]
writer.add_image('train/Image', image, iter_num)
outputs = torch.argmax(torch.softmax(
outputs, dim=1), dim=1, keepdim=True)
writer.add_image('train/Prediction',
outputs[1, ...] * 50, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image('train/GroundTruth', labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance = np.mean(metric_list, axis=0)[0]
mean_hd95 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/val_mean_dice', performance, iter_num)
writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
if performance > best_performance:
best_performance = performance
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
logging.info(
'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}_labeled/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 12,976 | 41.970199 | 108 |
py
|
SSL4MIS
|
SSL4MIS-master/code/train_mean_teacher_2D.py
|
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='path of the dataset')
parser.add_argument('--exp', type=str,
default='ACDC/Mean_Teacher', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum number of iterations to train')
parser.add_argument('--batch_size', type=int, default=24,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether to use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[256, 256],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=7,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patients_num):
ref_dict = None
if "ACDC" in dataset:
ref_dict = {"3": 68, "7": 136,
"14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
elif "Prostate":
ref_dict = {"2": 27, "4": 53, "8": 120,
"12": 179, "16": 256, "21": 312, "42": 623}
else:
print("Error")
    return ref_dict[str(patients_num)]
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
base_lr = args.base_lr
num_classes = args.num_classes
batch_size = args.batch_size
max_iterations = args.max_iterations
def create_model(ema=False):
# Network definition
model = net_factory(net_type=args.model, in_chns=1,
class_num=num_classes)
if ema:
for param in model.parameters():
param.detach_()
return model
model = create_model()
ema_model = create_model(ema=True)
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
RandomGenerator(args.patch_size)
]))
db_val = BaseDataSets(base_dir=args.root_path, split="val")
total_slices = len(db_train)
labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
print("Total silices is: {}, labeled slices is: {}".format(
total_slices, labeled_slice))
labeled_idxs = list(range(0, labeled_slice))
unlabeled_idxs = list(range(labeled_slice, total_slices))
batch_sampler = TwoStreamBatchSampler(
labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
model.train()
valloader = DataLoader(db_val, batch_size=1, shuffle=False,
num_workers=1)
optimizer = optim.SGD(model.parameters(), lr=base_lr,
momentum=0.9, weight_decay=0.0001)
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} iterations per epoch".format(len(trainloader)))
iter_num = 0
max_epoch = max_iterations // len(trainloader) + 1
best_performance = 0.0
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
unlabeled_volume_batch = volume_batch[args.labeled_bs:]
noise = torch.clamp(torch.randn_like(
unlabeled_volume_batch) * 0.1, -0.2, 0.2)
ema_inputs = unlabeled_volume_batch + noise
outputs = model(volume_batch)
outputs_soft = torch.softmax(outputs, dim=1)
with torch.no_grad():
ema_output = ema_model(ema_inputs)
ema_output_soft = torch.softmax(ema_output, dim=1)
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs].long())
loss_dice = dice_loss(
outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
supervised_loss = 0.5 * (loss_dice + loss_ce)
consistency_weight = get_current_consistency_weight(iter_num//150)
if iter_num < 1000:
consistency_loss = 0.0
else:
consistency_loss = torch.mean(
(outputs_soft[args.labeled_bs:]-ema_output_soft)**2)
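            # Mean-Teacher consistency: MSE between the student softmax on the
            # unlabeled slices and the teacher softmax on their noised copies,
            # enabled only after 1000 warm-up iterations so the early,
            # unreliable teacher is ignored.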
loss = supervised_loss + consistency_weight * consistency_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
update_ema_variables(model, ema_model, args.ema_decay, iter_num)
lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
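            # Poly learning-rate decay: lr = base_lr * (1 - iter/max_iter)**0.9;
            # e.g. halfway through training the rate is 0.01 * 0.5**0.9 ~= 0.0054.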
iter_num = iter_num + 1
writer.add_scalar('info/lr', lr_, iter_num)
writer.add_scalar('info/total_loss', loss, iter_num)
writer.add_scalar('info/loss_ce', loss_ce, iter_num)
writer.add_scalar('info/loss_dice', loss_dice, iter_num)
writer.add_scalar('info/consistency_loss',
consistency_loss, iter_num)
writer.add_scalar('info/consistency_weight',
consistency_weight, iter_num)
logging.info(
'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
(iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
if iter_num % 20 == 0:
image = volume_batch[1, 0:1, :, :]
writer.add_image('train/Image', image, iter_num)
outputs = torch.argmax(torch.softmax(
outputs, dim=1), dim=1, keepdim=True)
writer.add_image('train/Prediction',
outputs[1, ...] * 50, iter_num)
labs = label_batch[1, ...].unsqueeze(0) * 50
writer.add_image('train/GroundTruth', labs, iter_num)
if iter_num > 0 and iter_num % 200 == 0:
model.eval()
metric_list = 0.0
for i_batch, sampled_batch in enumerate(valloader):
metric_i = test_single_volume(
sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
metric_list += np.array(metric_i)
metric_list = metric_list / len(db_val)
for class_i in range(num_classes-1):
writer.add_scalar('info/val_{}_dice'.format(class_i+1),
metric_list[class_i, 0], iter_num)
writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
metric_list[class_i, 1], iter_num)
performance = np.mean(metric_list, axis=0)[0]
mean_hd95 = np.mean(metric_list, axis=0)[1]
writer.add_scalar('info/val_mean_dice', performance, iter_num)
writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
if performance > best_performance:
best_performance = performance
save_mode_path = os.path.join(snapshot_path,
'iter_{}_dice_{}.pth'.format(
iter_num, round(best_performance, 4)))
save_best = os.path.join(snapshot_path,
'{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best)
logging.info(
'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
model.train()
if iter_num % 3000 == 0:
save_mode_path = os.path.join(
snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
if iter_num >= max_iterations:
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
return "Training Finished!"
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
snapshot_path = "../model/{}_{}_labeled/{}".format(
args.exp, args.labeled_num, args.model)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
train(args, snapshot_path)
| 11,916 | 40.961268 | 108 |
py
|
SSL4MIS
|
SSL4MIS-master/code/test_3D_util.py
|
import math
import h5py
import nibabel as nib
import numpy as np
import SimpleITK as sitk
import torch
import torch.nn.functional as F
from medpy import metric
from skimage.measure import label
from tqdm import tqdm
def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1):
w, h, d = image.shape
# if the size of image is less than patch_size, then padding it
add_pad = False
if w < patch_size[0]:
w_pad = patch_size[0]-w
add_pad = True
else:
w_pad = 0
if h < patch_size[1]:
h_pad = patch_size[1]-h
add_pad = True
else:
h_pad = 0
if d < patch_size[2]:
d_pad = patch_size[2]-d
add_pad = True
else:
d_pad = 0
wl_pad, wr_pad = w_pad//2, w_pad-w_pad//2
hl_pad, hr_pad = h_pad//2, h_pad-h_pad//2
dl_pad, dr_pad = d_pad//2, d_pad-d_pad//2
if add_pad:
image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad),
(dl_pad, dr_pad)], mode='constant', constant_values=0)
ww, hh, dd = image.shape
sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1
sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1
sz = math.ceil((dd - patch_size[2]) / stride_z) + 1
# print("{}, {}, {}".format(sx, sy, sz))
score_map = np.zeros((num_classes, ) + image.shape).astype(np.float32)
cnt = np.zeros(image.shape).astype(np.float32)
for x in range(0, sx):
xs = min(stride_xy*x, ww-patch_size[0])
for y in range(0, sy):
ys = min(stride_xy * y, hh-patch_size[1])
for z in range(0, sz):
zs = min(stride_z * z, dd-patch_size[2])
test_patch = image[xs:xs+patch_size[0],
ys:ys+patch_size[1], zs:zs+patch_size[2]]
test_patch = np.expand_dims(np.expand_dims(
test_patch, axis=0), axis=0).astype(np.float32)
test_patch = torch.from_numpy(test_patch).cuda()
with torch.no_grad():
y1 = net(test_patch)
# ensemble
y = torch.softmax(y1, dim=1)
y = y.cpu().data.numpy()
y = y[0, :, :, :, :]
score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
= score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + y
cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
= cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + 1
score_map = score_map/np.expand_dims(cnt, axis=0)
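    # Sliding-window fusion: score_map accumulated overlapping patch softmax
    # scores and cnt the per-voxel visit counts, so this division is a uniform
    # average over overlaps (no Gaussian patch weighting) before the argmax.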
label_map = np.argmax(score_map, axis=0)
if add_pad:
label_map = label_map[wl_pad:wl_pad+w,
hl_pad:hl_pad+h, dl_pad:dl_pad+d]
score_map = score_map[:, wl_pad:wl_pad +
w, hl_pad:hl_pad+h, dl_pad:dl_pad+d]
return label_map
def cal_metric(gt, pred):
if pred.sum() > 0 and gt.sum() > 0:
dice = metric.binary.dc(pred, gt)
hd95 = metric.binary.hd95(pred, gt)
return np.array([dice, hd95])
else:
return np.zeros(2)
def test_all_case(net, base_dir, method="unet_3D", test_list="full_test.list", num_classes=4, patch_size=(48, 160, 160), stride_xy=32, stride_z=24, test_save_path=None):
with open(base_dir + '/{}'.format(test_list), 'r') as f:
image_list = f.readlines()
image_list = [base_dir + "/data/{}.h5".format(
item.replace('\n', '').split(",")[0]) for item in image_list]
total_metric = np.zeros((num_classes-1, 4))
print("Testing begin")
with open(test_save_path + "/{}.txt".format(method), "a") as f:
for image_path in tqdm(image_list):
ids = image_path.split("/")[-1].replace(".h5", "")
h5f = h5py.File(image_path, 'r')
image = h5f['image'][:]
label = h5f['label'][:]
prediction = test_single_case(
net, image, stride_xy, stride_z, patch_size, num_classes=num_classes)
            metric_i = calculate_metric_percase(prediction == 1, label == 1)
            total_metric[0, :] += metric_i
            f.writelines("{},{},{},{},{}\n".format(
                ids, metric_i[0], metric_i[1], metric_i[2], metric_i[3]))
pred_itk = sitk.GetImageFromArray(prediction.astype(np.uint8))
pred_itk.SetSpacing((1.0, 1.0, 1.0))
sitk.WriteImage(pred_itk, test_save_path +
"/{}_pred.nii.gz".format(ids))
img_itk = sitk.GetImageFromArray(image)
img_itk.SetSpacing((1.0, 1.0, 1.0))
sitk.WriteImage(img_itk, test_save_path +
"/{}_img.nii.gz".format(ids))
lab_itk = sitk.GetImageFromArray(label.astype(np.uint8))
lab_itk.SetSpacing((1.0, 1.0, 1.0))
sitk.WriteImage(lab_itk, test_save_path +
"/{}_lab.nii.gz".format(ids))
f.writelines("Mean metrics,{},{},{},{}".format(total_metric[0, 0] / len(image_list), total_metric[0, 1] / len(
image_list), total_metric[0, 2] / len(image_list), total_metric[0, 3] / len(image_list)))
print("Testing end")
return total_metric / len(image_list)
def cal_dice(prediction, label, num=2):
total_dice = np.zeros(num-1)
for i in range(1, num):
prediction_tmp = (prediction == i)
label_tmp = (label == i)
        prediction_tmp = prediction_tmp.astype(np.float64)
        label_tmp = label_tmp.astype(np.float64)
dice = 2 * np.sum(prediction_tmp * label_tmp) / \
(np.sum(prediction_tmp) + np.sum(label_tmp))
total_dice[i - 1] += dice
return total_dice
def calculate_metric_percase(pred, gt):
dice = metric.binary.dc(pred, gt)
ravd = abs(metric.binary.ravd(pred, gt))
hd = metric.binary.hd95(pred, gt)
asd = metric.binary.asd(pred, gt)
return np.array([dice, ravd, hd, asd])
| 6,008 | 38.27451 | 169 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/efficient_encoder.py
|
import re
from typing import List
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from efficientnet_pytorch import EfficientNet
from efficientnet_pytorch.utils import get_model_params, url_map
from torchvision.models.densenet import DenseNet
from torchvision.models.resnet import BasicBlock, Bottleneck, ResNet
class EncoderMixin:
"""Add encoder functionality such as:
- output channels specification of feature tensors (produced by encoder)
- patching first convolution for arbitrary input channels
"""
@property
def out_channels(self) -> List:
"""Return channels dimensions for each tensor of forward output of encoder"""
return self._out_channels[: self._depth + 1]
def set_in_channels(self, in_channels):
"""Change first convolution chennels"""
if in_channels == 3:
return
self._in_channels = in_channels
if self._out_channels[0] == 3:
self._out_channels = tuple([in_channels] + list(self._out_channels)[1:])
patch_first_conv(model=self, in_channels=in_channels)
def patch_first_conv(model, in_channels):
"""Change first convolution layer input channels.
In case:
in_channels == 1 or in_channels == 2 -> reuse original weights
in_channels > 3 -> make random kaiming normal initialization
"""
# get first conv
for module in model.modules():
if isinstance(module, nn.Conv2d):
break
# change input channels for first conv
module.in_channels = in_channels
weight = module.weight.detach()
reset = False
if in_channels == 1:
weight = weight.sum(1, keepdim=True)
elif in_channels == 2:
weight = weight[:, :2] * (3.0 / 2.0)
else:
reset = True
weight = torch.Tensor(
module.out_channels,
module.in_channels // module.groups,
*module.kernel_size
)
module.weight = nn.parameter.Parameter(weight)
if reset:
module.reset_parameters()
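# Sketch of the surgery above on hypothetical tensors (not executed): for a
# pretrained conv weight of shape (out, 3, k, k) and in_channels=1, the new
# weight is weight.sum(1, keepdim=True) -> (out, 1, k, k), i.e. the RGB filters
# collapsed into a single grayscale filter; in_channels > 3 re-initialises.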
class EfficientNetEncoder(EfficientNet, EncoderMixin):
def __init__(self, stage_idxs, out_channels, model_name, depth=5):
blocks_args, global_params = get_model_params(model_name, override_params=None)
super().__init__(blocks_args, global_params)
self._stage_idxs = list(stage_idxs) + [len(self._blocks)]
self._out_channels = out_channels
self._depth = depth
self._in_channels = 3
del self._fc
def forward(self, x):
features = [x]
if self._depth > 0:
x = self._swish(self._bn0(self._conv_stem(x)))
features.append(x)
if self._depth > 1:
skip_connection_idx = 0
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks)
x = block(x, drop_connect_rate=drop_connect_rate)
if idx == self._stage_idxs[skip_connection_idx] - 1:
skip_connection_idx += 1
features.append(x)
if skip_connection_idx + 1 == self._depth:
break
return features
def load_state_dict(self, state_dict, **kwargs):
state_dict.pop("_fc.bias")
state_dict.pop("_fc.weight")
super().load_state_dict(state_dict, **kwargs)
def _get_pretrained_settings(encoder):
pretrained_settings = {
"imagenet": {
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
"url": url_map[encoder],
"input_space": "RGB",
"input_range": [0, 1],
}
}
return pretrained_settings
efficient_net_encoders = {
"efficientnet-b0": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b0"),
"params": {
"out_channels": (3, 32, 24, 40, 112, 320),
"stage_idxs": (3, 5, 9),
"model_name": "efficientnet-b0",
},
},
"efficientnet-b1": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b1"),
"params": {
"out_channels": (3, 32, 24, 40, 112, 320),
"stage_idxs": (5, 8, 16),
"model_name": "efficientnet-b1",
},
},
"efficientnet-b2": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b2"),
"params": {
"out_channels": (3, 32, 24, 48, 120, 352),
"stage_idxs": (5, 8, 16),
"model_name": "efficientnet-b2",
},
},
"efficientnet-b3": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b3"),
"params": {
"out_channels": (3, 40, 32, 48, 136, 384),
"stage_idxs": (5, 8, 18),
"model_name": "efficientnet-b3",
},
},
"efficientnet-b4": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b4"),
"params": {
"out_channels": (3, 48, 32, 56, 160, 448),
"stage_idxs": (6, 10, 22),
"model_name": "efficientnet-b4",
},
},
"efficientnet-b5": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b5"),
"params": {
"out_channels": (3, 48, 40, 64, 176, 512),
"stage_idxs": (8, 13, 27),
"model_name": "efficientnet-b5",
},
},
"efficientnet-b6": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b6"),
"params": {
"out_channels": (3, 56, 40, 72, 200, 576),
"stage_idxs": (9, 15, 31),
"model_name": "efficientnet-b6",
},
},
"efficientnet-b7": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b7"),
"params": {
"out_channels": (3, 64, 48, 80, 224, 640),
"stage_idxs": (11, 18, 38),
"model_name": "efficientnet-b7",
},
},
}
encoders = {}
encoders.update(efficient_net_encoders)
def get_encoder(name, in_channels=3, depth=5, weights=None):
Encoder = encoders[name]["encoder"]
params = encoders[name]["params"]
params.update(depth=depth)
encoder = Encoder(**params)
if weights is not None:
settings = encoders[name]["pretrained_settings"][weights]
encoder.load_state_dict(model_zoo.load_url(settings["url"]))
encoder.set_in_channels(in_channels)
return encoder
# class ResNetEncoder(ResNet, EncoderMixin):
# def __init__(self, out_channels, depth=5, **kwargs):
# super().__init__(**kwargs)
# self._depth = depth
# self._out_channels = out_channels
# self._in_channels = 3
# del self.fc
# del self.avgpool
# def get_stages(self):
# return [
# nn.Identity(),
# nn.Sequential(self.conv1, self.bn1, self.relu),
# nn.Sequential(self.maxpool, self.layer1),
# self.layer2,
# self.layer3,
# self.layer4,
# ]
# def forward(self, x):
# stages = self.get_stages()
# features = []
# for i in range(self._depth + 1):
# x = stages[i](x)
# features.append(x)
# return features
# def load_state_dict(self, state_dict, **kwargs):
# state_dict.pop("fc.bias")
# state_dict.pop("fc.weight")
# super().load_state_dict(state_dict, **kwargs)
# resnet_encoders = {
# "resnet18": {
# "encoder": ResNetEncoder,
# "pretrained_settings": pretrained_settings["resnet18"],
# "params": {
# "out_channels": (3, 64, 64, 128, 256, 512),
# "block": BasicBlock,
# "layers": [2, 2, 2, 2],
# },
# },
# "resnet34": {
# "encoder": ResNetEncoder,
# "pretrained_settings": pretrained_settings["resnet34"],
# "params": {
# "out_channels": (3, 64, 64, 128, 256, 512),
# "block": BasicBlock,
# "layers": [3, 4, 6, 3],
# },
# },
# "resnet50": {
# "encoder": ResNetEncoder,
# "pretrained_settings": pretrained_settings["resnet50"],
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 6, 3],
# },
# },
# "resnet101": {
# "encoder": ResNetEncoder,
# "pretrained_settings": pretrained_settings["resnet101"],
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 23, 3],
# },
# },
# "resnet152": {
# "encoder": ResNetEncoder,
# "pretrained_settings": pretrained_settings["resnet152"],
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 8, 36, 3],
# },
# },
# "resnext50_32x4d": {
# "encoder": ResNetEncoder,
# "pretrained_settings": {
# "imagenet": {
# "url": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
# "input_space": "RGB",
# "input_size": [3, 224, 224],
# "input_range": [0, 1],
# "mean": [0.485, 0.456, 0.406],
# "std": [0.229, 0.224, 0.225],
# "num_classes": 1000,
# }
# },
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 6, 3],
# "groups": 32,
# "width_per_group": 4,
# },
# },
# "resnext101_32x8d": {
# "encoder": ResNetEncoder,
# "pretrained_settings": {
# "imagenet": {
# "url": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
# "input_space": "RGB",
# "input_size": [3, 224, 224],
# "input_range": [0, 1],
# "mean": [0.485, 0.456, 0.406],
# "std": [0.229, 0.224, 0.225],
# "num_classes": 1000,
# },
# "instagram": {
# "url": "https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth",
# "input_space": "RGB",
# "input_size": [3, 224, 224],
# "input_range": [0, 1],
# "mean": [0.485, 0.456, 0.406],
# "std": [0.229, 0.224, 0.225],
# "num_classes": 1000,
# },
# },
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 23, 3],
# "groups": 32,
# "width_per_group": 8,
# },
# },
# "resnext101_32x16d": {
# "encoder": ResNetEncoder,
# "pretrained_settings": {
# "instagram": {
# "url": "https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth",
# "input_space": "RGB",
# "input_size": [3, 224, 224],
# "input_range": [0, 1],
# "mean": [0.485, 0.456, 0.406],
# "std": [0.229, 0.224, 0.225],
# "num_classes": 1000,
# }
# },
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 23, 3],
# "groups": 32,
# "width_per_group": 16,
# },
# },
# "resnext101_32x32d": {
# "encoder": ResNetEncoder,
# "pretrained_settings": {
# "instagram": {
# "url": "https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth",
# "input_space": "RGB",
# "input_size": [3, 224, 224],
# "input_range": [0, 1],
# "mean": [0.485, 0.456, 0.406],
# "std": [0.229, 0.224, 0.225],
# "num_classes": 1000,
# }
# },
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 23, 3],
# "groups": 32,
# "width_per_group": 32,
# },
# },
# "resnext101_32x48d": {
# "encoder": ResNetEncoder,
# "pretrained_settings": {
# "instagram": {
# "url": "https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth",
# "input_space": "RGB",
# "input_size": [3, 224, 224],
# "input_range": [0, 1],
# "mean": [0.485, 0.456, 0.406],
# "std": [0.229, 0.224, 0.225],
# "num_classes": 1000,
# }
# },
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 23, 3],
# "groups": 32,
# "width_per_group": 48,
# },
# },
# }
# class TransitionWithSkip(nn.Module):
# def __init__(self, module):
# super().__init__()
# self.module = module
# def forward(self, x):
# for module in self.module:
# x = module(x)
# if isinstance(module, nn.ReLU):
# skip = x
# return x, skip
# class DenseNetEncoder(DenseNet, EncoderMixin):
# def __init__(self, out_channels, depth=5, **kwargs):
# super().__init__(**kwargs)
# self._out_channels = out_channels
# self._depth = depth
# self._in_channels = 3
# del self.classifier
# def make_dilated(self, stage_list, dilation_list):
# raise ValueError("DenseNet encoders do not support dilated mode "
# "due to pooling operation for downsampling!")
# def get_stages(self):
# return [
# nn.Identity(),
# nn.Sequential(self.features.conv0, self.features.norm0, self.features.relu0),
# nn.Sequential(self.features.pool0, self.features.denseblock1,
# TransitionWithSkip(self.features.transition1)),
# nn.Sequential(self.features.denseblock2, TransitionWithSkip(self.features.transition2)),
# nn.Sequential(self.features.denseblock3, TransitionWithSkip(self.features.transition3)),
# nn.Sequential(self.features.denseblock4, self.features.norm5)
# ]
# def forward(self, x):
# stages = self.get_stages()
# features = []
# for i in range(self._depth + 1):
# x = stages[i](x)
# if isinstance(x, (list, tuple)):
# x, skip = x
# features.append(skip)
# else:
# features.append(x)
# return features
# def load_state_dict(self, state_dict):
# pattern = re.compile(
# r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$"
# )
# for key in list(state_dict.keys()):
# res = pattern.match(key)
# if res:
# new_key = res.group(1) + res.group(2)
# state_dict[new_key] = state_dict[key]
# del state_dict[key]
# # remove linear
# state_dict.pop("classifier.bias")
# state_dict.pop("classifier.weight")
# super().load_state_dict(state_dict)
# densenet_encoders = {
# "densenet121": {
# "encoder": DenseNetEncoder,
# "pretrained_settings": pretrained_settings["densenet121"],
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 1024),
# "num_init_features": 64,
# "growth_rate": 32,
# "block_config": (6, 12, 24, 16),
# },
# },
# "densenet169": {
# "encoder": DenseNetEncoder,
# "pretrained_settings": pretrained_settings["densenet169"],
# "params": {
# "out_channels": (3, 64, 256, 512, 1280, 1664),
# "num_init_features": 64,
# "growth_rate": 32,
# "block_config": (6, 12, 32, 32),
# },
# },
# "densenet201": {
# "encoder": DenseNetEncoder,
# "pretrained_settings": pretrained_settings["densenet201"],
# "params": {
# "out_channels": (3, 64, 256, 512, 1792, 1920),
# "num_init_features": 64,
# "growth_rate": 32,
# "block_config": (6, 12, 48, 32),
# },
# },
# "densenet161": {
# "encoder": DenseNetEncoder,
# "pretrained_settings": pretrained_settings["densenet161"],
# "params": {
# "out_channels": (3, 96, 384, 768, 2112, 2208),
# "num_init_features": 96,
# "growth_rate": 48,
# "block_config": (6, 12, 36, 24),
# },
# },
# }
# net = get_encoder(name="efficientnet-b4", in_channels=1, depth=5, weights="imagenet")
#
# t = torch.rand(2, 1, 480, 480)
#
# print(len(net(t)))
| 17,641 | 31.975701 | 110 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/pnet.py
|
# -*- coding: utf-8 -*-
"""
A PyTorch implementation of the DeepIGeoS paper:
Wang, Guotai and Zuluaga, Maria A and Li, Wenqi and Pratt, Rosalind and Patel, Premal A and Aertsen, Michael and Doel, Tom and David, Anna L and Deprest, Jan and Ourselin, Sébastien and others:
DeepIGeoS: a deep interactive geodesic framework for medical image segmentation.
TPAMI (7) 2018: 1559--1572
Note that there are some modifications from the original paper, such as
the use of leaky relu here.
"""
from __future__ import division, print_function
import torch
import torch.nn as nn
class PNetBlock(nn.Module):
def __init__(self, in_channels, out_channels, dilation, padding):
super(PNetBlock, self).__init__()
self.in_chns = in_channels
self.out_chns = out_channels
self.dilation = dilation
self.padding = padding
self.conv1 = nn.Conv2d(self.in_chns, self.out_chns, kernel_size=3,
padding=self.padding, dilation=self.dilation, groups=1, bias=True)
self.conv2 = nn.Conv2d(self.out_chns, self.out_chns, kernel_size=3,
padding=self.padding, dilation=self.dilation, groups=1, bias=True)
self.in1 = nn.BatchNorm2d(self.out_chns)
self.in2 = nn.BatchNorm2d(self.out_chns)
self.ac1 = nn.LeakyReLU()
self.ac2 = nn.LeakyReLU()
def forward(self, x):
x = self.conv1(x)
x = self.in1(x)
x = self.ac1(x)
x = self.conv2(x)
x = self.in2(x)
x = self.ac2(x)
return x
class ConcatBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConcatBlock, self).__init__()
self.in_chns = in_channels
self.out_chns = out_channels
self.conv1 = nn.Conv2d(
self.in_chns, self.in_chns, kernel_size=1, padding=0)
self.conv2 = nn.Conv2d(
self.in_chns, self.out_chns, kernel_size=1, padding=0)
self.ac1 = nn.LeakyReLU()
self.ac2 = nn.LeakyReLU()
def forward(self, x):
x = self.conv1(x)
x = self.ac1(x)
x = self.conv2(x)
x = self.ac2(x)
return x
class OutPutBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutPutBlock, self).__init__()
self.in_chns = in_channels
self.out_chns = out_channels
self.conv1 = nn.Conv2d(
self.in_chns, self.in_chns // 2, kernel_size=1, padding=0)
self.conv2 = nn.Conv2d(
self.in_chns // 2, self.out_chns, kernel_size=1, padding=0)
self.drop1 = nn.Dropout2d(0.3)
self.drop2 = nn.Dropout2d(0.3)
self.ac1 = nn.LeakyReLU()
def forward(self, x):
x = self.drop1(x)
x = self.conv1(x)
x = self.ac1(x)
x = self.drop2(x)
x = self.conv2(x)
return x
class PNet2D(nn.Module):
def __init__(self, in_chns, out_chns, num_filters, ratios):
super(PNet2D, self).__init__()
self.in_chns = in_chns
self.out_chns = out_chns
self.ratios = ratios
self.num_filters = num_filters
self.block1 = PNetBlock(
self.in_chns, self.num_filters, self.ratios[0], padding=self.ratios[0])
self.block2 = PNetBlock(
self.num_filters, self.num_filters, self.ratios[1], padding=self.ratios[1])
self.block3 = PNetBlock(
self.num_filters, self.num_filters, self.ratios[2], padding=self.ratios[2])
self.block4 = PNetBlock(
self.num_filters, self.num_filters, self.ratios[3], padding=self.ratios[3])
self.block5 = PNetBlock(
self.num_filters, self.num_filters, self.ratios[4], padding=self.ratios[4])
self.catblock = ConcatBlock(self.num_filters * 5, self.num_filters * 2)
self.out = OutPutBlock(self.num_filters * 2, self.out_chns)
def forward(self, x):
x1 = self.block1(x)
x2 = self.block2(x1)
x3 = self.block3(x2)
x4 = self.block4(x3)
x5 = self.block5(x4)
conx = torch.cat([x1, x2, x3, x4, x5], dim=1)
conx = self.catblock(conx)
out = self.out(conx)
return out
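# Minimal usage sketch (illustrative sizes): the dilated blocks keep the spatial
# resolution, so for net = PNet2D(in_chns=1, out_chns=4, num_filters=64,
# ratios=[1, 2, 4, 8, 16]), net(torch.rand(2, 1, 256, 256)) has shape
# (2, 4, 256, 256); the concatenation fuses five receptive-field scales.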
| 4,200 | 33.154472 | 202 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/grid_attention_layer.py
|
import torch
from torch import nn
from torch.nn import functional as F
from networks.networks_other import init_weights
class _GridAttentionBlockND(nn.Module):
def __init__(self, in_channels, gating_channels, inter_channels=None, dimension=3, mode='concatenation',
sub_sample_factor=(2,2,2)):
super(_GridAttentionBlockND, self).__init__()
assert dimension in [2, 3]
assert mode in ['concatenation', 'concatenation_debug', 'concatenation_residual']
# Downsampling rate for the input featuremap
if isinstance(sub_sample_factor, tuple): self.sub_sample_factor = sub_sample_factor
elif isinstance(sub_sample_factor, list): self.sub_sample_factor = tuple(sub_sample_factor)
else: self.sub_sample_factor = tuple([sub_sample_factor]) * dimension
# Default parameter set
self.mode = mode
self.dimension = dimension
self.sub_sample_kernel_size = self.sub_sample_factor
# Number of channels (pixel dimensions)
self.in_channels = in_channels
self.gating_channels = gating_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
bn = nn.BatchNorm3d
self.upsample_mode = 'trilinear'
elif dimension == 2:
conv_nd = nn.Conv2d
bn = nn.BatchNorm2d
self.upsample_mode = 'bilinear'
else:
            raise NotImplementedError
# Output transform
self.W = nn.Sequential(
conv_nd(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0),
bn(self.in_channels),
)
# Theta^T * x_ij + Phi^T * gating_signal + bias
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=self.sub_sample_kernel_size, stride=self.sub_sample_factor, padding=0, bias=False)
self.phi = conv_nd(in_channels=self.gating_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0, bias=True)
self.psi = conv_nd(in_channels=self.inter_channels, out_channels=1, kernel_size=1, stride=1, padding=0, bias=True)
# Initialise weights
for m in self.children():
init_weights(m, init_type='kaiming')
# Define the operation
if mode == 'concatenation':
self.operation_function = self._concatenation
elif mode == 'concatenation_debug':
self.operation_function = self._concatenation_debug
elif mode == 'concatenation_residual':
self.operation_function = self._concatenation_residual
else:
raise NotImplementedError('Unknown operation function.')
def forward(self, x, g):
'''
:param x: (b, c, t, h, w)
:param g: (b, g_d)
:return:
'''
output = self.operation_function(x, g)
return output
def _concatenation(self, x, g):
input_size = x.size()
batch_size = input_size[0]
assert batch_size == g.size(0)
# theta => (b, c, t, h, w) -> (b, i_c, t, h, w) -> (b, i_c, thw)
# phi => (b, g_d) -> (b, i_c)
theta_x = self.theta(x)
theta_x_size = theta_x.size()
# g (b, c, t', h', w') -> phi_g (b, i_c, t', h', w')
# Relu(theta_x + phi_g + bias) -> f = (b, i_c, thw) -> (b, i_c, t/s1, h/s2, w/s3)
        phi_g = F.interpolate(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)
        f = F.relu(theta_x + phi_g, inplace=True)
        # psi^T * f -> (b, psi_i_c, t/s1, h/s2, w/s3)
        sigm_psi_f = torch.sigmoid(self.psi(f))
        # upsample the attentions and multiply
        sigm_psi_f = F.interpolate(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)
        y = sigm_psi_f.expand_as(x) * x
        W_y = self.W(y)
return W_y, sigm_psi_f
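    # In short: attention map alpha = sigmoid(psi(relu(theta(x) + up(phi(g))))),
    # resampled to x's resolution; the gated output is W(alpha * x), and alpha
    # is returned alongside it for inspection.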
def _concatenation_debug(self, x, g):
input_size = x.size()
batch_size = input_size[0]
assert batch_size == g.size(0)
# theta => (b, c, t, h, w) -> (b, i_c, t, h, w) -> (b, i_c, thw)
# phi => (b, g_d) -> (b, i_c)
theta_x = self.theta(x)
theta_x_size = theta_x.size()
# g (b, c, t', h', w') -> phi_g (b, i_c, t', h', w')
# Relu(theta_x + phi_g + bias) -> f = (b, i_c, thw) -> (b, i_c, t/s1, h/s2, w/s3)
        phi_g = F.interpolate(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)
        f = F.softplus(theta_x + phi_g)
        # psi^T * f -> (b, psi_i_c, t/s1, h/s2, w/s3)
        sigm_psi_f = torch.sigmoid(self.psi(f))
        # upsample the attentions and multiply
        sigm_psi_f = F.interpolate(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)
y = sigm_psi_f.expand_as(x) * x
W_y = self.W(y)
return W_y, sigm_psi_f
def _concatenation_residual(self, x, g):
input_size = x.size()
batch_size = input_size[0]
assert batch_size == g.size(0)
# theta => (b, c, t, h, w) -> (b, i_c, t, h, w) -> (b, i_c, thw)
# phi => (b, g_d) -> (b, i_c)
theta_x = self.theta(x)
theta_x_size = theta_x.size()
# g (b, c, t', h', w') -> phi_g (b, i_c, t', h', w')
# Relu(theta_x + phi_g + bias) -> f = (b, i_c, thw) -> (b, i_c, t/s1, h/s2, w/s3)
        phi_g = F.interpolate(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)
        f = F.relu(theta_x + phi_g, inplace=True)
        # psi^T * f -> (b, psi_i_c, t/s1, h/s2, w/s3)
        f = self.psi(f).view(batch_size, 1, -1)
        sigm_psi_f = F.softmax(f, dim=2).view(batch_size, 1, *theta_x.size()[2:])
        # upsample the attentions and multiply
        sigm_psi_f = F.interpolate(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)
y = sigm_psi_f.expand_as(x) * x
W_y = self.W(y)
return W_y, sigm_psi_f
class GridAttentionBlock2D(_GridAttentionBlockND):
    def __init__(self, in_channels, gating_channels, inter_channels=None, mode='concatenation',
                 sub_sample_factor=(2, 2)):
super(GridAttentionBlock2D, self).__init__(in_channels,
inter_channels=inter_channels,
gating_channels=gating_channels,
dimension=2, mode=mode,
sub_sample_factor=sub_sample_factor,
)
class GridAttentionBlock3D(_GridAttentionBlockND):
def __init__(self, in_channels, gating_channels, inter_channels=None, mode='concatenation',
sub_sample_factor=(2,2,2)):
super(GridAttentionBlock3D, self).__init__(in_channels,
inter_channels=inter_channels,
gating_channels=gating_channels,
dimension=3, mode=mode,
sub_sample_factor=sub_sample_factor,
)
class _GridAttentionBlockND_TORR(nn.Module):
def __init__(self, in_channels, gating_channels, inter_channels=None, dimension=3, mode='concatenation',
sub_sample_factor=(1,1,1), bn_layer=True, use_W=True, use_phi=True, use_theta=True, use_psi=True, nonlinearity1='relu'):
super(_GridAttentionBlockND_TORR, self).__init__()
assert dimension in [2, 3]
assert mode in ['concatenation', 'concatenation_softmax',
'concatenation_sigmoid', 'concatenation_mean',
'concatenation_range_normalise', 'concatenation_mean_flow']
# Default parameter set
self.mode = mode
self.dimension = dimension
self.sub_sample_factor = sub_sample_factor if isinstance(sub_sample_factor, tuple) else tuple([sub_sample_factor])*dimension
self.sub_sample_kernel_size = self.sub_sample_factor
# Number of channels (pixel dimensions)
self.in_channels = in_channels
self.gating_channels = gating_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
bn = nn.BatchNorm3d
self.upsample_mode = 'trilinear'
elif dimension == 2:
conv_nd = nn.Conv2d
bn = nn.BatchNorm2d
self.upsample_mode = 'bilinear'
else:
            raise NotImplementedError
# initialise id functions
# Theta^T * x_ij + Phi^T * gating_signal + bias
self.W = lambda x: x
self.theta = lambda x: x
self.psi = lambda x: x
self.phi = lambda x: x
self.nl1 = lambda x: x
if use_W:
if bn_layer:
self.W = nn.Sequential(
conv_nd(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0),
bn(self.in_channels),
)
else:
self.W = conv_nd(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0)
if use_theta:
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=self.sub_sample_kernel_size, stride=self.sub_sample_factor, padding=0, bias=False)
if use_phi:
self.phi = conv_nd(in_channels=self.gating_channels, out_channels=self.inter_channels,
kernel_size=self.sub_sample_kernel_size, stride=self.sub_sample_factor, padding=0, bias=False)
if use_psi:
self.psi = conv_nd(in_channels=self.inter_channels, out_channels=1, kernel_size=1, stride=1, padding=0, bias=True)
if nonlinearity1:
if nonlinearity1 == 'relu':
self.nl1 = lambda x: F.relu(x, inplace=True)
if 'concatenation' in mode:
self.operation_function = self._concatenation
else:
raise NotImplementedError('Unknown operation function.')
# Initialise weights
for m in self.children():
init_weights(m, init_type='kaiming')
if use_psi and self.mode == 'concatenation_sigmoid':
            nn.init.constant_(self.psi.bias.data, 3.0)
if use_psi and self.mode == 'concatenation_softmax':
            nn.init.constant_(self.psi.bias.data, 10.0)
# if use_psi and self.mode == 'concatenation_mean':
# nn.init.constant(self.psi.bias.data, 3.0)
# if use_psi and self.mode == 'concatenation_range_normalise':
# nn.init.constant(self.psi.bias.data, 3.0)
parallel = False
if parallel:
if use_W: self.W = nn.DataParallel(self.W)
if use_phi: self.phi = nn.DataParallel(self.phi)
if use_psi: self.psi = nn.DataParallel(self.psi)
if use_theta: self.theta = nn.DataParallel(self.theta)
def forward(self, x, g):
'''
:param x: (b, c, t, h, w)
:param g: (b, g_d)
:return:
'''
output = self.operation_function(x, g)
return output
def _concatenation(self, x, g):
input_size = x.size()
batch_size = input_size[0]
assert batch_size == g.size(0)
#############################
# compute compatibility score
# theta => (b, c, t, h, w) -> (b, i_c, t, h, w)
# phi => (b, c, t, h, w) -> (b, i_c, t, h, w)
theta_x = self.theta(x)
theta_x_size = theta_x.size()
# nl(theta.x + phi.g + bias) -> f = (b, i_c, t/s1, h/s2, w/s3)
        phi_g = F.interpolate(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)
f = theta_x + phi_g
f = self.nl1(f)
psi_f = self.psi(f)
############################################
# normalisation -- scale compatibility score
# psi^T . f -> (b, 1, t/s1, h/s2, w/s3)
if self.mode == 'concatenation_softmax':
sigm_psi_f = F.softmax(psi_f.view(batch_size, 1, -1), dim=2)
sigm_psi_f = sigm_psi_f.view(batch_size, 1, *theta_x_size[2:])
elif self.mode == 'concatenation_mean':
psi_f_flat = psi_f.view(batch_size, 1, -1)
            psi_f_sum = torch.sum(psi_f_flat, dim=2)  # could be clamped (e.g. min=1e-6) to avoid division by zero
psi_f_sum = psi_f_sum[:,:,None].expand_as(psi_f_flat)
sigm_psi_f = psi_f_flat / psi_f_sum
sigm_psi_f = sigm_psi_f.view(batch_size, 1, *theta_x_size[2:])
elif self.mode == 'concatenation_mean_flow':
psi_f_flat = psi_f.view(batch_size, 1, -1)
ss = psi_f_flat.shape
psi_f_min = psi_f_flat.min(dim=2)[0].view(ss[0],ss[1],1)
psi_f_flat = psi_f_flat - psi_f_min
psi_f_sum = torch.sum(psi_f_flat, dim=2).view(ss[0],ss[1],1).expand_as(psi_f_flat)
sigm_psi_f = psi_f_flat / psi_f_sum
sigm_psi_f = sigm_psi_f.view(batch_size, 1, *theta_x_size[2:])
elif self.mode == 'concatenation_range_normalise':
psi_f_flat = psi_f.view(batch_size, 1, -1)
ss = psi_f_flat.shape
psi_f_max = torch.max(psi_f_flat, dim=2)[0].view(ss[0], ss[1], 1)
psi_f_min = torch.min(psi_f_flat, dim=2)[0].view(ss[0], ss[1], 1)
sigm_psi_f = (psi_f_flat - psi_f_min) / (psi_f_max - psi_f_min).expand_as(psi_f_flat)
sigm_psi_f = sigm_psi_f.view(batch_size, 1, *theta_x_size[2:])
elif self.mode == 'concatenation_sigmoid':
            sigm_psi_f = torch.sigmoid(psi_f)
else:
raise NotImplementedError
# sigm_psi_f is attention map! upsample the attentions and multiply
        sigm_psi_f = F.interpolate(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)
y = sigm_psi_f.expand_as(x) * x
W_y = self.W(y)
return W_y, sigm_psi_f
class GridAttentionBlock2D_TORR(_GridAttentionBlockND_TORR):
def __init__(self, in_channels, gating_channels, inter_channels=None, mode='concatenation',
sub_sample_factor=(1,1), bn_layer=True,
use_W=True, use_phi=True, use_theta=True, use_psi=True,
nonlinearity1='relu'):
super(GridAttentionBlock2D_TORR, self).__init__(in_channels,
inter_channels=inter_channels,
gating_channels=gating_channels,
dimension=2, mode=mode,
sub_sample_factor=sub_sample_factor,
bn_layer=bn_layer,
use_W=use_W,
use_phi=use_phi,
use_theta=use_theta,
use_psi=use_psi,
nonlinearity1=nonlinearity1)
class GridAttentionBlock3D_TORR(_GridAttentionBlockND_TORR):
def __init__(self, in_channels, gating_channels, inter_channels=None, mode='concatenation',
sub_sample_factor=(1,1,1), bn_layer=True):
super(GridAttentionBlock3D_TORR, self).__init__(in_channels,
inter_channels=inter_channels,
gating_channels=gating_channels,
dimension=3, mode=mode,
sub_sample_factor=sub_sample_factor,
bn_layer=bn_layer)
if __name__ == '__main__':
from torch.autograd import Variable
mode_list = ['concatenation']
for mode in mode_list:
img = Variable(torch.rand(2, 16, 10, 10, 10))
gat = Variable(torch.rand(2, 64, 4, 4, 4))
net = GridAttentionBlock3D(in_channels=16, inter_channels=16, gating_channels=64, mode=mode, sub_sample_factor=(2,2,2))
out, sigma = net(img, gat)
print(out.size())
| 16,619 | 40.446384 | 137 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/attention_unet.py
|
import torch.nn as nn
import torch
from networks.utils import UnetConv3, UnetUp3_CT, UnetGridGatingSignal3, UnetDsv3
import torch.nn.functional as F
from networks.networks_other import init_weights
from networks.grid_attention_layer import GridAttentionBlock3D
class Attention_UNet(nn.Module):
def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3,
nonlocal_mode='concatenation', attention_dsample=(2,2,2), is_batchnorm=True):
super(Attention_UNet, self).__init__()
self.is_deconv = is_deconv
self.in_channels = in_channels
self.is_batchnorm = is_batchnorm
self.feature_scale = feature_scale
filters = [64, 128, 256, 512, 1024]
filters = [int(x / self.feature_scale) for x in filters]
# downsampling
self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))
self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))
self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))
self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))
self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))
self.gating = UnetGridGatingSignal3(filters[4], filters[4], kernel_size=(1, 1, 1), is_batchnorm=self.is_batchnorm)
# attention blocks
self.attentionblock2 = MultiAttentionBlock(in_size=filters[1], gate_size=filters[2], inter_size=filters[1],
nonlocal_mode=nonlocal_mode, sub_sample_factor= attention_dsample)
self.attentionblock3 = MultiAttentionBlock(in_size=filters[2], gate_size=filters[3], inter_size=filters[2],
nonlocal_mode=nonlocal_mode, sub_sample_factor= attention_dsample)
self.attentionblock4 = MultiAttentionBlock(in_size=filters[3], gate_size=filters[4], inter_size=filters[3],
nonlocal_mode=nonlocal_mode, sub_sample_factor= attention_dsample)
# upsampling
self.up_concat4 = UnetUp3_CT(filters[4], filters[3], is_batchnorm)
self.up_concat3 = UnetUp3_CT(filters[3], filters[2], is_batchnorm)
self.up_concat2 = UnetUp3_CT(filters[2], filters[1], is_batchnorm)
self.up_concat1 = UnetUp3_CT(filters[1], filters[0], is_batchnorm)
# deep supervision
self.dsv4 = UnetDsv3(in_size=filters[3], out_size=n_classes, scale_factor=8)
self.dsv3 = UnetDsv3(in_size=filters[2], out_size=n_classes, scale_factor=4)
self.dsv2 = UnetDsv3(in_size=filters[1], out_size=n_classes, scale_factor=2)
self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=n_classes, kernel_size=1)
# final conv (without any concat)
self.final = nn.Conv3d(n_classes*4, n_classes, 1)
# initialise weights
for m in self.modules():
if isinstance(m, nn.Conv3d):
init_weights(m, init_type='kaiming')
elif isinstance(m, nn.BatchNorm3d):
init_weights(m, init_type='kaiming')
def forward(self, inputs):
# Feature Extraction
conv1 = self.conv1(inputs)
maxpool1 = self.maxpool1(conv1)
conv2 = self.conv2(maxpool1)
maxpool2 = self.maxpool2(conv2)
conv3 = self.conv3(maxpool2)
maxpool3 = self.maxpool3(conv3)
conv4 = self.conv4(maxpool3)
maxpool4 = self.maxpool4(conv4)
# Gating Signal Generation
center = self.center(maxpool4)
gating = self.gating(center)
# Attention Mechanism
# Upscaling Part (Decoder)
g_conv4, att4 = self.attentionblock4(conv4, gating)
up4 = self.up_concat4(g_conv4, center)
g_conv3, att3 = self.attentionblock3(conv3, up4)
up3 = self.up_concat3(g_conv3, up4)
g_conv2, att2 = self.attentionblock2(conv2, up3)
up2 = self.up_concat2(g_conv2, up3)
up1 = self.up_concat1(conv1, up2)
# Deep Supervision
dsv4 = self.dsv4(up4)
dsv3 = self.dsv3(up3)
dsv2 = self.dsv2(up2)
dsv1 = self.dsv1(up1)
final = self.final(torch.cat([dsv1,dsv2,dsv3,dsv4], dim=1))
return final
@staticmethod
def apply_argmax_softmax(pred):
log_p = F.softmax(pred, dim=1)
return log_p
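    # Note on the decoder above: the UnetDsv3 heads upsample their stage
    # predictions back to full resolution (x8, x4, x2), and the final 1x1x1
    # convolution fuses the four deep-supervision maps into the output logits.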
class MultiAttentionBlock(nn.Module):
def __init__(self, in_size, gate_size, inter_size, nonlocal_mode, sub_sample_factor):
super(MultiAttentionBlock, self).__init__()
self.gate_block_1 = GridAttentionBlock3D(in_channels=in_size, gating_channels=gate_size,
inter_channels=inter_size, mode=nonlocal_mode,
sub_sample_factor= sub_sample_factor)
self.gate_block_2 = GridAttentionBlock3D(in_channels=in_size, gating_channels=gate_size,
inter_channels=inter_size, mode=nonlocal_mode,
sub_sample_factor=sub_sample_factor)
self.combine_gates = nn.Sequential(nn.Conv3d(in_size*2, in_size, kernel_size=1, stride=1, padding=0),
nn.BatchNorm3d(in_size),
nn.ReLU(inplace=True)
)
# initialise the blocks
for m in self.children():
if m.__class__.__name__.find('GridAttentionBlock3D') != -1: continue
init_weights(m, init_type='kaiming')
def forward(self, input, gating_signal):
gate_1, attention_1 = self.gate_block_1(input, gating_signal)
gate_2, attention_2 = self.gate_block_2(input, gating_signal)
return self.combine_gates(torch.cat([gate_1, gate_2], 1)), torch.cat([attention_1, attention_2], 1)
| 6,336 | 45.595588 | 122 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/discriminator.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FC3DDiscriminator(nn.Module):
def __init__(self, num_classes, ndf=64, n_channel=1):
super(FC3DDiscriminator, self).__init__()
# downsample 16
self.conv0 = nn.Conv3d(
num_classes, ndf, kernel_size=4, stride=2, padding=1)
self.conv1 = nn.Conv3d(
n_channel, ndf, kernel_size=4, stride=2, padding=1)
self.conv2 = nn.Conv3d(ndf, ndf*2, kernel_size=4, stride=2, padding=1)
self.conv3 = nn.Conv3d(
ndf*2, ndf*4, kernel_size=4, stride=2, padding=1)
self.conv4 = nn.Conv3d(
ndf*4, ndf*8, kernel_size=4, stride=2, padding=1)
self.avgpool = nn.AvgPool3d((6, 6, 6)) # (D/16, W/16, H/16)
self.classifier = nn.Linear(ndf*8, 2)
self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
self.dropout = nn.Dropout3d(0.5)
        self.Softmax = nn.Softmax(dim=1)
def forward(self, map, image):
batch_size = map.shape[0]
map_feature = self.conv0(map)
image_feature = self.conv1(image)
x = torch.add(map_feature, image_feature)
x = self.leaky_relu(x)
x = self.dropout(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.dropout(x)
x = self.conv3(x)
x = self.leaky_relu(x)
x = self.dropout(x)
x = self.conv4(x)
x = self.leaky_relu(x)
x = self.avgpool(x)
x = x.view(batch_size, -1)
x = self.classifier(x)
x = x.reshape((batch_size, 2))
# x = self.Softmax(x)
return x
class FCDiscriminator(nn.Module):
def __init__(self, num_classes, ndf=64, n_channel=1):
super(FCDiscriminator, self).__init__()
self.conv0 = nn.Conv2d(
num_classes, ndf, kernel_size=4, stride=2, padding=1)
self.conv1 = nn.Conv2d(
n_channel, ndf, kernel_size=4, stride=2, padding=1)
self.conv2 = nn.Conv2d(ndf, ndf*2, kernel_size=4, stride=2, padding=1)
self.conv3 = nn.Conv2d(
ndf*2, ndf*4, kernel_size=4, stride=2, padding=1)
self.conv4 = nn.Conv2d(
ndf*4, ndf*8, kernel_size=4, stride=2, padding=1)
self.classifier = nn.Linear(ndf*32, 2)
self.avgpool = nn.AvgPool2d((7, 7))
self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
self.dropout = nn.Dropout2d(0.5)
# self.up_sample = nn.Upsample(scale_factor=32, mode='bilinear')
# self.sigmoid = nn.Sigmoid()
def forward(self, map, feature):
map_feature = self.conv0(map)
image_feature = self.conv1(feature)
x = torch.add(map_feature, image_feature)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.dropout(x)
x = self.conv3(x)
x = self.leaky_relu(x)
x = self.dropout(x)
x = self.conv4(x)
x = self.leaky_relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
# x = self.up_sample(x)
# x = self.sigmoid(x)
return x
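# Minimal shape sketch (illustrative sizes; assumes 224x224 inputs so the
# flattened feature equals ndf*32 after the 7x7 average pool):
#   d = FCDiscriminator(num_classes=4)
#   logits = d(torch.rand(2, 4, 224, 224), torch.rand(2, 1, 224, 224))  # (2, 2)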
| 3,133 | 30.029703 | 78 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/encoder_tool.py
|
from typing import List
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from efficientnet_pytorch import EfficientNet
from efficientnet_pytorch.utils import get_model_params, url_map
class EncoderMixin:
"""Add encoder functionality such as:
- output channels specification of feature tensors (produced by encoder)
- patching first convolution for arbitrary input channels
"""
@property
def out_channels(self) -> List:
"""Return channels dimensions for each tensor of forward output of encoder"""
return self._out_channels[: self._depth + 1]
def set_in_channels(self, in_channels):
"""Change first convolution chennels"""
if in_channels == 3:
return
self._in_channels = in_channels
if self._out_channels[0] == 3:
self._out_channels = tuple([in_channels] + list(self._out_channels)[1:])
patch_first_conv(model=self, in_channels=in_channels)
def patch_first_conv(model, in_channels):
"""Change first convolution layer input channels.
In case:
in_channels == 1 or in_channels == 2 -> reuse original weights
in_channels > 3 -> make random kaiming normal initialization
"""
# get first conv
for module in model.modules():
if isinstance(module, nn.Conv2d):
break
# change input channels for first conv
module.in_channels = in_channels
weight = module.weight.detach()
reset = False
if in_channels == 1:
weight = weight.sum(1, keepdim=True)
elif in_channels == 2:
weight = weight[:, :2] * (3.0 / 2.0)
else:
reset = True
weight = torch.Tensor(
module.out_channels,
module.in_channels // module.groups,
*module.kernel_size
)
module.weight = nn.parameter.Parameter(weight)
if reset:
module.reset_parameters()
class EfficientNetEncoder(EfficientNet, EncoderMixin):
def __init__(self, stage_idxs, out_channels, model_name, depth=5):
blocks_args, global_params = get_model_params(model_name, override_params=None)
super().__init__(blocks_args, global_params)
self._stage_idxs = list(stage_idxs) + [len(self._blocks)]
self._out_channels = out_channels
self._depth = depth
self._in_channels = 3
del self._fc
def forward(self, x):
features = [x]
if self._depth > 0:
x = self._swish(self._bn0(self._conv_stem(x)))
features.append(x)
if self._depth > 1:
skip_connection_idx = 0
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks)
x = block(x, drop_connect_rate=drop_connect_rate)
if idx == self._stage_idxs[skip_connection_idx] - 1:
skip_connection_idx += 1
features.append(x)
if skip_connection_idx + 1 == self._depth:
break
return features
def load_state_dict(self, state_dict, **kwargs):
state_dict.pop("_fc.bias")
state_dict.pop("_fc.weight")
super().load_state_dict(state_dict, **kwargs)
def _get_pretrained_settings(encoder):
pretrained_settings = {
"imagenet": {
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
"url": url_map[encoder],
"input_space": "RGB",
"input_range": [0, 1],
}
}
return pretrained_settings
efficient_net_encoders = {
"efficientnet-b0": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b0"),
"params": {
"out_channels": (3, 32, 24, 40, 112, 320),
"stage_idxs": (3, 5, 9),
"model_name": "efficientnet-b0",
},
},
"efficientnet-b1": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b1"),
"params": {
"out_channels": (3, 32, 24, 40, 112, 320),
"stage_idxs": (5, 8, 16),
"model_name": "efficientnet-b1",
},
},
"efficientnet-b2": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b2"),
"params": {
"out_channels": (3, 32, 24, 48, 120, 352),
"stage_idxs": (5, 8, 16),
"model_name": "efficientnet-b2",
},
},
"efficientnet-b3": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b3"),
"params": {
"out_channels": (3, 40, 32, 48, 136, 384),
"stage_idxs": (5, 8, 18),
"model_name": "efficientnet-b3",
},
},
"efficientnet-b4": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b4"),
"params": {
"out_channels": (3, 48, 32, 56, 160, 448),
"stage_idxs": (6, 10, 22),
"model_name": "efficientnet-b4",
},
},
"efficientnet-b5": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b5"),
"params": {
"out_channels": (3, 48, 40, 64, 176, 512),
"stage_idxs": (8, 13, 27),
"model_name": "efficientnet-b5",
},
},
"efficientnet-b6": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b6"),
"params": {
"out_channels": (3, 56, 40, 72, 200, 576),
"stage_idxs": (9, 15, 31),
"model_name": "efficientnet-b6",
},
},
"efficientnet-b7": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b7"),
"params": {
"out_channels": (3, 64, 48, 80, 224, 640),
"stage_idxs": (11, 18, 38),
"model_name": "efficientnet-b7",
},
},
}
encoders = {}
encoders.update(efficient_net_encoders)
def get_encoder(name, in_channels=3, depth=5, weights=None):
Encoder = encoders[name]["encoder"]
params = encoders[name]["params"]
params.update(depth=depth)
encoder = Encoder(**params)
if weights is not None:
settings = encoders[name]["pretrained_settings"][weights]
encoder.load_state_dict(model_zoo.load_url(settings["url"]))
encoder.set_in_channels(in_channels)
return encoder
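# Hedged usage example (downloading pretrained weights needs network access):
#   encoder = get_encoder("efficientnet-b4", in_channels=1, depth=5, weights="imagenet")
#   feats = encoder(torch.rand(2, 1, 480, 480))  # list of depth + 1 feature maps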
| 6,765 | 30.765258 | 87 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/net_factory_3d.py
|
from networks.unet_3D import unet_3D
from networks.vnet import VNet
from networks.VoxResNet import VoxResNet
from networks.attention_unet import Attention_UNet
from networks.nnunet import initialize_network
def net_factory_3d(net_type="unet_3D", in_chns=1, class_num=2):
if net_type == "unet_3D":
net = unet_3D(n_classes=class_num, in_channels=in_chns).cuda()
elif net_type == "attention_unet":
net = Attention_UNet(n_classes=class_num, in_channels=in_chns).cuda()
elif net_type == "voxresnet":
net = VoxResNet(in_chns=in_chns, feature_chns=64,
class_num=class_num).cuda()
elif net_type == "vnet":
net = VNet(n_channels=in_chns, n_classes=class_num,
normalization='batchnorm', has_dropout=True).cuda()
elif net_type == "nnUNet":
net = initialize_network(num_classes=class_num).cuda()
else:
net = None
return net
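# Example (assumes a CUDA device, since every branch above calls .cuda()):
#   net = net_factory_3d("vnet", in_chns=1, class_num=2)
#   logits = net(torch.rand(1, 1, 96, 96, 96).cuda())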
| 933 | 37.916667 | 77 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/net_factory.py
|
from networks.efficientunet import Effi_UNet
from networks.enet import ENet
from networks.pnet import PNet2D
from networks.unet import UNet, UNet_DS, UNet_URPC, UNet_CCT
import argparse
from networks.vision_transformer import SwinUnet as ViT_seg
from networks.config import get_config
from networks.nnunet import initialize_network
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='ACDC/Cross_Supervision_CNN_Trans2D', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=8,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=int, nargs=2, default=[224, 224],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
parser.add_argument(
'--cfg', type=str, default="../code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
parser.add_argument('--zip', action='store_true',
help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int,
help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true',
help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true',
help='Test throughput only')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=4,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=7,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
config = get_config(args)
def net_factory(net_type="unet", in_chns=1, class_num=3):
if net_type == "unet":
net = UNet(in_chns=in_chns, class_num=class_num).cuda()
elif net_type == "enet":
net = ENet(in_channels=in_chns, num_classes=class_num).cuda()
elif net_type == "unet_ds":
net = UNet_DS(in_chns=in_chns, class_num=class_num).cuda()
elif net_type == "unet_cct":
net = UNet_CCT(in_chns=in_chns, class_num=class_num).cuda()
elif net_type == "unet_urpc":
net = UNet_URPC(in_chns=in_chns, class_num=class_num).cuda()
elif net_type == "efficient_unet":
net = Effi_UNet('efficientnet-b3', encoder_weights='imagenet',
in_channels=in_chns, classes=class_num).cuda()
elif net_type == "ViT_Seg":
net = ViT_seg(config, img_size=args.patch_size,
num_classes=args.num_classes).cuda()
elif net_type == "pnet":
net = PNet2D(in_chns, class_num, 64, [1, 2, 4, 8, 16]).cuda()
elif net_type == "nnUNet":
net = initialize_network(num_classes=class_num).cuda()
else:
net = None
return net
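# --- Usage sketch (added for exposition; not part of the original file) ---
# net_factory dispatches purely on the string name and silently returns None
# for an unknown one, so callers typically guard the result. The argument
# values below are demo assumptions; instantiation needs a CUDA device.
if __name__ == "__main__":
    model = net_factory(net_type="unet", in_chns=1, class_num=4)
    assert model is not None, "unknown net_type"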
| 4,746 | 46.949495 | 118 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/utils.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from networks.networks_other import init_weights
class conv2DBatchNorm(nn.Module):
def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
super(conv2DBatchNorm, self).__init__()
self.cb_unit = nn.Sequential(nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
padding=padding, stride=stride, bias=bias),
nn.BatchNorm2d(int(n_filters)),)
def forward(self, inputs):
outputs = self.cb_unit(inputs)
return outputs
class deconv2DBatchNorm(nn.Module):
def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
super(deconv2DBatchNorm, self).__init__()
self.dcb_unit = nn.Sequential(nn.ConvTranspose2d(int(in_channels), int(n_filters), kernel_size=k_size,
padding=padding, stride=stride, bias=bias),
nn.BatchNorm2d(int(n_filters)),)
def forward(self, inputs):
outputs = self.dcb_unit(inputs)
return outputs
class conv2DBatchNormRelu(nn.Module):
def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
super(conv2DBatchNormRelu, self).__init__()
self.cbr_unit = nn.Sequential(nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
padding=padding, stride=stride, bias=bias),
nn.BatchNorm2d(int(n_filters)),
nn.ReLU(inplace=True),)
def forward(self, inputs):
outputs = self.cbr_unit(inputs)
return outputs
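# --- Illustrative check (added for exposition; not part of the original file) ---
# conv2DBatchNormRelu is simply Conv2d -> BatchNorm2d -> ReLU; with k_size=3,
# stride=1, padding=1 it preserves the spatial size. Shapes are demo assumptions.
if __name__ == "__main__":
    block = conv2DBatchNormRelu(3, 16, k_size=3, stride=1, padding=1)
    assert block(torch.randn(1, 3, 32, 32)).shape == (1, 16, 32, 32)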
class deconv2DBatchNormRelu(nn.Module):
def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
super(deconv2DBatchNormRelu, self).__init__()
self.dcbr_unit = nn.Sequential(nn.ConvTranspose2d(int(in_channels), int(n_filters), kernel_size=k_size,
padding=padding, stride=stride, bias=bias),
nn.BatchNorm2d(int(n_filters)),
nn.ReLU(inplace=True),)
def forward(self, inputs):
outputs = self.dcbr_unit(inputs)
return outputs
class unetConv2(nn.Module):
def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1):
super(unetConv2, self).__init__()
self.n = n
self.ks = ks
self.stride = stride
self.padding = padding
s = stride
p = padding
if is_batchnorm:
for i in range(1, n+1):
conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p),
nn.BatchNorm2d(out_size),
nn.ReLU(inplace=True),)
setattr(self, 'conv%d'%i, conv)
in_size = out_size
else:
for i in range(1, n+1):
conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p),
nn.ReLU(inplace=True),)
setattr(self, 'conv%d'%i, conv)
in_size = out_size
# initialise the blocks
for m in self.children():
init_weights(m, init_type='kaiming')
def forward(self, inputs):
x = inputs
for i in range(1, self.n+1):
conv = getattr(self, 'conv%d'%i)
x = conv(x)
return x
class UnetConv3(nn.Module):
def __init__(self, in_size, out_size, is_batchnorm, kernel_size=(3,3,1), padding_size=(1,1,0), init_stride=(1,1,1)):
super(UnetConv3, self).__init__()
if is_batchnorm:
self.conv1 = nn.Sequential(nn.Conv3d(in_size, out_size, kernel_size, init_stride, padding_size),
nn.InstanceNorm3d(out_size),
nn.ReLU(inplace=True),)
self.conv2 = nn.Sequential(nn.Conv3d(out_size, out_size, kernel_size, 1, padding_size),
nn.InstanceNorm3d(out_size),
nn.ReLU(inplace=True),)
else:
self.conv1 = nn.Sequential(nn.Conv3d(in_size, out_size, kernel_size, init_stride, padding_size),
nn.ReLU(inplace=True),)
self.conv2 = nn.Sequential(nn.Conv3d(out_size, out_size, kernel_size, 1, padding_size),
nn.ReLU(inplace=True),)
# initialise the blocks
for m in self.children():
init_weights(m, init_type='kaiming')
def forward(self, inputs):
outputs = self.conv1(inputs)
outputs = self.conv2(outputs)
return outputs
class FCNConv3(nn.Module):
def __init__(self, in_size, out_size, is_batchnorm, kernel_size=(3,3,1), padding_size=(1,1,0), init_stride=(1,1,1)):
super(FCNConv3, self).__init__()
if is_batchnorm:
self.conv1 = nn.Sequential(nn.Conv3d(in_size, out_size, kernel_size, init_stride, padding_size),
nn.InstanceNorm3d(out_size),
nn.ReLU(inplace=True),)
self.conv2 = nn.Sequential(nn.Conv3d(out_size, out_size, kernel_size, 1, padding_size),
nn.InstanceNorm3d(out_size),
nn.ReLU(inplace=True),)
self.conv3 = nn.Sequential(nn.Conv3d(out_size, out_size, kernel_size, 1, padding_size),
nn.InstanceNorm3d(out_size),
nn.ReLU(inplace=True),)
else:
self.conv1 = nn.Sequential(nn.Conv3d(in_size, out_size, kernel_size, init_stride, padding_size),
nn.ReLU(inplace=True),)
self.conv2 = nn.Sequential(nn.Conv3d(out_size, out_size, kernel_size, 1, padding_size),
nn.ReLU(inplace=True),)
self.conv3 = nn.Sequential(nn.Conv3d(out_size, out_size, kernel_size, 1, padding_size),
nn.ReLU(inplace=True),)
# initialise the blocks
for m in self.children():
init_weights(m, init_type='kaiming')
def forward(self, inputs):
outputs = self.conv1(inputs)
outputs = self.conv2(outputs)
outputs = self.conv3(outputs)
return outputs
class UnetGatingSignal3(nn.Module):
def __init__(self, in_size, out_size, is_batchnorm):
super(UnetGatingSignal3, self).__init__()
self.fmap_size = (4, 4, 4)
if is_batchnorm:
self.conv1 = nn.Sequential(nn.Conv3d(in_size, in_size//2, (1,1,1), (1,1,1), (0,0,0)),
nn.InstanceNorm3d(in_size//2),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool3d(output_size=self.fmap_size),
)
self.fc1 = nn.Linear(in_features=(in_size//2) * self.fmap_size[0] * self.fmap_size[1] * self.fmap_size[2],
out_features=out_size, bias=True)
else:
self.conv1 = nn.Sequential(nn.Conv3d(in_size, in_size//2, (1,1,1), (1,1,1), (0,0,0)),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool3d(output_size=self.fmap_size),
)
self.fc1 = nn.Linear(in_features=(in_size//2) * self.fmap_size[0] * self.fmap_size[1] * self.fmap_size[2],
out_features=out_size, bias=True)
# initialise the blocks
for m in self.children():
init_weights(m, init_type='kaiming')
def forward(self, inputs):
batch_size = inputs.size(0)
outputs = self.conv1(inputs)
outputs = outputs.view(batch_size, -1)
outputs = self.fc1(outputs)
return outputs
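# --- Illustrative check (added for exposition; not part of the original file) ---
# UnetGatingSignal3 pools any spatial size down to a fixed 4x4x4 map, flattens
# it, and projects it to a flat gating vector. Sizes below are demo assumptions.
if __name__ == "__main__":
    gate = UnetGatingSignal3(in_size=32, out_size=64, is_batchnorm=True)
    assert gate(torch.randn(2, 32, 9, 9, 9)).shape == (2, 64)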
class UnetGridGatingSignal3(nn.Module):
def __init__(self, in_size, out_size, kernel_size=(1,1,1), is_batchnorm=True):
super(UnetGridGatingSignal3, self).__init__()
if is_batchnorm:
self.conv1 = nn.Sequential(nn.Conv3d(in_size, out_size, kernel_size, (1,1,1), (0,0,0)),
nn.InstanceNorm3d(out_size),
nn.ReLU(inplace=True),
)
else:
self.conv1 = nn.Sequential(nn.Conv3d(in_size, out_size, kernel_size, (1,1,1), (0,0,0)),
nn.ReLU(inplace=True),
)
# initialise the blocks
for m in self.children():
init_weights(m, init_type='kaiming')
def forward(self, inputs):
outputs = self.conv1(inputs)
return outputs
class unetUp(nn.Module):
def __init__(self, in_size, out_size, is_deconv):
super(unetUp, self).__init__()
self.conv = unetConv2(in_size, out_size, False)
if is_deconv:
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=4, stride=2, padding=1)
else:
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
# initialise the blocks
for m in self.children():
if m.__class__.__name__.find('unetConv2') != -1: continue
init_weights(m, init_type='kaiming')
def forward(self, inputs1, inputs2):
outputs2 = self.up(inputs2)
offset = outputs2.size()[2] - inputs1.size()[2]
padding = 2 * [offset // 2, offset // 2]
outputs1 = F.pad(inputs1, padding)
return self.conv(torch.cat([outputs1, outputs2], 1))
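# --- Illustrative check (added for exposition; not part of the original file) ---
# unetUp upsamples the coarse decoder features, pads the encoder skip to match
# if needed, concatenates, and convolves. Shapes below are demo assumptions.
if __name__ == "__main__":
    up = unetUp(in_size=32, out_size=16, is_deconv=True)
    skip = torch.randn(1, 16, 16, 16)   # encoder feature map
    low = torch.randn(1, 32, 8, 8)      # coarser decoder feature map
    assert up(skip, low).shape == (1, 16, 16, 16)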
class UnetUp3(nn.Module):
def __init__(self, in_size, out_size, is_deconv, is_batchnorm=True):
super(UnetUp3, self).__init__()
if is_deconv:
self.conv = UnetConv3(in_size, out_size, is_batchnorm)
self.up = nn.ConvTranspose3d(in_size, out_size, kernel_size=(4,4,1), stride=(2,2,1), padding=(1,1,0))
else:
self.conv = UnetConv3(in_size+out_size, out_size, is_batchnorm)
self.up = nn.Upsample(scale_factor=(2, 2, 1), mode='trilinear')
# initialise the blocks
for m in self.children():
if m.__class__.__name__.find('UnetConv3') != -1: continue
init_weights(m, init_type='kaiming')
def forward(self, inputs1, inputs2):
outputs2 = self.up(inputs2)
offset = outputs2.size()[2] - inputs1.size()[2]
padding = 2 * [offset // 2, offset // 2, 0]
outputs1 = F.pad(inputs1, padding)
return self.conv(torch.cat([outputs1, outputs2], 1))
class UnetUp3_CT(nn.Module):
def __init__(self, in_size, out_size, is_batchnorm=True):
super(UnetUp3_CT, self).__init__()
self.conv = UnetConv3(in_size + out_size, out_size, is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))
self.up = nn.Upsample(scale_factor=(2, 2, 2), mode='trilinear')
# initialise the blocks
for m in self.children():
if m.__class__.__name__.find('UnetConv3') != -1: continue
init_weights(m, init_type='kaiming')
def forward(self, inputs1, inputs2):
outputs2 = self.up(inputs2)
offset = outputs2.size()[2] - inputs1.size()[2]
padding = 2 * [offset // 2, offset // 2, 0]
outputs1 = F.pad(inputs1, padding)
return self.conv(torch.cat([outputs1, outputs2], 1))
# Squeeze-and-Excitation Network
class SqEx(nn.Module):
def __init__(self, n_features, reduction=6):
super(SqEx, self).__init__()
if n_features % reduction != 0:
            raise ValueError('n_features must be divisible by reduction (default reduction = 6)')
self.linear1 = nn.Linear(n_features, n_features // reduction, bias=False)
self.nonlin1 = nn.ReLU(inplace=True)
self.linear2 = nn.Linear(n_features // reduction, n_features, bias=False)
self.nonlin2 = nn.Sigmoid()
def forward(self, x):
y = F.avg_pool3d(x, kernel_size=x.size()[2:5])
y = y.permute(0, 2, 3, 4, 1)
y = self.nonlin1(self.linear1(y))
y = self.nonlin2(self.linear2(y))
y = y.permute(0, 4, 1, 2, 3)
y = x * y
return y
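# --- Illustrative check (added for exposition; not part of the original file) ---
# The squeeze-and-excitation block global-average-pools to one value per
# channel, squeezes through a bottleneck MLP, and rescales the input channels,
# so the output shape equals the input shape. Sizes are demo assumptions.
if __name__ == "__main__":
    se = SqEx(n_features=12, reduction=6)
    x = torch.randn(2, 12, 8, 8, 8)
    assert se(x).shape == x.shape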
class UnetUp3_SqEx(nn.Module):
def __init__(self, in_size, out_size, is_deconv, is_batchnorm):
super(UnetUp3_SqEx, self).__init__()
if is_deconv:
self.sqex = SqEx(n_features=in_size+out_size)
self.conv = UnetConv3(in_size, out_size, is_batchnorm)
self.up = nn.ConvTranspose3d(in_size, out_size, kernel_size=(4,4,1), stride=(2,2,1), padding=(1,1,0))
else:
self.sqex = SqEx(n_features=in_size+out_size)
self.conv = UnetConv3(in_size+out_size, out_size, is_batchnorm)
self.up = nn.Upsample(scale_factor=(2, 2, 1), mode='trilinear')
# initialise the blocks
for m in self.children():
if m.__class__.__name__.find('UnetConv3') != -1: continue
init_weights(m, init_type='kaiming')
def forward(self, inputs1, inputs2):
outputs2 = self.up(inputs2)
offset = outputs2.size()[2] - inputs1.size()[2]
padding = 2 * [offset // 2, offset // 2, 0]
outputs1 = F.pad(inputs1, padding)
concat = torch.cat([outputs1, outputs2], 1)
gated = self.sqex(concat)
return self.conv(gated)
class residualBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, n_filters, stride=1, downsample=None):
super(residualBlock, self).__init__()
self.convbnrelu1 = conv2DBatchNormRelu(in_channels, n_filters, 3, stride, 1, bias=False)
self.convbn2 = conv2DBatchNorm(n_filters, n_filters, 3, 1, 1, bias=False)
self.downsample = downsample
self.stride = stride
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
residual = x
out = self.convbnrelu1(x)
out = self.convbn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class residualBottleneck(nn.Module):
expansion = 4
def __init__(self, in_channels, n_filters, stride=1, downsample=None):
super(residualBottleneck, self).__init__()
        # use the local conv2DBatchNorm wrapper defined above (torch.nn has no Conv2DBatchNorm)
        self.convbn1 = conv2DBatchNorm(in_channels, n_filters, k_size=1, stride=1, padding=0, bias=False)
        self.convbn2 = conv2DBatchNorm(n_filters, n_filters, k_size=3, stride=stride, padding=1, bias=False)
        self.convbn3 = conv2DBatchNorm(n_filters, n_filters * 4, k_size=1, stride=1, padding=0, bias=False)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.convbn1(x)
out = self.convbn2(out)
out = self.convbn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SeqModelFeatureExtractor(nn.Module):
def __init__(self, submodule, extracted_layers):
super(SeqModelFeatureExtractor, self).__init__()
self.submodule = submodule
self.extracted_layers = extracted_layers
def forward(self, x):
outputs = []
for name, module in self.submodule._modules.items():
x = module(x)
if name in self.extracted_layers:
outputs += [x]
return outputs + [x]
class HookBasedFeatureExtractor(nn.Module):
def __init__(self, submodule, layername, upscale=False):
super(HookBasedFeatureExtractor, self).__init__()
self.submodule = submodule
self.submodule.eval()
self.layername = layername
self.outputs_size = None
self.outputs = None
self.inputs = None
self.inputs_size = None
self.upscale = upscale
def get_input_array(self, m, i, o):
if isinstance(i, tuple):
self.inputs = [i[index].data.clone() for index in range(len(i))]
self.inputs_size = [input.size() for input in self.inputs]
else:
self.inputs = i.data.clone()
            self.inputs_size = self.inputs.size()
print('Input Array Size: ', self.inputs_size)
def get_output_array(self, m, i, o):
if isinstance(o, tuple):
self.outputs = [o[index].data.clone() for index in range(len(o))]
self.outputs_size = [output.size() for output in self.outputs]
else:
self.outputs = o.data.clone()
self.outputs_size = self.outputs.size()
print('Output Array Size: ', self.outputs_size)
def rescale_output_array(self, newsize):
us = nn.Upsample(size=newsize[2:], mode='bilinear')
if isinstance(self.outputs, list):
            for index in range(len(self.outputs)): self.outputs[index] = us(self.outputs[index]).data
else:
            self.outputs = us(self.outputs).data
def forward(self, x):
target_layer = self.submodule._modules.get(self.layername)
# Collect the output tensor
h_inp = target_layer.register_forward_hook(self.get_input_array)
h_out = target_layer.register_forward_hook(self.get_output_array)
self.submodule(x)
h_inp.remove()
h_out.remove()
# Rescale the feature-map if it's required
if self.upscale: self.rescale_output_array(x.size())
return self.inputs, self.outputs
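# --- Usage sketch (added for exposition; not part of the original file) ---
# HookBasedFeatureExtractor registers forward hooks on a named submodule and
# returns that layer's inputs and outputs. The toy network is a demo assumption.
if __name__ == "__main__":
    toy = nn.Sequential()
    toy.add_module('conv', nn.Conv2d(1, 4, 3, padding=1))
    extractor = HookBasedFeatureExtractor(toy, 'conv')
    ins, outs = extractor(torch.randn(1, 1, 8, 8))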
class UnetDsv3(nn.Module):
def __init__(self, in_size, out_size, scale_factor):
super(UnetDsv3, self).__init__()
self.dsv = nn.Sequential(nn.Conv3d(in_size, out_size, kernel_size=1, stride=1, padding=0),
nn.Upsample(scale_factor=scale_factor, mode='trilinear'), )
def forward(self, input):
return self.dsv(input)
| 18,130 | 38.159827 | 120 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/neural_network.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch.cuda.amp import autocast  # used below when mixed_precision=True
import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image
from torch import nn
import torch
from scipy.ndimage.filters import gaussian_filter
from typing import Union, Tuple, List
class no_op(object):
def __enter__(self):
pass
def __exit__(self, *args):
pass
def maybe_to_torch(d):
if isinstance(d, list):
d = [maybe_to_torch(i) if not isinstance(
i, torch.Tensor) else i for i in d]
elif not isinstance(d, torch.Tensor):
d = torch.from_numpy(d).float()
return d
def to_cuda(data, non_blocking=True, gpu_id=0):
if isinstance(data, list):
data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data]
else:
data = data.cuda(gpu_id, non_blocking=non_blocking)
return data
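# --- Illustrative check (added for exposition; not part of the original file) ---
# maybe_to_torch converts numpy arrays (also inside lists) to float tensors and
# leaves tensors untouched; to_cuda does the analogous device move on GPU boxes.
if __name__ == "__main__":
    t = maybe_to_torch(np.zeros((2, 3)))
    assert isinstance(t, torch.Tensor) and t.dtype == torch.float32
    mixed = maybe_to_torch([np.ones((2,)), t])
    assert all(isinstance(i, torch.Tensor) for i in mixed)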
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
def get_device(self):
if next(self.parameters()).device == "cpu":
return "cpu"
else:
return next(self.parameters()).device.index
def set_device(self, device):
if device == "cpu":
self.cpu()
else:
self.cuda(device)
def forward(self, x):
raise NotImplementedError
class SegmentationNetwork(NeuralNetwork):
def __init__(self):
        super(SegmentationNetwork, self).__init__()
        # if we have 5 poolings then our patch size must be divisible by 2**5;
        # for example in a 2d network that does 5 pools in x and 6 pools in y
        # this would be (32, 64)
        self.input_shape_must_be_divisible_by = None
        # we need to know this because we need to know if we are a 2d or a 3d network
        self.conv_op = None  # nn.Conv2d or nn.Conv3d
        # this tells us how many channels we have in the output. Important for preallocation in inference
        self.num_classes = None  # number of channels in the output
        # depending on the loss, we do not hard code a nonlinearity into the architecture. To aggregate predictions
        # during inference, we need to apply the nonlinearity, however. So it is important to let the network know what
        # to apply in inference. For the most part this will be softmax
self.inference_apply_nonlin = lambda x: x # softmax_helper
# This is for saving a gaussian importance map for inference. It weights voxels higher that are closer to the
# center. Prediction at the borders are often less accurate and are thus downweighted. Creating these Gaussians
# can be expensive, so it makes sense to save and reuse them.
self._gaussian_3d = self._patch_size_for_gaussian_3d = None
self._gaussian_2d = self._patch_size_for_gaussian_2d = None
def predict_3D(self, x: np.ndarray, do_mirroring: bool, mirror_axes: Tuple[int, ...] = (0, 1, 2),
use_sliding_window: bool = False,
step_size: float = 0.5, patch_size: Tuple[int, ...] = None, regions_class_order: Tuple[int, ...] = None,
use_gaussian: bool = False, pad_border_mode: str = "constant",
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""
Use this function to predict a 3D image. It does not matter whether the network is a 2D or 3D U-Net, it will
detect that automatically and run the appropriate code.
        When running predictions, you need to specify whether you want to run fully convolutional or sliding window
based inference. We very strongly recommend you use sliding window with the default settings.
It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If
the network is not in eval mode it will print a warning.
        :param x: Your input data. Must be an np.ndarray of shape (c, x, y, z).
:param do_mirroring: If True, use test time data augmentation in the form of mirroring
        :param mirror_axes: Determines which axes to use for mirroring. Per default, mirroring is done along all three
axes
:param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default
:param step_size: When running sliding window prediction, the step size determines the distance between adjacent
predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given
        as a fraction of the patch_size. 0.5 is the default and means that we advance by patch_size * 0.5 between
predictions. step_size cannot be larger than 1!
:param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,
this will either crash or give potentially less accurate segmentations
:param regions_class_order: Fabian only
:param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting
to weigh predictions closer to the center of the current patch higher than those at the borders. The reason
behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True
:param pad_border_mode: leave this alone
:param pad_kwargs: leave this alone
        :param all_in_gpu: experimental. You probably want to leave this as it is
:param verbose: Do you want a wall of text? If yes then set this to True
:param mixed_precision: if True, will run inference in mixed precision with autocast()
:return:
"""
torch.cuda.empty_cache()
        assert step_size <= 1, 'step_size must be smaller than or equal to 1. Otherwise there will be a gap between ' \
                               'consecutive predictions'
if verbose:
print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes)
assert self.get_device() != "cpu", "CPU not implemented"
if pad_kwargs is None:
pad_kwargs = {'constant_values': 0}
# A very long time ago the mirror axes were (2, 3, 4) for a 3d network. This is just to intercept any old
# code that uses this convention
if len(mirror_axes):
if self.conv_op == nn.Conv2d:
if max(mirror_axes) > 1:
raise ValueError("mirror axes. duh")
if self.conv_op == nn.Conv3d:
if max(mirror_axes) > 2:
raise ValueError("mirror axes. duh")
if self.training:
print(
'WARNING! Network is in train mode during inference. This may be intended, or not...')
assert len(x.shape) == 4, "data must have shape (c,x,y,z)"
if mixed_precision:
context = autocast
else:
context = no_op
with context():
with torch.no_grad():
if self.conv_op == nn.Conv3d:
if use_sliding_window:
res = self._internal_predict_3D_3Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,
regions_class_order, use_gaussian, pad_border_mode,
pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,
verbose=verbose)
else:
res = self._internal_predict_3D_3Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
pad_border_mode, pad_kwargs=pad_kwargs, verbose=verbose)
elif self.conv_op == nn.Conv2d:
if use_sliding_window:
res = self._internal_predict_3D_2Dconv_tiled(x, patch_size, do_mirroring, mirror_axes, step_size,
regions_class_order, use_gaussian, pad_border_mode,
pad_kwargs, all_in_gpu, False)
else:
res = self._internal_predict_3D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
pad_border_mode, pad_kwargs, all_in_gpu, False)
else:
raise RuntimeError(
"Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
return res
    def predict_2D(self, x, do_mirroring: bool, mirror_axes: tuple = (0, 1), use_sliding_window: bool = False,
step_size: float = 0.5, patch_size: tuple = None, regions_class_order: tuple = None,
use_gaussian: bool = False, pad_border_mode: str = "constant",
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""
Use this function to predict a 2D image. If this is a 3D U-Net it will crash because you cannot predict a 2D
image with that (you dummy).
        When running predictions, you need to specify whether you want to run fully convolutional or sliding window
based inference. We very strongly recommend you use sliding window with the default settings.
It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If
the network is not in eval mode it will print a warning.
        :param x: Your input data. Must be an np.ndarray of shape (c, x, y).
:param do_mirroring: If True, use test time data augmentation in the form of mirroring
        :param mirror_axes: Determines which axes to use for mirroring. Per default, mirroring is done along both
        spatial axes
:param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default
:param step_size: When running sliding window prediction, the step size determines the distance between adjacent
predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given
        as a fraction of the patch_size. 0.5 is the default and means that we advance by patch_size * 0.5 between
predictions. step_size cannot be larger than 1!
:param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,
this will either crash or give potentially less accurate segmentations
:param regions_class_order: Fabian only
:param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting
to weigh predictions closer to the center of the current patch higher than those at the borders. The reason
behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True
:param pad_border_mode: leave this alone
:param pad_kwargs: leave this alone
        :param all_in_gpu: experimental. You probably want to leave this as it is
:param verbose: Do you want a wall of text? If yes then set this to True
:return:
"""
torch.cuda.empty_cache()
        assert step_size <= 1, 'step_size must be smaller than or equal to 1. Otherwise there will be a gap between ' \
                               'consecutive predictions'
if self.conv_op == nn.Conv3d:
raise RuntimeError(
"Cannot predict 2d if the network is 3d. Dummy.")
if verbose:
print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes)
assert self.get_device() != "cpu", "CPU not implemented"
if pad_kwargs is None:
pad_kwargs = {'constant_values': 0}
# A very long time ago the mirror axes were (2, 3) for a 2d network. This is just to intercept any old
# code that uses this convention
if len(mirror_axes):
if max(mirror_axes) > 1:
raise ValueError("mirror axes. duh")
if self.training:
print(
'WARNING! Network is in train mode during inference. This may be intended, or not...')
assert len(x.shape) == 3, "data must have shape (c,x,y)"
if mixed_precision:
context = autocast
else:
context = no_op
with context():
with torch.no_grad():
if self.conv_op == nn.Conv2d:
if use_sliding_window:
res = self._internal_predict_2D_2Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,
regions_class_order, use_gaussian, pad_border_mode,
pad_kwargs, all_in_gpu, verbose)
else:
res = self._internal_predict_2D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
pad_border_mode, pad_kwargs, verbose)
else:
raise RuntimeError(
"Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
return res
@staticmethod
def _get_gaussian(patch_size, sigma_scale=1. / 8) -> np.ndarray:
tmp = np.zeros(patch_size)
center_coords = [i // 2 for i in patch_size]
sigmas = [i * sigma_scale for i in patch_size]
tmp[tuple(center_coords)] = 1
gaussian_importance_map = gaussian_filter(
tmp, sigmas, 0, mode='constant', cval=0)
gaussian_importance_map = gaussian_importance_map / \
np.max(gaussian_importance_map) * 1
gaussian_importance_map = gaussian_importance_map.astype(np.float32)
# gaussian_importance_map cannot be 0, otherwise we may end up with nans!
gaussian_importance_map[gaussian_importance_map == 0] = np.min(
gaussian_importance_map[gaussian_importance_map != 0])
return gaussian_importance_map
@staticmethod
def _compute_steps_for_sliding_window(patch_size: Tuple[int, ...], image_size: Tuple[int, ...], step_size: float) -> List[List[int]]:
        assert all(i >= j for i, j in zip(
            image_size, patch_size)), "image size must be as large or larger than patch_size"
assert 0 < step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'
# our step width is patch_size*step_size at most, but can be narrower. For example if we have image size of
# 110, patch size of 64 and step_size of 0.5, then we want to make 3 steps starting at coordinate 0, 23, 46
target_step_sizes_in_voxels = [i * step_size for i in patch_size]
num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j,
k in zip(image_size, target_step_sizes_in_voxels, patch_size)]
steps = []
for dim in range(len(patch_size)):
# the highest step value for this dimension is
max_step_value = image_size[dim] - patch_size[dim]
if num_steps[dim] > 1:
actual_step_size = max_step_value / (num_steps[dim] - 1)
else:
# does not matter because there is only one step at 0
actual_step_size = 99999999999
steps_here = [int(np.round(actual_step_size * i))
for i in range(num_steps[dim])]
steps.append(steps_here)
return steps
def _internal_predict_3D_3Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,
patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,
pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,
verbose: bool) -> Tuple[np.ndarray, np.ndarray]:
# better safe than sorry
assert len(x.shape) == 4, "x must be (c, x, y, z)"
assert self.get_device() != "cpu"
if verbose:
print("step_size:", step_size)
if verbose:
print("do mirror:", do_mirroring)
assert patch_size is not None, "patch_size cannot be None for tiled prediction"
# for sliding window inference the image must at least be as large as the patch size. It does not matter
# whether the shape is divisible by 2**num_pool as long as the patch size is
data, slicer = pad_nd_image(
x, patch_size, pad_border_mode, pad_kwargs, True, None)
data_shape = data.shape # still c, x, y, z
# compute the steps for sliding window
steps = self._compute_steps_for_sliding_window(
patch_size, data_shape[1:], step_size)
num_tiles = len(steps[0]) * len(steps[1]) * len(steps[2])
if verbose:
print("data shape:", data_shape)
print("patch size:", patch_size)
print("steps (x, y, and z):", steps)
print("number of tiles:", num_tiles)
# we only need to compute that once. It can take a while to compute this due to the large sigma in
# gaussian_filter
if use_gaussian and num_tiles > 1:
if self._gaussian_3d is None or not all(
[i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_3d)]):
if verbose:
print('computing Gaussian')
gaussian_importance_map = self._get_gaussian(
patch_size, sigma_scale=1. / 8)
self._gaussian_3d = gaussian_importance_map
self._patch_size_for_gaussian_3d = patch_size
else:
if verbose:
print("using precomputed Gaussian")
gaussian_importance_map = self._gaussian_3d
gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),
non_blocking=True)
else:
gaussian_importance_map = None
if all_in_gpu:
            # If we run the inference in GPU only (meaning all tensors are allocated on the GPU), this reduces
            # CPU-GPU communication but requires more GPU memory, so we need to preallocate a few things on GPU
if use_gaussian and num_tiles > 1:
# half precision for the outputs should be good enough. If the outputs here are half, the
# gaussian_importance_map should be as well
gaussian_importance_map = gaussian_importance_map.half()
# make sure we did not round anything to 0
gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[
gaussian_importance_map != 0].min()
add_for_nb_of_preds = gaussian_importance_map
else:
add_for_nb_of_preds = torch.ones(
data.shape[1:], device=self.get_device())
if verbose:
print("initializing result array (on GPU)")
aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
device=self.get_device())
if verbose:
print("moving data to GPU")
data = torch.from_numpy(data).cuda(
self.get_device(), non_blocking=True)
if verbose:
print("initializing result_numsamples (on GPU)")
aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
device=self.get_device())
else:
if use_gaussian and num_tiles > 1:
add_for_nb_of_preds = self._gaussian_3d
else:
add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)
aggregated_results = np.zeros(
[self.num_classes] + list(data.shape[1:]), dtype=np.float32)
aggregated_nb_of_predictions = np.zeros(
[self.num_classes] + list(data.shape[1:]), dtype=np.float32)
for x in steps[0]:
lb_x = x
ub_x = x + patch_size[0]
for y in steps[1]:
lb_y = y
ub_y = y + patch_size[1]
for z in steps[2]:
lb_z = z
ub_z = z + patch_size[2]
predicted_patch = self._internal_maybe_mirror_and_pred_3D(
data[None, :, lb_x:ub_x, lb_y:ub_y,
lb_z:ub_z], mirror_axes, do_mirroring,
gaussian_importance_map)[0]
if all_in_gpu:
predicted_patch = predicted_patch.half()
else:
predicted_patch = predicted_patch.cpu().numpy()
aggregated_results[:, lb_x:ub_x,
lb_y:ub_y, lb_z:ub_z] += predicted_patch
aggregated_nb_of_predictions[:, lb_x:ub_x,
lb_y:ub_y, lb_z:ub_z] += add_for_nb_of_preds
        # we reverse the padding here (remember that we padded the input to be at least as large as the patch size)
slicer = tuple(
[slice(0, aggregated_results.shape[i]) for i in
range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])
aggregated_results = aggregated_results[slicer]
aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]
# computing the class_probabilities by dividing the aggregated result with result_numsamples
class_probabilities = aggregated_results / aggregated_nb_of_predictions
if regions_class_order is None:
predicted_segmentation = class_probabilities.argmax(0)
else:
if all_in_gpu:
class_probabilities_here = class_probabilities.detach().cpu().numpy()
else:
class_probabilities_here = class_probabilities
predicted_segmentation = np.zeros(
class_probabilities_here.shape[1:], dtype=np.float32)
for i, c in enumerate(regions_class_order):
predicted_segmentation[class_probabilities_here[i] > 0.5] = c
if all_in_gpu:
if verbose:
print("copying results to CPU")
if regions_class_order is None:
predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
class_probabilities = class_probabilities.detach().cpu().numpy()
if verbose:
print("prediction done")
return predicted_segmentation, class_probabilities
def _internal_predict_2D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
                                    mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,
pad_border_mode: str = "constant", pad_kwargs: dict = None,
verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""
This one does fully convolutional inference. No sliding window
"""
assert len(x.shape) == 3, "x must be (c, x, y)"
assert self.get_device() != "cpu"
assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \
'run _internal_predict_2D_2Dconv'
if verbose:
print("do mirror:", do_mirroring)
data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,
self.input_shape_must_be_divisible_by)
predicted_probabilities = self._internal_maybe_mirror_and_pred_2D(data[None], mirror_axes, do_mirroring,
None)[0]
slicer = tuple(
[slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -
(len(slicer) - 1))] + slicer[1:])
predicted_probabilities = predicted_probabilities[slicer]
if regions_class_order is None:
predicted_segmentation = predicted_probabilities.argmax(0)
predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
else:
predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
predicted_segmentation = np.zeros(
predicted_probabilities.shape[1:], dtype=np.float32)
for i, c in enumerate(regions_class_order):
predicted_segmentation[predicted_probabilities[i] > 0.5] = c
return predicted_segmentation, predicted_probabilities
def _internal_predict_3D_3Dconv(self, x: np.ndarray, min_size: Tuple[int, ...], do_mirroring: bool,
mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,
pad_border_mode: str = "constant", pad_kwargs: dict = None,
verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""
This one does fully convolutional inference. No sliding window
"""
assert len(x.shape) == 4, "x must be (c, x, y, z)"
assert self.get_device() != "cpu"
assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \
'run _internal_predict_3D_3Dconv'
if verbose:
print("do mirror:", do_mirroring)
data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,
self.input_shape_must_be_divisible_by)
predicted_probabilities = self._internal_maybe_mirror_and_pred_3D(data[None], mirror_axes, do_mirroring,
None)[0]
slicer = tuple(
[slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -
(len(slicer) - 1))] + slicer[1:])
predicted_probabilities = predicted_probabilities[slicer]
if regions_class_order is None:
predicted_segmentation = predicted_probabilities.argmax(0)
predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
else:
predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
predicted_segmentation = np.zeros(
predicted_probabilities.shape[1:], dtype=np.float32)
for i, c in enumerate(regions_class_order):
predicted_segmentation[predicted_probabilities[i] > 0.5] = c
return predicted_segmentation, predicted_probabilities
    def _internal_maybe_mirror_and_pred_3D(self, x: Union[np.ndarray, torch.Tensor], mirror_axes: tuple,
                                           do_mirroring: bool = True,
                                           mult: Union[np.ndarray, torch.Tensor] = None) -> torch.Tensor:
assert len(x.shape) == 5, 'x must be (b, c, x, y, z)'
# everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here
# we now return a cuda tensor! Not numpy array!
x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())
result_torch = torch.zeros([1, self.num_classes] + list(x.shape[2:]),
dtype=torch.float).cuda(self.get_device(), non_blocking=True)
if mult is not None:
mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())
if do_mirroring:
mirror_idx = 8
num_results = 2 ** len(mirror_axes)
else:
mirror_idx = 1
num_results = 1
for m in range(mirror_idx):
if m == 0:
pred = self.inference_apply_nonlin(self(x))
result_torch += 1 / num_results * pred
if m == 1 and (2 in mirror_axes):
pred = self.inference_apply_nonlin(self(torch.flip(x, (4, ))))
result_torch += 1 / num_results * torch.flip(pred, (4,))
if m == 2 and (1 in mirror_axes):
pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))
result_torch += 1 / num_results * torch.flip(pred, (3,))
if m == 3 and (2 in mirror_axes) and (1 in mirror_axes):
pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 3))))
result_torch += 1 / num_results * torch.flip(pred, (4, 3))
if m == 4 and (0 in mirror_axes):
pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))
result_torch += 1 / num_results * torch.flip(pred, (2,))
if m == 5 and (0 in mirror_axes) and (2 in mirror_axes):
pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 2))))
result_torch += 1 / num_results * torch.flip(pred, (4, 2))
if m == 6 and (0 in mirror_axes) and (1 in mirror_axes):
pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))
result_torch += 1 / num_results * torch.flip(pred, (3, 2))
if m == 7 and (0 in mirror_axes) and (1 in mirror_axes) and (2 in mirror_axes):
pred = self.inference_apply_nonlin(
self(torch.flip(x, (4, 3, 2))))
result_torch += 1 / num_results * torch.flip(pred, (4, 3, 2))
if mult is not None:
result_torch[:, :] *= mult
return result_torch
    def _internal_maybe_mirror_and_pred_2D(self, x: Union[np.ndarray, torch.Tensor], mirror_axes: tuple,
                                           do_mirroring: bool = True,
                                           mult: Union[np.ndarray, torch.Tensor] = None) -> torch.Tensor:
# everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here
# we now return a cuda tensor! Not numpy array!
assert len(x.shape) == 4, 'x must be (b, c, x, y)'
x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())
result_torch = torch.zeros([x.shape[0], self.num_classes] + list(x.shape[2:]),
dtype=torch.float).cuda(self.get_device(), non_blocking=True)
if mult is not None:
mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())
if do_mirroring:
mirror_idx = 4
num_results = 2 ** len(mirror_axes)
else:
mirror_idx = 1
num_results = 1
for m in range(mirror_idx):
if m == 0:
pred = self.inference_apply_nonlin(self(x))
result_torch += 1 / num_results * pred
if m == 1 and (1 in mirror_axes):
pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))
result_torch += 1 / num_results * torch.flip(pred, (3, ))
if m == 2 and (0 in mirror_axes):
pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))
result_torch += 1 / num_results * torch.flip(pred, (2, ))
if m == 3 and (0 in mirror_axes) and (1 in mirror_axes):
pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))
result_torch += 1 / num_results * torch.flip(pred, (3, 2))
if mult is not None:
result_torch[:, :] *= mult
return result_torch
def _internal_predict_2D_2Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,
patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,
pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,
verbose: bool) -> Tuple[np.ndarray, np.ndarray]:
# better safe than sorry
assert len(x.shape) == 3, "x must be (c, x, y)"
assert self.get_device() != "cpu"
if verbose:
print("step_size:", step_size)
if verbose:
print("do mirror:", do_mirroring)
assert patch_size is not None, "patch_size cannot be None for tiled prediction"
# for sliding window inference the image must at least be as large as the patch size. It does not matter
# whether the shape is divisible by 2**num_pool as long as the patch size is
data, slicer = pad_nd_image(
x, patch_size, pad_border_mode, pad_kwargs, True, None)
data_shape = data.shape # still c, x, y
# compute the steps for sliding window
steps = self._compute_steps_for_sliding_window(
patch_size, data_shape[1:], step_size)
num_tiles = len(steps[0]) * len(steps[1])
if verbose:
print("data shape:", data_shape)
print("patch size:", patch_size)
print("steps (x, y, and z):", steps)
print("number of tiles:", num_tiles)
# we only need to compute that once. It can take a while to compute this due to the large sigma in
# gaussian_filter
if use_gaussian and num_tiles > 1:
if self._gaussian_2d is None or not all(
[i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_2d)]):
if verbose:
print('computing Gaussian')
gaussian_importance_map = self._get_gaussian(
patch_size, sigma_scale=1. / 8)
self._gaussian_2d = gaussian_importance_map
self._patch_size_for_gaussian_2d = patch_size
else:
if verbose:
print("using precomputed Gaussian")
gaussian_importance_map = self._gaussian_2d
gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),
non_blocking=True)
else:
gaussian_importance_map = None
if all_in_gpu:
            # If we run the inference in GPU only (meaning all tensors are allocated on the GPU), this reduces
            # CPU-GPU communication but requires more GPU memory, so we need to preallocate a few things on GPU
if use_gaussian and num_tiles > 1:
# half precision for the outputs should be good enough. If the outputs here are half, the
# gaussian_importance_map should be as well
gaussian_importance_map = gaussian_importance_map.half()
# make sure we did not round anything to 0
gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[
gaussian_importance_map != 0].min()
add_for_nb_of_preds = gaussian_importance_map
else:
add_for_nb_of_preds = torch.ones(
data.shape[1:], device=self.get_device())
if verbose:
print("initializing result array (on GPU)")
aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
device=self.get_device())
if verbose:
print("moving data to GPU")
data = torch.from_numpy(data).cuda(
self.get_device(), non_blocking=True)
if verbose:
print("initializing result_numsamples (on GPU)")
aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
device=self.get_device())
else:
if use_gaussian and num_tiles > 1:
add_for_nb_of_preds = self._gaussian_2d
else:
add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)
aggregated_results = np.zeros(
[self.num_classes] + list(data.shape[1:]), dtype=np.float32)
aggregated_nb_of_predictions = np.zeros(
[self.num_classes] + list(data.shape[1:]), dtype=np.float32)
for x in steps[0]:
lb_x = x
ub_x = x + patch_size[0]
for y in steps[1]:
lb_y = y
ub_y = y + patch_size[1]
predicted_patch = self._internal_maybe_mirror_and_pred_2D(
data[None, :, lb_x:ub_x, lb_y:ub_y], mirror_axes, do_mirroring,
gaussian_importance_map)[0]
if all_in_gpu:
predicted_patch = predicted_patch.half()
else:
predicted_patch = predicted_patch.cpu().numpy()
aggregated_results[:, lb_x:ub_x, lb_y:ub_y] += predicted_patch
aggregated_nb_of_predictions[:, lb_x:ub_x,
lb_y:ub_y] += add_for_nb_of_preds
        # we reverse the padding here (remember that we padded the input to be at least as large as the patch size)
slicer = tuple(
[slice(0, aggregated_results.shape[i]) for i in
range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])
aggregated_results = aggregated_results[slicer]
aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]
# computing the class_probabilities by dividing the aggregated result with result_numsamples
class_probabilities = aggregated_results / aggregated_nb_of_predictions
if regions_class_order is None:
predicted_segmentation = class_probabilities.argmax(0)
else:
if all_in_gpu:
class_probabilities_here = class_probabilities.detach().cpu().numpy()
else:
class_probabilities_here = class_probabilities
predicted_segmentation = np.zeros(
class_probabilities_here.shape[1:], dtype=np.float32)
for i, c in enumerate(regions_class_order):
predicted_segmentation[class_probabilities_here[i] > 0.5] = c
if all_in_gpu:
if verbose:
print("copying results to CPU")
if regions_class_order is None:
predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
class_probabilities = class_probabilities.detach().cpu().numpy()
if verbose:
print("prediction done")
return predicted_segmentation, class_probabilities
def _internal_predict_3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,
pad_border_mode: str = "constant", pad_kwargs: dict = None,
all_in_gpu: bool = False, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
if all_in_gpu:
raise NotImplementedError
assert len(x.shape) == 4, "data must be c, x, y, z"
predicted_segmentation = []
softmax_pred = []
for s in range(x.shape[1]):
pred_seg, softmax_pres = self._internal_predict_2D_2Dconv(
x[:, s], min_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose)
predicted_segmentation.append(pred_seg[None])
softmax_pred.append(softmax_pres[None])
predicted_segmentation = np.vstack(predicted_segmentation)
softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))
return predicted_segmentation, softmax_pred
def predict_3D_pseudo3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,
pseudo3D_slices: int = 5, all_in_gpu: bool = False,
pad_border_mode: str = "constant", pad_kwargs: dict = None,
verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
if all_in_gpu:
raise NotImplementedError
assert len(x.shape) == 4, "data must be c, x, y, z"
assert pseudo3D_slices % 2 == 1, "pseudo3D_slices must be odd"
extra_slices = (pseudo3D_slices - 1) // 2
shp_for_pad = np.array(x.shape)
shp_for_pad[1] = extra_slices
pad = np.zeros(shp_for_pad, dtype=np.float32)
data = np.concatenate((pad, x, pad), 1)
predicted_segmentation = []
softmax_pred = []
for s in range(extra_slices, data.shape[1] - extra_slices):
d = data[:, (s - extra_slices):(s + extra_slices + 1)]
d = d.reshape((-1, d.shape[-2], d.shape[-1]))
pred_seg, softmax_pres = \
self._internal_predict_2D_2Dconv(d, min_size, do_mirroring, mirror_axes,
regions_class_order, pad_border_mode, pad_kwargs, verbose)
predicted_segmentation.append(pred_seg[None])
softmax_pred.append(softmax_pres[None])
predicted_segmentation = np.vstack(predicted_segmentation)
softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))
return predicted_segmentation, softmax_pred
def _internal_predict_3D_2Dconv_tiled(self, x: np.ndarray, patch_size: Tuple[int, int], do_mirroring: bool,
mirror_axes: tuple = (0, 1), step_size: float = 0.5,
regions_class_order: tuple = None, use_gaussian: bool = False,
pad_border_mode: str = "edge", pad_kwargs: dict = None,
all_in_gpu: bool = False,
verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
if all_in_gpu:
raise NotImplementedError
assert len(x.shape) == 4, "data must be c, x, y, z"
predicted_segmentation = []
softmax_pred = []
for s in range(x.shape[1]):
pred_seg, softmax_pres = self._internal_predict_2D_2Dconv_tiled(
x[:, s], step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs, all_in_gpu, verbose)
predicted_segmentation.append(pred_seg[None])
softmax_pred.append(softmax_pres[None])
predicted_segmentation = np.vstack(predicted_segmentation)
softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))
return predicted_segmentation, softmax_pred
if __name__ == '__main__':
print(SegmentationNetwork._compute_steps_for_sliding_window(
(30, 224, 224), (162, 529, 529), 0.5))
print(SegmentationNetwork._compute_steps_for_sliding_window(
(30, 224, 224), (162, 529, 529), 1))
print(SegmentationNetwork._compute_steps_for_sliding_window(
(30, 224, 224), (162, 529, 529), 0.1))
print(SegmentationNetwork._compute_steps_for_sliding_window(
(30, 224, 224), (60, 448, 224), 1))
print(SegmentationNetwork._compute_steps_for_sliding_window(
(30, 224, 224), (60, 448, 224), 0.5))
print(SegmentationNetwork._compute_steps_for_sliding_window(
(30, 224, 224), (30, 224, 224), 1))
print(SegmentationNetwork._compute_steps_for_sliding_window(
(30, 224, 224), (30, 224, 224), 0.125))
print(SegmentationNetwork._compute_steps_for_sliding_window(
(123, 54, 123), (246, 162, 369), 0.25))
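    # Added check (exposition only): the sliding-window comment above gives the
    # example image size 110, patch size 64, step_size 0.5 -> steps [0, 23, 46].
    assert SegmentationNetwork._compute_steps_for_sliding_window(
        (64,), (110,), 0.5) == [[0, 23, 46]]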
| 45,370 | 49.189159 | 137 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/VoxResNet.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import torch.nn as nn
class SEBlock(nn.Module):
def __init__(self, in_channels, r):
super(SEBlock, self).__init__()
redu_chns = int(in_channels / r)
self.se_layers = nn.Sequential(
nn.AdaptiveAvgPool3d(1),
nn.Conv3d(in_channels, redu_chns, kernel_size=1, padding=0),
nn.ReLU(),
nn.Conv3d(redu_chns, in_channels, kernel_size=1, padding=0),
nn.ReLU())
def forward(self, x):
f = self.se_layers(x)
return f * x + x
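# --- Illustrative check (added for exposition; not part of the original file) ---
# SEBlock computes a per-channel excitation map f and returns f * x + x, so it
# is shape-preserving. Sizes below are demo assumptions.
if __name__ == "__main__":
    se = SEBlock(in_channels=8, r=4)
    x = torch.randn(1, 8, 4, 4, 4)
    assert se(x).shape == x.shape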
class VoxRex(nn.Module):
def __init__(self, in_channels):
super(VoxRex, self).__init__()
self.block = nn.Sequential(
nn.InstanceNorm3d(in_channels),
nn.ReLU(inplace=True),
nn.Conv3d(in_channels, in_channels,
kernel_size=3, padding=1, bias=False),
nn.InstanceNorm3d(in_channels),
nn.ReLU(inplace=True),
nn.Conv3d(in_channels, in_channels,
kernel_size=3, padding=1, bias=False)
)
def forward(self, x):
return self.block(x)+x
class ConvBlock(nn.Module):
"""two convolution layers with batch norm and leaky relu"""
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv_conv = nn.Sequential(
nn.InstanceNorm3d(in_channels),
nn.ReLU(inplace=True),
nn.Conv3d(in_channels, out_channels,
kernel_size=3, padding=1, bias=False),
nn.InstanceNorm3d(out_channels),
nn.ReLU(inplace=True),
nn.Conv3d(out_channels, out_channels,
kernel_size=3, padding=1, bias=False)
)
def forward(self, x):
return self.conv_conv(x)
class UpBlock(nn.Module):
"""Upssampling followed by ConvBlock"""
def __init__(self, in_channels, out_channels):
super(UpBlock, self).__init__()
self.up = nn.Upsample(
scale_factor=2, mode='trilinear', align_corners=True)
self.conv = ConvBlock(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class VoxResNet(nn.Module):
def __init__(self, in_chns=1, feature_chns=64, class_num=2):
super(VoxResNet, self).__init__()
self.in_chns = in_chns
self.ft_chns = feature_chns
self.n_class = class_num
self.conv1 = nn.Conv3d(in_chns, feature_chns, kernel_size=3, padding=1)
self.res1 = VoxRex(feature_chns)
self.res2 = VoxRex(feature_chns)
self.res3 = VoxRex(feature_chns)
self.res4 = VoxRex(feature_chns)
self.res5 = VoxRex(feature_chns)
self.res6 = VoxRex(feature_chns)
self.up1 = UpBlock(feature_chns * 2, feature_chns)
self.up2 = UpBlock(feature_chns * 2, feature_chns)
self.out = nn.Conv3d(feature_chns, self.n_class, kernel_size=1)
self.maxpool = nn.MaxPool3d(2)
self.upsample = nn.Upsample(
scale_factor=2, mode='trilinear', align_corners=True)
def forward(self, x):
x = self.maxpool(self.conv1(x))
x1 = self.res1(x)
x2 = self.res2(x1)
x2_pool = self.maxpool(x2)
x3 = self.res3(x2_pool)
x4 = self.maxpool(self.res4(x3))
x5 = self.res5(x4)
x6 = self.res6(x5)
up1 = self.up1(x6, x2_pool)
up2 = self.up2(up1, x)
up = self.upsample(up2)
out = self.out(up)
return out
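# --- Illustrative check (added for exposition; not part of the original file) ---
# VoxResNet downsamples three times (overall factor 8) and upsamples back, so
# spatial dims must be divisible by 8 and the output is at full input
# resolution. feature_chns=16 is a demo assumption to keep the check light.
if __name__ == "__main__":
    net = VoxResNet(in_chns=1, feature_chns=16, class_num=2)
    assert net(torch.randn(1, 1, 16, 16, 16)).shape == (1, 2, 16, 16, 16)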
| 3,637 | 30.094017 | 79 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/vision_transformer.py
|
# coding=utf-8
# This file borrowed from Swin-UNet: https://github.com/HuCaoFighting/Swin-Unet
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import math
from os.path import join as pjoin
import torch
import torch.nn as nn
import numpy as np
from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
from scipy import ndimage
from networks.swin_transformer_unet_skip_expand_decoder_sys import SwinTransformerSys
logger = logging.getLogger(__name__)
class SwinUnet(nn.Module):
def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False):
super(SwinUnet, self).__init__()
self.num_classes = num_classes
self.zero_head = zero_head
self.config = config
self.swin_unet = SwinTransformerSys(img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN.PATCH_SIZE,
in_chans=config.MODEL.SWIN.IN_CHANS,
num_classes=self.num_classes,
embed_dim=config.MODEL.SWIN.EMBED_DIM,
depths=config.MODEL.SWIN.DEPTHS,
num_heads=config.MODEL.SWIN.NUM_HEADS,
window_size=config.MODEL.SWIN.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
qkv_bias=config.MODEL.SWIN.QKV_BIAS,
qk_scale=config.MODEL.SWIN.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN.APE,
patch_norm=config.MODEL.SWIN.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT)
def forward(self, x):
if x.size()[1] == 1:
x = x.repeat(1,3,1,1)
logits = self.swin_unet(x)
return logits
def load_from(self, config):
pretrained_path = config.MODEL.PRETRAIN_CKPT
if pretrained_path is not None:
print("pretrained_path:{}".format(pretrained_path))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
pretrained_dict = torch.load(pretrained_path, map_location=device)
if "model" not in pretrained_dict:
print("---start load pretrained modle by splitting---")
pretrained_dict = {k[17:]:v for k,v in pretrained_dict.items()}
for k in list(pretrained_dict.keys()):
if "output" in k:
print("delete key:{}".format(k))
del pretrained_dict[k]
msg = self.swin_unet.load_state_dict(pretrained_dict,strict=False)
# print(msg)
return
pretrained_dict = pretrained_dict['model']
print("---start load pretrained modle of swin encoder---")
model_dict = self.swin_unet.state_dict()
full_dict = copy.deepcopy(pretrained_dict)
for k, v in pretrained_dict.items():
if "layers." in k:
current_layer_num = 3-int(k[7:8])
current_k = "layers_up." + str(current_layer_num) + k[8:]
full_dict.update({current_k:v})
for k in list(full_dict.keys()):
if k in model_dict:
if full_dict[k].shape != model_dict[k].shape:
print("delete:{};shape pretrain:{};shape model:{}".format(k,v.shape,model_dict[k].shape))
del full_dict[k]
msg = self.swin_unet.load_state_dict(full_dict, strict=False)
# print(msg)
else:
print("none pretrain")
| 3,981 | 43.244444 | 113 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/swin_transformer_unet_skip_expand_decoder_sys.py
|
# This file borrowed from Swin-UNet: https://github.com/HuCaoFighting/Swin-Unet
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size,
W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous(
).view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size,
window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
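# Illustrative sanity check (not part of the original file): window_partition
# and window_reverse are exact inverses when H and W are multiples of the
# window size, e.g. a 1x8x8x3 map split into four 4x4 windows:
# x = torch.randn(1, 8, 8, 3)
# win = window_partition(x, 4)                   # -> (4, 4, 4, 3)
# assert torch.equal(window_reverse(win, 4, 8, 8), x)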
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - \
coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(
1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - \
1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index",
relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C //
self.num_heads).permute(2, 0, 3, 1, 4)
# make torchscript happy (cannot use tensor as tuple)
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N,
N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
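# Illustrative usage sketch (assumed shapes, not from the original file):
# attention over 7x7 windows with dim=96 and 3 heads takes flattened window
# tokens of shape (num_windows*B, 49, 96) and returns the same shape:
# attn = WindowAttention(dim=96, window_size=(7, 7), num_heads=3)
# out = attn(torch.randn(8, 49, 96))             # -> (8, 49, 96)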
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(
drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
# nW, window_size, window_size, 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1,
self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(
attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(
x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
# nW*B, window_size, window_size, C
x_windows = window_partition(shifted_x, self.window_size)
# nW*B, window_size*window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C)
# W-MSA/SW-MSA
# nW*B, window_size*window_size, C
attn_windows = self.attn(x_windows, mask=self.attn_mask)
# merge windows
attn_windows = attn_windows.view(-1,
self.window_size, self.window_size, C)
shifted_x = window_reverse(
attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(
self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
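# Illustrative usage sketch (not part of the original file): one block on a
# 56x56 token grid; shift_size=0 gives plain W-MSA, shift_size=window_size//2
# gives the shifted SW-MSA variant with the precomputed attention mask:
# blk = SwinTransformerBlock(dim=96, input_resolution=(56, 56), num_heads=3,
#                            window_size=7, shift_size=3)
# y = blk(torch.randn(1, 56 * 56, 96))           # -> (1, 3136, 96)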
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
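# Illustrative shape sketch (not part of the original file): PatchMerging
# halves each spatial side and doubles the channel dim:
# merge = PatchMerging(input_resolution=(56, 56), dim=96)
# y = merge(torch.randn(2, 56 * 56, 96))         # -> (2, 28 * 28, 192)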
class PatchExpand(nn.Module):
def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.expand = nn.Linear(
dim, 2*dim, bias=False) if dim_scale == 2 else nn.Identity()
self.norm = norm_layer(dim // dim_scale)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
x = self.expand(x)
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
x = x.view(B, H, W, C)
x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c',
p1=2, p2=2, c=C//4)
x = x.view(B, -1, C//4)
x = self.norm(x)
return x
class FinalPatchExpand_X4(nn.Module):
def __init__(self, input_resolution, dim, dim_scale=4, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.dim_scale = dim_scale
self.expand = nn.Linear(dim, 16*dim, bias=False)
self.output_dim = dim
self.norm = norm_layer(self.output_dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
x = self.expand(x)
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
x = x.view(B, H, W, C)
x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c',
p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
x = x.view(B, -1, self.output_dim)
x = self.norm(x)
return x
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (
i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(
drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(
input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
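# Illustrative usage sketch (not part of the original file): one encoder stage
# of `depth` blocks with alternating shift (0, window_size//2, 0, ...),
# optionally followed by a PatchMerging downsample:
# stage = BasicLayer(dim=96, input_resolution=(56, 56), depth=2, num_heads=3,
#                    window_size=7, downsample=PatchMerging)
# y = stage(torch.randn(1, 56 * 56, 96))         # -> (1, 28 * 28, 192)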
class BasicLayer_up(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        upsample (nn.Module | None, optional): Upsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, upsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (
i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(
drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if upsample is not None:
self.upsample = PatchExpand(
input_resolution, dim=dim, dim_scale=2, norm_layer=norm_layer)
else:
self.upsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.upsample is not None:
x = self.upsample(x)
return x
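# Illustrative usage sketch (not part of the original file): the decoder-side
# stage mirrors BasicLayer but ends with a PatchExpand upsample. Note that the
# `upsample` argument only acts as an on/off flag here — PatchExpand is
# hardcoded in __init__ regardless of the class passed in:
# stage_up = BasicLayer_up(dim=192, input_resolution=(28, 28), depth=2,
#                          num_heads=6, window_size=7, upsample=PatchExpand)
# y = stage_up(torch.randn(1, 28 * 28, 192))     # -> (1, 56 * 56, 96)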
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
img_size (int): Image size. Default: 224.
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [img_size[0] //
patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim,
kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * \
(self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
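# Illustrative usage sketch (not part of the original file): PatchEmbed turns
# an image into a token sequence via a strided convolution:
# embed = PatchEmbed(img_size=224, patch_size=4, in_chans=3, embed_dim=96)
# tokens = embed(torch.randn(1, 3, 224, 224))    # -> (1, 56 * 56, 96)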
class SwinTransformerSys(nn.Module):
r""" Swin Transformer
    A PyTorch impl of: `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 2, 2], depths_decoder=[1, 2, 2, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, final_upsample="expand_first", **kwargs):
super().__init__()
print("SwinTransformerSys expand initial----depths:{};depths_decoder:{};drop_path_rate:{};num_classes:{}".format(depths,
depths_decoder, drop_path_rate, num_classes))
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.num_features_up = int(embed_dim * 2)
self.mlp_ratio = mlp_ratio
self.final_upsample = final_upsample
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(
torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate,
sum(depths))] # stochastic depth decay rule
# build encoder and bottleneck layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(
depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (
i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
# build decoder layers
self.layers_up = nn.ModuleList()
self.concat_back_dim = nn.ModuleList()
for i_layer in range(self.num_layers):
concat_linear = nn.Linear(2*int(embed_dim*2**(self.num_layers-1-i_layer)),
int(embed_dim*2**(self.num_layers-1-i_layer))) if i_layer > 0 else nn.Identity()
if i_layer == 0:
layer_up = PatchExpand(input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)),
patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))), dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)), dim_scale=2, norm_layer=norm_layer)
else:
layer_up = BasicLayer_up(dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)),
input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)),
patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))),
depth=depths[(
self.num_layers-1-i_layer)],
num_heads=num_heads[(
self.num_layers-1-i_layer)],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:(
self.num_layers-1-i_layer)]):sum(depths[:(self.num_layers-1-i_layer) + 1])],
norm_layer=norm_layer,
upsample=PatchExpand if (
i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers_up.append(layer_up)
self.concat_back_dim.append(concat_linear)
self.norm = norm_layer(self.num_features)
self.norm_up = norm_layer(self.embed_dim)
if self.final_upsample == "expand_first":
print("---final upsample expand_first---")
self.up = FinalPatchExpand_X4(input_resolution=(
img_size//patch_size, img_size//patch_size), dim_scale=4, dim=embed_dim)
self.output = nn.Conv2d(
in_channels=embed_dim, out_channels=self.num_classes, kernel_size=1, bias=False)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
#Encoder and Bottleneck
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
x_downsample = []
for layer in self.layers:
x_downsample.append(x)
x = layer(x)
x = self.norm(x) # B L C
return x, x_downsample
    # Decoder and skip connection
def forward_up_features(self, x, x_downsample):
for inx, layer_up in enumerate(self.layers_up):
if inx == 0:
x = layer_up(x)
else:
x = torch.cat([x, x_downsample[3-inx]], -1)
x = self.concat_back_dim[inx](x)
x = layer_up(x)
x = self.norm_up(x) # B L C
return x
def up_x4(self, x):
H, W = self.patches_resolution
B, L, C = x.shape
assert L == H*W, "input features has wrong size"
if self.final_upsample == "expand_first":
x = self.up(x)
x = x.view(B, 4*H, 4*W, -1)
x = x.permute(0, 3, 1, 2) # B,C,H,W
x = self.output(x)
return x
def forward(self, x):
x, x_downsample = self.forward_features(x)
x = self.forward_up_features(x, x_downsample)
x = self.up_x4(x)
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * \
self.patches_resolution[0] * \
self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
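# Illustrative end-to-end sketch (not part of the original file; shapes assume
# the defaults above): the U-shaped encoder-decoder maps an image to per-pixel
# logits at the input resolution.
# net = SwinTransformerSys(img_size=224, num_classes=4)
# logits = net(torch.randn(1, 3, 224, 224))      # -> (1, 4, 224, 224)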
| 33,208 | 40.253416 | 209 | py |
| SSL4MIS | SSL4MIS-master/code/networks/config.py |
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------'
import os
import yaml
from yacs.config import CfgNode as CN
_C = CN()
# Base config files
_C.BASE = ['']
# -----------------------------------------------------------------------------
# Data settings
# -----------------------------------------------------------------------------
_C.DATA = CN()
# Batch size for a single GPU, could be overwritten by command line argument
_C.DATA.BATCH_SIZE = 128
# Path to dataset, could be overwritten by command line argument
_C.DATA.DATA_PATH = ''
# Dataset name
_C.DATA.DATASET = 'imagenet'
# Input image size
_C.DATA.IMG_SIZE = 224
# Interpolation to resize image (random, bilinear, bicubic)
_C.DATA.INTERPOLATION = 'bicubic'
# Use zipped dataset instead of folder dataset
# could be overwritten by command line argument
_C.DATA.ZIP_MODE = False
# Cache Data in Memory, could be overwritten by command line argument
_C.DATA.CACHE_MODE = 'part'
# Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.
_C.DATA.PIN_MEMORY = True
# Number of data loading threads
_C.DATA.NUM_WORKERS = 8
# -----------------------------------------------------------------------------
# Model settings
# -----------------------------------------------------------------------------
_C.MODEL = CN()
# Model type
_C.MODEL.TYPE = 'swin'
# Model name
_C.MODEL.NAME = 'swin_tiny_patch4_window7_224'
# Checkpoint to resume, could be overwritten by command line argument
_C.MODEL.PRETRAIN_CKPT = './pretrained_ckpt/swin_tiny_patch4_window7_224.pth'
_C.MODEL.RESUME = ''
# Number of classes, overwritten in data preparation
_C.MODEL.NUM_CLASSES = 1000
# Dropout rate
_C.MODEL.DROP_RATE = 0.0
# Drop path rate
_C.MODEL.DROP_PATH_RATE = 0.1
# Label Smoothing
_C.MODEL.LABEL_SMOOTHING = 0.1
# Swin Transformer parameters
_C.MODEL.SWIN = CN()
_C.MODEL.SWIN.PATCH_SIZE = 4
_C.MODEL.SWIN.IN_CHANS = 3
_C.MODEL.SWIN.EMBED_DIM = 96
_C.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]
_C.MODEL.SWIN.DECODER_DEPTHS = [2, 2, 6, 2]
_C.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]
_C.MODEL.SWIN.WINDOW_SIZE = 7
_C.MODEL.SWIN.MLP_RATIO = 4.
_C.MODEL.SWIN.QKV_BIAS = True
_C.MODEL.SWIN.QK_SCALE = False
_C.MODEL.SWIN.APE = False
_C.MODEL.SWIN.PATCH_NORM = True
_C.MODEL.SWIN.FINAL_UPSAMPLE= "expand_first"
# -----------------------------------------------------------------------------
# Training settings
# -----------------------------------------------------------------------------
_C.TRAIN = CN()
_C.TRAIN.START_EPOCH = 0
_C.TRAIN.EPOCHS = 300
_C.TRAIN.WARMUP_EPOCHS = 20
_C.TRAIN.WEIGHT_DECAY = 0.05
_C.TRAIN.BASE_LR = 5e-4
_C.TRAIN.WARMUP_LR = 5e-7
_C.TRAIN.MIN_LR = 5e-6
# Clip gradient norm
_C.TRAIN.CLIP_GRAD = 5.0
# Auto resume from latest checkpoint
_C.TRAIN.AUTO_RESUME = True
# Gradient accumulation steps
# could be overwritten by command line argument
_C.TRAIN.ACCUMULATION_STEPS = 0
# Whether to use gradient checkpointing to save memory
# could be overwritten by command line argument
_C.TRAIN.USE_CHECKPOINT = False
# LR scheduler
_C.TRAIN.LR_SCHEDULER = CN()
_C.TRAIN.LR_SCHEDULER.NAME = 'cosine'
# Epoch interval to decay LR, used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30
# LR decay rate, used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1
# Optimizer
_C.TRAIN.OPTIMIZER = CN()
_C.TRAIN.OPTIMIZER.NAME = 'adamw'
# Optimizer Epsilon
_C.TRAIN.OPTIMIZER.EPS = 1e-8
# Optimizer Betas
_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999)
# SGD momentum
_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
# -----------------------------------------------------------------------------
# Augmentation settings
# -----------------------------------------------------------------------------
_C.AUG = CN()
# Color jitter factor
_C.AUG.COLOR_JITTER = 0.4
# Use AutoAugment policy. "v0" or "original"
_C.AUG.AUTO_AUGMENT = 'rand-m9-mstd0.5-inc1'
# Random erase prob
_C.AUG.REPROB = 0.25
# Random erase mode
_C.AUG.REMODE = 'pixel'
# Random erase count
_C.AUG.RECOUNT = 1
# Mixup alpha, mixup enabled if > 0
_C.AUG.MIXUP = 0.8
# Cutmix alpha, cutmix enabled if > 0
_C.AUG.CUTMIX = 1.0
# Cutmix min/max ratio, overrides alpha and enables cutmix if set
_C.AUG.CUTMIX_MINMAX = False
# Probability of performing mixup or cutmix when either/both is enabled
_C.AUG.MIXUP_PROB = 1.0
# Probability of switching to cutmix when both mixup and cutmix enabled
_C.AUG.MIXUP_SWITCH_PROB = 0.5
# How to apply mixup/cutmix params. Per "batch", "pair", or "elem"
_C.AUG.MIXUP_MODE = 'batch'
# -----------------------------------------------------------------------------
# Testing settings
# -----------------------------------------------------------------------------
_C.TEST = CN()
# Whether to use center crop when testing
_C.TEST.CROP = True
# -----------------------------------------------------------------------------
# Misc
# -----------------------------------------------------------------------------
# Mixed precision opt level, if O0, no amp is used ('O0', 'O1', 'O2')
# overwritten by command line argument
_C.AMP_OPT_LEVEL = ''
# Path to output folder, overwritten by command line argument
_C.OUTPUT = ''
# Tag of experiment, overwritten by command line argument
_C.TAG = 'default'
# Frequency to save checkpoint
_C.SAVE_FREQ = 1
# Frequency to logging info
_C.PRINT_FREQ = 10
# Fixed random seed
_C.SEED = 0
# Perform evaluation only, overwritten by command line argument
_C.EVAL_MODE = False
# Test throughput only, overwritten by command line argument
_C.THROUGHPUT_MODE = False
# local rank for DistributedDataParallel, given by command line argument
_C.LOCAL_RANK = 0
def _update_config_from_file(config, cfg_file):
config.defrost()
with open(cfg_file, 'r') as f:
yaml_cfg = yaml.load(f, Loader=yaml.FullLoader)
for cfg in yaml_cfg.setdefault('BASE', ['']):
if cfg:
_update_config_from_file(
config, os.path.join(os.path.dirname(cfg_file), cfg)
)
print('=> merge config from {}'.format(cfg_file))
config.merge_from_file(cfg_file)
config.freeze()
def update_config(config, args):
_update_config_from_file(config, args.cfg)
config.defrost()
if args.opts:
config.merge_from_list(args.opts)
# merge from specific arguments
if args.batch_size:
config.DATA.BATCH_SIZE = args.batch_size
if args.zip:
config.DATA.ZIP_MODE = True
if args.cache_mode:
config.DATA.CACHE_MODE = args.cache_mode
if args.resume:
config.MODEL.RESUME = args.resume
if args.accumulation_steps:
config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps
if args.use_checkpoint:
config.TRAIN.USE_CHECKPOINT = True
if args.amp_opt_level:
config.AMP_OPT_LEVEL = args.amp_opt_level
if args.tag:
config.TAG = args.tag
if args.eval:
config.EVAL_MODE = True
if args.throughput:
config.THROUGHPUT_MODE = True
config.freeze()
def get_config(args):
"""Get a yacs CfgNode object with default values."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
config = _C.clone()
update_config(config, args)
return config
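# Illustrative usage sketch (not part of the original file): get_config only
# needs an object exposing the attributes update_config reads — cfg, opts,
# batch_size, zip, cache_mode, resume, accumulation_steps, use_checkpoint,
# amp_opt_level, tag, eval and throughput. The YAML path below is hypothetical:
# from types import SimpleNamespace
# args = SimpleNamespace(cfg='configs/swin_tiny_patch4_window7_224.yaml',
#                        opts=None, batch_size=None, zip=False, cache_mode=None,
#                        resume=None, accumulation_steps=None,
#                        use_checkpoint=False, amp_opt_level=None, tag=None,
#                        eval=False, throughput=False)
# config = get_config(args)                      # frozen CfgNode, file + CLI merged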
| 7,355 | 30.982609 | 79 | py |
| SSL4MIS | SSL4MIS-master/code/networks/unet.py |
# -*- coding: utf-8 -*-
"""
The implementation is borrowed from: https://github.com/HiLab-git/PyMIC
"""
from __future__ import division, print_function
import numpy as np
import torch
import torch.nn as nn
from torch.distributions.uniform import Uniform
def kaiming_normal_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
def sparse_init_weight(model):
for m in model.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.sparse_(m.weight, sparsity=0.1)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return model
class ConvBlock(nn.Module):
"""two convolution layers with batch norm and leaky relu"""
def __init__(self, in_channels, out_channels, dropout_p):
super(ConvBlock, self).__init__()
self.conv_conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(),
nn.Dropout(dropout_p),
nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU()
)
def forward(self, x):
return self.conv_conv(x)
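# Illustrative usage sketch (not part of the original file): two padded 3x3
# convs with batch norm and LeakyReLU preserve the spatial size:
# block = ConvBlock(in_channels=1, out_channels=16, dropout_p=0.05)
# y = block(torch.randn(2, 1, 64, 64))           # -> (2, 16, 64, 64)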
class DownBlock(nn.Module):
"""Downsampling followed by ConvBlock"""
def __init__(self, in_channels, out_channels, dropout_p):
super(DownBlock, self).__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
ConvBlock(in_channels, out_channels, dropout_p)
)
def forward(self, x):
return self.maxpool_conv(x)
class UpBlock(nn.Module):
"""Upssampling followed by ConvBlock"""
def __init__(self, in_channels1, in_channels2, out_channels, dropout_p,
bilinear=True):
super(UpBlock, self).__init__()
self.bilinear = bilinear
if bilinear:
self.conv1x1 = nn.Conv2d(in_channels1, in_channels2, kernel_size=1)
self.up = nn.Upsample(
scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(
in_channels1, in_channels2, kernel_size=2, stride=2)
self.conv = ConvBlock(in_channels2 * 2, out_channels, dropout_p)
def forward(self, x1, x2):
if self.bilinear:
x1 = self.conv1x1(x1)
x1 = self.up(x1)
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
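# Illustrative usage sketch (not part of the original file): with bilinear=True
# the low-resolution input x1 is projected to x2's channel count, upsampled 2x,
# concatenated with the skip x2, and fused by a ConvBlock:
# up = UpBlock(in_channels1=256, in_channels2=128, out_channels=128, dropout_p=0.0)
# y = up(torch.randn(1, 256, 16, 16), torch.randn(1, 128, 32, 32))  # -> (1, 128, 32, 32)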
class Encoder(nn.Module):
def __init__(self, params):
super(Encoder, self).__init__()
self.params = params
self.in_chns = self.params['in_chns']
self.ft_chns = self.params['feature_chns']
self.n_class = self.params['class_num']
self.bilinear = self.params['bilinear']
self.dropout = self.params['dropout']
assert (len(self.ft_chns) == 5)
self.in_conv = ConvBlock(
self.in_chns, self.ft_chns[0], self.dropout[0])
self.down1 = DownBlock(
self.ft_chns[0], self.ft_chns[1], self.dropout[1])
self.down2 = DownBlock(
self.ft_chns[1], self.ft_chns[2], self.dropout[2])
self.down3 = DownBlock(
self.ft_chns[2], self.ft_chns[3], self.dropout[3])
self.down4 = DownBlock(
self.ft_chns[3], self.ft_chns[4], self.dropout[4])
def forward(self, x):
x0 = self.in_conv(x)
x1 = self.down1(x0)
x2 = self.down2(x1)
x3 = self.down3(x2)
x4 = self.down4(x3)
return [x0, x1, x2, x3, x4]
class Decoder(nn.Module):
def __init__(self, params):
super(Decoder, self).__init__()
self.params = params
self.in_chns = self.params['in_chns']
self.ft_chns = self.params['feature_chns']
self.n_class = self.params['class_num']
self.bilinear = self.params['bilinear']
assert (len(self.ft_chns) == 5)
self.up1 = UpBlock(
self.ft_chns[4], self.ft_chns[3], self.ft_chns[3], dropout_p=0.0)
self.up2 = UpBlock(
self.ft_chns[3], self.ft_chns[2], self.ft_chns[2], dropout_p=0.0)
self.up3 = UpBlock(
self.ft_chns[2], self.ft_chns[1], self.ft_chns[1], dropout_p=0.0)
self.up4 = UpBlock(
self.ft_chns[1], self.ft_chns[0], self.ft_chns[0], dropout_p=0.0)
self.out_conv = nn.Conv2d(self.ft_chns[0], self.n_class,
kernel_size=3, padding=1)
def forward(self, feature):
x0 = feature[0]
x1 = feature[1]
x2 = feature[2]
x3 = feature[3]
x4 = feature[4]
x = self.up1(x4, x3)
x = self.up2(x, x2)
x = self.up3(x, x1)
x = self.up4(x, x0)
output = self.out_conv(x)
return output
class Decoder_DS(nn.Module):
def __init__(self, params):
super(Decoder_DS, self).__init__()
self.params = params
self.in_chns = self.params['in_chns']
self.ft_chns = self.params['feature_chns']
self.n_class = self.params['class_num']
self.bilinear = self.params['bilinear']
assert (len(self.ft_chns) == 5)
self.up1 = UpBlock(
self.ft_chns[4], self.ft_chns[3], self.ft_chns[3], dropout_p=0.0)
self.up2 = UpBlock(
self.ft_chns[3], self.ft_chns[2], self.ft_chns[2], dropout_p=0.0)
self.up3 = UpBlock(
self.ft_chns[2], self.ft_chns[1], self.ft_chns[1], dropout_p=0.0)
self.up4 = UpBlock(
self.ft_chns[1], self.ft_chns[0], self.ft_chns[0], dropout_p=0.0)
self.out_conv = nn.Conv2d(self.ft_chns[0], self.n_class,
kernel_size=3, padding=1)
self.out_conv_dp4 = nn.Conv2d(self.ft_chns[4], self.n_class,
kernel_size=3, padding=1)
self.out_conv_dp3 = nn.Conv2d(self.ft_chns[3], self.n_class,
kernel_size=3, padding=1)
self.out_conv_dp2 = nn.Conv2d(self.ft_chns[2], self.n_class,
kernel_size=3, padding=1)
self.out_conv_dp1 = nn.Conv2d(self.ft_chns[1], self.n_class,
kernel_size=3, padding=1)
def forward(self, feature, shape):
x0 = feature[0]
x1 = feature[1]
x2 = feature[2]
x3 = feature[3]
x4 = feature[4]
x = self.up1(x4, x3)
dp3_out_seg = self.out_conv_dp3(x)
dp3_out_seg = torch.nn.functional.interpolate(dp3_out_seg, shape)
x = self.up2(x, x2)
dp2_out_seg = self.out_conv_dp2(x)
dp2_out_seg = torch.nn.functional.interpolate(dp2_out_seg, shape)
x = self.up3(x, x1)
dp1_out_seg = self.out_conv_dp1(x)
dp1_out_seg = torch.nn.functional.interpolate(dp1_out_seg, shape)
x = self.up4(x, x0)
dp0_out_seg = self.out_conv(x)
return dp0_out_seg, dp1_out_seg, dp2_out_seg, dp3_out_seg
class Decoder_URPC(nn.Module):
def __init__(self, params):
super(Decoder_URPC, self).__init__()
self.params = params
self.in_chns = self.params['in_chns']
self.ft_chns = self.params['feature_chns']
self.n_class = self.params['class_num']
self.bilinear = self.params['bilinear']
assert (len(self.ft_chns) == 5)
self.up1 = UpBlock(
self.ft_chns[4], self.ft_chns[3], self.ft_chns[3], dropout_p=0.0)
self.up2 = UpBlock(
self.ft_chns[3], self.ft_chns[2], self.ft_chns[2], dropout_p=0.0)
self.up3 = UpBlock(
self.ft_chns[2], self.ft_chns[1], self.ft_chns[1], dropout_p=0.0)
self.up4 = UpBlock(
self.ft_chns[1], self.ft_chns[0], self.ft_chns[0], dropout_p=0.0)
self.out_conv = nn.Conv2d(self.ft_chns[0], self.n_class,
kernel_size=3, padding=1)
self.out_conv_dp4 = nn.Conv2d(self.ft_chns[4], self.n_class,
kernel_size=3, padding=1)
self.out_conv_dp3 = nn.Conv2d(self.ft_chns[3], self.n_class,
kernel_size=3, padding=1)
self.out_conv_dp2 = nn.Conv2d(self.ft_chns[2], self.n_class,
kernel_size=3, padding=1)
self.out_conv_dp1 = nn.Conv2d(self.ft_chns[1], self.n_class,
kernel_size=3, padding=1)
self.feature_noise = FeatureNoise()
def forward(self, feature, shape):
x0 = feature[0]
x1 = feature[1]
x2 = feature[2]
x3 = feature[3]
x4 = feature[4]
x = self.up1(x4, x3)
if self.training:
dp3_out_seg = self.out_conv_dp3(Dropout(x, p=0.5))
else:
dp3_out_seg = self.out_conv_dp3(x)
dp3_out_seg = torch.nn.functional.interpolate(dp3_out_seg, shape)
x = self.up2(x, x2)
if self.training:
dp2_out_seg = self.out_conv_dp2(FeatureDropout(x))
else:
dp2_out_seg = self.out_conv_dp2(x)
dp2_out_seg = torch.nn.functional.interpolate(dp2_out_seg, shape)
x = self.up3(x, x1)
if self.training:
dp1_out_seg = self.out_conv_dp1(self.feature_noise(x))
else:
dp1_out_seg = self.out_conv_dp1(x)
dp1_out_seg = torch.nn.functional.interpolate(dp1_out_seg, shape)
x = self.up4(x, x0)
dp0_out_seg = self.out_conv(x)
return dp0_out_seg, dp1_out_seg, dp2_out_seg, dp3_out_seg
def Dropout(x, p=0.3):
x = torch.nn.functional.dropout(x, p)
return x
def FeatureDropout(x):
attention = torch.mean(x, dim=1, keepdim=True)
max_val, _ = torch.max(attention.view(
x.size(0), -1), dim=1, keepdim=True)
threshold = max_val * np.random.uniform(0.7, 0.9)
threshold = threshold.view(x.size(0), 1, 1, 1).expand_as(attention)
drop_mask = (attention < threshold).float()
x = x.mul(drop_mask)
return x
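# Illustrative behaviour sketch (not part of the original file): FeatureDropout
# zeroes the spatial positions whose channel-mean activation exceeds a random
# 70-90% of the per-sample maximum, i.e. it suppresses the most salient regions:
# x = torch.randn(2, 16, 32, 32)
# x_dropped = FeatureDropout(x)                  # same shape, salient areas zeroed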
class FeatureNoise(nn.Module):
def __init__(self, uniform_range=0.3):
super(FeatureNoise, self).__init__()
self.uni_dist = Uniform(-uniform_range, uniform_range)
def feature_based_noise(self, x):
noise_vector = self.uni_dist.sample(
x.shape[1:]).to(x.device).unsqueeze(0)
x_noise = x.mul(noise_vector) + x
return x_noise
def forward(self, x):
x = self.feature_based_noise(x)
return x
class UNet(nn.Module):
def __init__(self, in_chns, class_num):
super(UNet, self).__init__()
params = {'in_chns': in_chns,
'feature_chns': [16, 32, 64, 128, 256],
'dropout': [0.05, 0.1, 0.2, 0.3, 0.5],
'class_num': class_num,
'bilinear': False,
'acti_func': 'relu'}
self.encoder = Encoder(params)
self.decoder = Decoder(params)
def forward(self, x):
feature = self.encoder(x)
output = self.decoder(feature)
return output
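# Illustrative usage sketch (not part of the original file): a plain 2D U-Net
# for, e.g., single-channel slices and 4 classes; logits keep the input size:
# model = UNet(in_chns=1, class_num=4)
# logits = model(torch.randn(2, 1, 256, 256))    # -> (2, 4, 256, 256)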
class UNet_CCT(nn.Module):
def __init__(self, in_chns, class_num):
super(UNet_CCT, self).__init__()
params = {'in_chns': in_chns,
'feature_chns': [16, 32, 64, 128, 256],
'dropout': [0.05, 0.1, 0.2, 0.3, 0.5],
'class_num': class_num,
'bilinear': False,
'acti_func': 'relu'}
self.encoder = Encoder(params)
self.main_decoder = Decoder(params)
self.aux_decoder1 = Decoder(params)
self.aux_decoder2 = Decoder(params)
self.aux_decoder3 = Decoder(params)
def forward(self, x):
feature = self.encoder(x)
main_seg = self.main_decoder(feature)
aux1_feature = [FeatureNoise()(i) for i in feature]
aux_seg1 = self.aux_decoder1(aux1_feature)
aux2_feature = [Dropout(i) for i in feature]
aux_seg2 = self.aux_decoder2(aux2_feature)
aux3_feature = [FeatureDropout(i) for i in feature]
aux_seg3 = self.aux_decoder3(aux3_feature)
return main_seg, aux_seg1, aux_seg2, aux_seg3
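# Illustrative usage sketch (not part of the original file): the CCT variant
# returns the main prediction plus three auxiliary predictions computed from
# noise-, dropout- and feature-dropout-perturbed encoder features; enforcing
# consistency between them is the unsupervised training signal:
# model = UNet_CCT(in_chns=1, class_num=4)
# main, aux1, aux2, aux3 = model(torch.randn(2, 1, 256, 256))  # each (2, 4, 256, 256)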
class UNet_URPC(nn.Module):
def __init__(self, in_chns, class_num):
super(UNet_URPC, self).__init__()
params = {'in_chns': in_chns,
'feature_chns': [16, 32, 64, 128, 256],
'dropout': [0.05, 0.1, 0.2, 0.3, 0.5],
'class_num': class_num,
'bilinear': False,
'acti_func': 'relu'}
self.encoder = Encoder(params)
self.decoder = Decoder_URPC(params)
def forward(self, x):
shape = x.shape[2:]
feature = self.encoder(x)
dp1_out_seg, dp2_out_seg, dp3_out_seg, dp4_out_seg = self.decoder(
feature, shape)
return dp1_out_seg, dp2_out_seg, dp3_out_seg, dp4_out_seg
class UNet_DS(nn.Module):
def __init__(self, in_chns, class_num):
super(UNet_DS, self).__init__()
params = {'in_chns': in_chns,
'feature_chns': [16, 32, 64, 128, 256],
'dropout': [0.05, 0.1, 0.2, 0.3, 0.5],
'class_num': class_num,
'bilinear': False,
'acti_func': 'relu'}
self.encoder = Encoder(params)
self.decoder = Decoder_DS(params)
def forward(self, x):
shape = x.shape[2:]
feature = self.encoder(x)
dp0_out_seg, dp1_out_seg, dp2_out_seg, dp3_out_seg = self.decoder(
feature, shape)
return dp0_out_seg, dp1_out_seg, dp2_out_seg, dp3_out_seg
| 13,801 | 34.030457 | 79 | py |
| SSL4MIS | SSL4MIS-master/code/networks/efficientunet.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from networks.attention import *
from networks.efficient_encoder import get_encoder
def initialize_decoder(module):
for m in module.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, mode="fan_in", nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class DecoderBlock(nn.Module):
def __init__(
self,
in_channels,
skip_channels,
out_channels,
use_batchnorm=True,
attention_type=None,
):
super().__init__()
self.conv1 = Conv2dReLU(
in_channels + skip_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.attention1 = Attention(attention_type, in_channels=in_channels + skip_channels)
self.conv2 = Conv2dReLU(
out_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.attention2 = Attention(attention_type, in_channels=out_channels)
def forward(self, x, skip=None):
x = F.interpolate(x, scale_factor=2, mode="nearest")
if skip is not None:
x = torch.cat([x, skip], dim=1)
x = self.attention1(x)
x = self.conv1(x)
x = self.conv2(x)
x = self.attention2(x)
return x
class CenterBlock(nn.Sequential):
def __init__(self, in_channels, out_channels, use_batchnorm=True):
conv1 = Conv2dReLU(
in_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
conv2 = Conv2dReLU(
out_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
super().__init__(conv1, conv2)
class UnetDecoder(nn.Module):
def __init__(
self,
encoder_channels,
decoder_channels,
n_blocks=5,
use_batchnorm=True,
attention_type=None,
center=False,
):
super().__init__()
if n_blocks != len(decoder_channels):
raise ValueError(
"Model depth is {}, but you provide `decoder_channels` for {} blocks.".format(
n_blocks, len(decoder_channels)
)
)
encoder_channels = encoder_channels[1:] # remove first skip with same spatial resolution
encoder_channels = encoder_channels[::-1] # reverse channels to start from head of encoder
# computing blocks input and output channels
head_channels = encoder_channels[0]
in_channels = [head_channels] + list(decoder_channels[:-1])
skip_channels = list(encoder_channels[1:]) + [0]
out_channels = decoder_channels
if center:
self.center = CenterBlock(
head_channels, head_channels, use_batchnorm=use_batchnorm
)
else:
self.center = nn.Identity()
# combine decoder keyword arguments
kwargs = dict(use_batchnorm=use_batchnorm, attention_type=attention_type)
blocks = [
DecoderBlock(in_ch, skip_ch, out_ch, **kwargs)
for in_ch, skip_ch, out_ch in zip(in_channels, skip_channels, out_channels)
]
self.blocks = nn.ModuleList(blocks)
def forward(self, *features):
features = features[1:] # remove first skip with same spatial resolution
features = features[::-1] # reverse channels to start from head of encoder
head = features[0]
skips = features[1:]
x = self.center(head)
for i, decoder_block in enumerate(self.blocks):
skip = skips[i] if i < len(skips) else None
x = decoder_block(x, skip)
return x
class Effi_UNet(nn.Module):
"""Unet_ is a fully convolution neural network for image semantic segmentation
Args:
encoder_name: name of classification model (without last dense layers) used as feature
extractor to build segmentation model.
encoder_depth (int): number of stages used in decoder, larger depth - more features are generated.
e.g. for depth=3 encoder will generate list of features with following spatial shapes
[(H,W), (H/2, W/2), (H/4, W/4), (H/8, W/8)], so in general the deepest feature tensor will have
            spatial resolution (H/(2^depth), W/(2^depth))
encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
decoder_channels: list of numbers of ``Conv2D`` layer filters in decoder blocks
decoder_use_batchnorm: if ``True``, ``BatchNormalisation`` layer between ``Conv2D`` and ``Activation`` layers
            is used. If 'inplace', InplaceABN will be used, which reduces memory consumption.
One of [True, False, 'inplace']
decoder_attention_type: attention module used in decoder of the model
One of [``None``, ``scse``]
in_channels: number of input channels for model, default is 3.
classes: a number of classes for output (output shape - ``(batch, classes, h, w)``).
activation: activation function to apply after final convolution;
One of [``sigmoid``, ``softmax``, ``logsoftmax``, ``identity``, callable, None]
aux_params: if specified model will have additional classification auxiliary output
build on top of encoder, supported params:
- classes (int): number of classes
- pooling (str): one of 'max', 'avg'. Default is 'avg'.
- dropout (float): dropout factor in [0, 1)
- activation (str): activation function to apply "sigmoid"/"softmax" (could be None to return logits)
Returns:
``torch.nn.Module``: **Unet**
.. _Unet:
https://arxiv.org/pdf/1505.04597
"""
def __init__(
self,
encoder_name: str = "resnet34",
encoder_depth: int = 5,
encoder_weights: str = "imagenet",
decoder_use_batchnorm=True,
decoder_channels=(256, 128, 64, 32, 16),
decoder_attention_type=None,
in_channels: int = 3,
classes: int = 1):
super().__init__()
self.encoder = get_encoder(
encoder_name,
in_channels=in_channels,
depth=encoder_depth,
weights=encoder_weights,
)
self.decoder = UnetDecoder(
encoder_channels=self.encoder.out_channels,
decoder_channels=decoder_channels,
n_blocks=encoder_depth,
use_batchnorm=decoder_use_batchnorm,
center=True if encoder_name.startswith("vgg") else False,
attention_type=decoder_attention_type,
)
initialize_decoder(self.decoder)
self.classifier = nn.Conv2d(decoder_channels[-1], classes, 1)
def forward(self, x):
"""Sequentially pass `x` trough model`s encoder, decoder and heads"""
features = self.encoder(x)
decoder_output = self.decoder(*features)
output = self.classifier(decoder_output)
return output
# unet = UNet('efficientnet-b3', encoder_weights='imagenet', in_channels=1, classes=1, decoder_attention_type="scse")
# t = torch.rand(2, 1, 224, 224)
# print(unet)
# print(unet(t).shape)
| 7,930 | 34.725225 | 117 | py |
| SSL4MIS | SSL4MIS-master/code/networks/nnunet.py |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import torch.nn.functional as F
from torch import nn
import torch
import numpy as np
from networks.neural_network import SegmentationNetwork
import torch.nn.functional
def softmax_helper(x): return F.softmax(x, 1)
class InitWeights_He(object):
def __init__(self, neg_slope=1e-2):
self.neg_slope = neg_slope
def __call__(self, module):
if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
module.weight = nn.init.kaiming_normal_(
module.weight, a=self.neg_slope)
if module.bias is not None:
module.bias = nn.init.constant_(module.bias, 0)
class ConvDropoutNormNonlin(nn.Module):
"""
fixes a bug in ConvDropoutNormNonlin where lrelu was used regardless of nonlin. Bad.
"""
def __init__(self, input_channels, output_channels,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None):
super(ConvDropoutNormNonlin, self).__init__()
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1,
'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
self.conv = self.conv_op(
input_channels, output_channels, **self.conv_kwargs)
if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[
'p'] > 0:
self.dropout = self.dropout_op(**self.dropout_op_kwargs)
else:
self.dropout = None
self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
self.lrelu = self.nonlin(**self.nonlin_kwargs)
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.lrelu(self.instnorm(x))
class ConvDropoutNonlinNorm(ConvDropoutNormNonlin):
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.instnorm(self.lrelu(x))
class StackedConvLayers(nn.Module):
def __init__(self, input_feature_channels, output_feature_channels, num_convs,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None, basic_block=ConvDropoutNormNonlin):
'''
        stacks ConvDropoutNormLReLU layers. first_stride will only be applied to the first layer in the stack. The other parameters affect all layers
:param input_feature_channels:
:param output_feature_channels:
:param num_convs:
:param dilation:
:param kernel_size:
:param padding:
:param dropout:
:param initial_stride:
:param conv_op:
:param norm_op:
:param dropout_op:
:param inplace:
:param neg_slope:
:param norm_affine:
:param conv_bias:
'''
self.input_channels = input_feature_channels
self.output_channels = output_feature_channels
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1,
'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
if first_stride is not None:
self.conv_kwargs_first_conv = deepcopy(conv_kwargs)
self.conv_kwargs_first_conv['stride'] = first_stride
else:
self.conv_kwargs_first_conv = conv_kwargs
super(StackedConvLayers, self).__init__()
self.blocks = nn.Sequential(
*([basic_block(input_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs_first_conv,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs)] +
[basic_block(output_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)]))
def forward(self, x):
return self.blocks(x)
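# Illustrative usage sketch (not part of the original file): two stacked
# conv-norm-nonlin blocks; first_stride only affects the first conv, e.g. for
# strided downsampling:
# layer = StackedConvLayers(32, 64, num_convs=2, first_stride=2)
# y = layer(torch.randn(1, 32, 64, 64))          # -> (1, 64, 32, 32)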
def print_module_training_status(module):
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Conv3d) or isinstance(module, nn.Dropout3d) or \
isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout) or isinstance(module, nn.InstanceNorm3d) \
or isinstance(module, nn.InstanceNorm2d) or isinstance(module, nn.InstanceNorm1d) \
or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or isinstance(module,
nn.BatchNorm1d):
print(str(module), module.training)
class Upsample(nn.Module):
def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False):
super(Upsample, self).__init__()
self.align_corners = align_corners
self.mode = mode
self.scale_factor = scale_factor
self.size = size
def forward(self, x):
return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode,
align_corners=self.align_corners)
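# Illustrative usage sketch (not part of the original file): a thin wrapper
# around F.interpolate so that upsampling can live inside nn.Sequential. Note
# that recent PyTorch versions reject align_corners for mode='nearest', so
# callers should pass an interpolating mode such as 'bilinear' or 'trilinear':
# up = Upsample(scale_factor=2, mode='bilinear', align_corners=True)
# y = up(torch.randn(1, 8, 16, 16))              # -> (1, 8, 32, 32)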
class Generic_UNet(SegmentationNetwork):
DEFAULT_BATCH_SIZE_3D = 2
DEFAULT_PATCH_SIZE_3D = (64, 192, 160)
SPACING_FACTOR_BETWEEN_STAGES = 2
BASE_NUM_FEATURES_3D = 30
MAX_NUMPOOL_3D = 999
MAX_NUM_FILTERS_3D = 320
DEFAULT_PATCH_SIZE_2D = (256, 256)
BASE_NUM_FEATURES_2D = 30
DEFAULT_BATCH_SIZE_2D = 50
MAX_NUMPOOL_2D = 999
MAX_FILTERS_2D = 480
use_this_for_batch_size_computation_2D = 19739648
use_this_for_batch_size_computation_3D = 520000000 # 505789440
def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2,
feat_map_mul_on_downscale=2, conv_op=nn.Conv2d,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,
final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,
conv_kernel_sizes=None,
upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False,
max_num_features=None, basic_block=ConvDropoutNormNonlin,
seg_output_use_bias=False):
"""
basically more flexible than v1, architecture is the same
Does this look complicated? Nah bro. Functionality > usability
This does everything you need, including world peace.
Questions? -> [email protected]
"""
super(Generic_UNet, self).__init__()
self.convolutional_upsampling = convolutional_upsampling
self.convolutional_pooling = convolutional_pooling
self.upscale_logits = upscale_logits
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}
self.nonlin = nonlin
self.nonlin_kwargs = nonlin_kwargs
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.weightInitializer = weightInitializer
self.conv_op = conv_op
self.norm_op = norm_op
self.dropout_op = dropout_op
self.num_classes = num_classes
self.final_nonlin = final_nonlin
self._deep_supervision = deep_supervision
self.do_ds = deep_supervision
if conv_op == nn.Conv2d:
upsample_mode = 'bilinear'
pool_op = nn.MaxPool2d
transpconv = nn.ConvTranspose2d
if pool_op_kernel_sizes is None:
pool_op_kernel_sizes = [(2, 2)] * num_pool
if conv_kernel_sizes is None:
conv_kernel_sizes = [(3, 3)] * (num_pool + 1)
elif conv_op == nn.Conv3d:
upsample_mode = 'trilinear'
pool_op = nn.MaxPool3d
transpconv = nn.ConvTranspose3d
if pool_op_kernel_sizes is None:
pool_op_kernel_sizes = [(2, 2, 2)] * num_pool
if conv_kernel_sizes is None:
conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)
else:
raise ValueError(
"unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.input_shape_must_be_divisible_by = np.prod(
pool_op_kernel_sizes, 0, dtype=np.int64)
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.conv_kernel_sizes = conv_kernel_sizes
self.conv_pad_sizes = []
for krnl in self.conv_kernel_sizes:
self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])
if max_num_features is None:
if self.conv_op == nn.Conv3d:
self.max_num_features = self.MAX_NUM_FILTERS_3D
else:
self.max_num_features = self.MAX_FILTERS_2D
else:
self.max_num_features = max_num_features
self.conv_blocks_context = []
self.conv_blocks_localization = []
self.td = []
self.tu = []
self.seg_outputs = []
output_features = base_num_features
input_features = input_channels
for d in range(num_pool):
# determine the first stride
if d != 0 and self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[d - 1]
else:
first_stride = None
self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]
self.conv_kwargs['padding'] = self.conv_pad_sizes[d]
# add convolutions
self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,
self.conv_op, self.conv_kwargs, self.norm_op,
self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,
first_stride, basic_block=basic_block))
if not self.convolutional_pooling:
self.td.append(pool_op(pool_op_kernel_sizes[d]))
input_features = output_features
output_features = int(
np.round(output_features * feat_map_mul_on_downscale))
output_features = min(output_features, self.max_num_features)
# now the bottleneck.
# determine the first stride
if self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[-1]
else:
first_stride = None
# the output of the last conv must match the number of features from the skip connection if we are not using
# convolutional upsampling. If we use convolutional upsampling then the reduction in feature maps will be
# done by the transposed conv
if self.convolutional_upsampling:
final_num_features = output_features
else:
final_num_features = self.conv_blocks_context[-1].output_channels
self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]
self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]
self.conv_blocks_context.append(nn.Sequential(
StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, first_stride, basic_block=basic_block),
StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, basic_block=basic_block)))
# if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here
if not dropout_in_localization:
old_dropout_p = self.dropout_op_kwargs['p']
self.dropout_op_kwargs['p'] = 0.0
        # now let's build the localization pathway
for u in range(num_pool):
nfeatures_from_down = final_num_features
nfeatures_from_skip = self.conv_blocks_context[
-(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2
n_features_after_tu_and_concat = nfeatures_from_skip * 2
# the first conv reduces the number of features to match those of skip
# the following convs work on that number of features
# if not convolutional upsampling then the final conv reduces the num of features again
if u != num_pool - 1 and not self.convolutional_upsampling:
final_num_features = self.conv_blocks_context[-(
3 + u)].output_channels
else:
final_num_features = nfeatures_from_skip
if not self.convolutional_upsampling:
self.tu.append(
Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode))
else:
self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)],
pool_op_kernel_sizes[-(u + 1)], bias=False))
self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[- (u + 1)]
self.conv_kwargs['padding'] = self.conv_pad_sizes[- (u + 1)]
self.conv_blocks_localization.append(nn.Sequential(
StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1,
self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block),
StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs, basic_block=basic_block)
))
for ds in range(len(self.conv_blocks_localization)):
self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes,
1, 1, 0, 1, 1, seg_output_use_bias))
self.upscale_logits_ops = []
cum_upsample = np.cumprod(
np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
for usl in range(num_pool - 1):
if self.upscale_logits:
self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),
mode=upsample_mode))
else:
self.upscale_logits_ops.append(lambda x: x)
if not dropout_in_localization:
self.dropout_op_kwargs['p'] = old_dropout_p
# register all modules properly
self.conv_blocks_localization = nn.ModuleList(
self.conv_blocks_localization)
self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
self.td = nn.ModuleList(self.td)
self.tu = nn.ModuleList(self.tu)
self.seg_outputs = nn.ModuleList(self.seg_outputs)
if self.upscale_logits:
self.upscale_logits_ops = nn.ModuleList(
self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here
if self.weightInitializer is not None:
self.apply(self.weightInitializer)
# self.apply(print_module_training_status)
def forward(self, x):
skips = []
seg_outputs = []
for d in range(len(self.conv_blocks_context) - 1):
x = self.conv_blocks_context[d](x)
skips.append(x)
if not self.convolutional_pooling:
x = self.td[d](x)
x = self.conv_blocks_context[-1](x)
for u in range(len(self.tu)):
x = self.tu[u](x)
x = torch.cat((x, skips[-(u + 1)]), dim=1)
x = self.conv_blocks_localization[u](x)
seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))
if self._deep_supervision and self.do_ds:
return tuple([seg_outputs[-1]] + [i(j) for i, j in
zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])
else:
return seg_outputs[-1]
@staticmethod
def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,
num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,
conv_per_stage=2):
"""
        This assumes convolutional_upsampling=True. It is not the real VRAM
        consumption, just a constant term to which the VRAM consumption is
        approximately proportional (plus an offset for parameter storage).
:param deep_supervision:
:param patch_size:
:param num_pool_per_axis:
:param base_num_features:
:param max_num_features:
:param num_modalities:
:param num_classes:
:param pool_op_kernel_sizes:
:return:
"""
if not isinstance(num_pool_per_axis, np.ndarray):
num_pool_per_axis = np.array(num_pool_per_axis)
npool = len(pool_op_kernel_sizes)
map_size = np.array(patch_size)
tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +
num_modalities * np.prod(map_size, dtype=np.int64) +
num_classes * np.prod(map_size, dtype=np.int64))
num_feat = base_num_features
for p in range(npool):
for pi in range(len(num_pool_per_axis)):
map_size[pi] /= pool_op_kernel_sizes[p][pi]
num_feat = min(num_feat * 2, max_num_features)
# conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv
num_blocks = (conv_per_stage * 2 +
1) if p < (npool - 1) else conv_per_stage
tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat
if deep_supervision and p < (npool - 2):
tmp += np.prod(map_size, dtype=np.int64) * num_classes
# print(p, map_size, num_feat, tmp)
return tmp
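# Hedged worked example (editor addition): all configuration values below are
# hypothetical; the returned number is only meaningful relative to other
# configurations, not as an absolute byte count.
def _vram_estimate_example():
    return Generic_UNet.compute_approx_vram_consumption(
        patch_size=(64, 192, 160), num_pool_per_axis=(5, 5, 5),
        base_num_features=30, max_num_features=320, num_modalities=1,
        num_classes=3, pool_op_kernel_sizes=[(2, 2, 2)] * 5)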
default_dict = {
"base_num_features": 16,
"conv_per_stage": 2,
"initial_lr": 0.01,
"lr_scheduler": None,
"lr_scheduler_eps": 0.001,
"lr_scheduler_patience": 30,
"lr_threshold": 1e-06,
"max_num_epochs": 1000,
"net_conv_kernel_sizes": [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]],
"net_num_pool_op_kernel_sizes": [[1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]],
"net_pool_per_axis": [2, 6, 6],
"num_batches_per_epoch": 250,
"num_classes": 3,
"num_input_channels": 1,
"transpose_backward": [0, 1, 2],
"transpose_forward": [0, 1, 2],
}
def initialize_network(threeD=True, num_classes=2):
"""
This is specific to the U-Net and must be adapted for other network architectures
:return:
"""
# self.print_to_log_file(self.net_num_pool_op_kernel_sizes)
# self.print_to_log_file(self.net_conv_kernel_sizes)
if threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
default_dict["num_classes"] = num_classes
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
network = Generic_UNet(default_dict["num_input_channels"], default_dict["base_num_features"], default_dict["num_classes"], len(default_dict["net_num_pool_op_kernel_sizes"]),
default_dict["conv_per_stage"], 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(
1e-2),
default_dict["net_num_pool_op_kernel_sizes"], default_dict["net_conv_kernel_sizes"], False, True, True)
print("nnUNet have {} paramerters in total".format(
sum(x.numel() for x in network.parameters())))
return network.cuda()
# input = torch.FloatTensor(1, 1, 32, 192, 192)
# input_var = input.cuda()
# model = initialize_network(threeD=True)
# out = model(input_var)
# print(out.size())
| 23,919 | 43.71028 | 177 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/networks_other.py
|
import functools
import time
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import init
from torch.optim import lr_scheduler
###############################################################################
# Functions
###############################################################################
def weights_init_normal(m):
classname = m.__class__.__name__
#print(classname)
if classname.find('Conv') != -1:
init.normal(m.weight.data, 0.0, 0.02)
elif classname.find('Linear') != -1:
init.normal(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
init.normal(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
#print(classname)
if classname.find('Conv') != -1:
init.xavier_normal(m.weight.data, gain=1)
elif classname.find('Linear') != -1:
init.xavier_normal(m.weight.data, gain=1)
elif classname.find('BatchNorm') != -1:
init.normal(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
#print(classname)
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_orthogonal(m):
classname = m.__class__.__name__
#print(classname)
if classname.find('Conv') != -1:
init.orthogonal(m.weight.data, gain=1)
elif classname.find('Linear') != -1:
init.orthogonal(m.weight.data, gain=1)
elif classname.find('BatchNorm') != -1:
init.normal(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
#print('initialization method [%s]' % init_type)
if init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'xavier':
net.apply(weights_init_xavier)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
elif init_type == 'orthogonal':
net.apply(weights_init_orthogonal)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
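# Hedged sketch (editor addition): the returned factory is instantiated once
# per feature count by the generators/discriminators below.
def _norm_layer_example():
    norm_layer = get_norm_layer(norm_type='instance')
    return norm_layer(64)  # equivalent to nn.InstanceNorm2d(64, affine=False)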
def adjust_learning_rate(optimizer, lr):
"""Sets the learning rate to a fixed number"""
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def get_scheduler(optimizer, opt):
print('opt.lr_policy = [{}]'.format(opt.lr_policy))
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.5)
elif opt.lr_policy == 'step2':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
        print('scheduler = plateau')
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, threshold=0.01, patience=5)
elif opt.lr_policy == 'plateau2':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'step_warmstart':
def lambda_rule(epoch):
#print(epoch)
if epoch < 5:
lr_l = 0.1
elif 5 <= epoch < 100:
lr_l = 1
elif 100 <= epoch < 200:
lr_l = 0.1
elif 200 <= epoch:
lr_l = 0.01
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step_warmstart2':
def lambda_rule(epoch):
#print(epoch)
if epoch < 5:
lr_l = 0.1
elif 5 <= epoch < 50:
lr_l = 1
elif 50 <= epoch < 100:
lr_l = 0.1
elif 100 <= epoch:
lr_l = 0.01
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    else:
        raise NotImplementedError(
            'learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
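# Hedged sketch (editor addition): `opt` is a hypothetical options object;
# only the attributes read for the chosen policy need to exist.
def _scheduler_example():
    import argparse
    opt = argparse.Namespace(lr_policy='step', lr_decay_iters=50)
    optimizer = torch.optim.SGD([nn.Parameter(torch.zeros(1))], lr=0.1)
    return get_scheduler(optimizer, opt)  # StepLR halving the lr every 50 epochs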
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[]):
netG = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert(torch.cuda.is_available())
if which_model_netG == 'resnet_9blocks':
netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)
elif which_model_netG == 'resnet_6blocks':
netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids)
elif which_model_netG == 'unet_128':
netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'unet_256':
netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
if len(gpu_ids) > 0:
netG.cuda(gpu_ids[0])
init_weights(netG, init_type=init_type)
return netG
def define_D(input_nc, ndf, which_model_netD,
n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[]):
netD = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert(torch.cuda.is_available())
if which_model_netD == 'basic':
netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
elif which_model_netD == 'n_layers':
netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' %
which_model_netD)
if use_gpu:
netD.cuda(gpu_ids[0])
init_weights(netD, init_type=init_type)
return netD
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
def get_n_parameters(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
return num_params
def measure_fp_bp_time(model, x, y):
# synchronize gpu time and measure fp
torch.cuda.synchronize()
t0 = time.time()
y_pred = model(x)
torch.cuda.synchronize()
elapsed_fp = time.time() - t0
if isinstance(y_pred, tuple):
y_pred = sum(y_p.sum() for y_p in y_pred)
else:
y_pred = y_pred.sum()
# zero gradients, synchronize time and measure
model.zero_grad()
t0 = time.time()
#y_pred.backward(y)
y_pred.backward()
torch.cuda.synchronize()
elapsed_bp = time.time() - t0
return elapsed_fp, elapsed_bp
def benchmark_fp_bp_time(model, x, y, n_trial=1000):
# transfer the model on GPU
model.cuda()
# DRY RUNS
for i in range(10):
_, _ = measure_fp_bp_time(model, x, y)
print('DONE WITH DRY RUNS, NOW BENCHMARKING')
# START BENCHMARKING
t_forward = []
t_backward = []
print('trial: {}'.format(n_trial))
for i in range(n_trial):
t_fp, t_bp = measure_fp_bp_time(model, x, y)
t_forward.append(t_fp)
t_backward.append(t_bp)
# free memory
del model
return np.mean(t_forward), np.mean(t_backward)
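# Hedged sketch (editor addition): requires a CUDA device. `y` is unused by
# measure_fp_bp_time (backward() is called without arguments), so None is
# passed; shapes are illustrative only.
def _benchmark_example():
    model = NLayerDiscriminator(input_nc=3)
    x = torch.randn(2, 3, 256, 256).cuda()
    return benchmark_fp_bp_time(model, x, y=None, n_trial=10)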
##############################################################################
# Classes
##############################################################################
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically the same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input.
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
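# Hedged sketch (editor addition): `disc_out` stands in for a discriminator
# prediction; GANLoss builds the matching all-real / all-fake target itself.
def _gan_loss_example():
    criterion = GANLoss(use_lsgan=True)
    disc_out = torch.rand(4, 1, 30, 30)  # hypothetical PatchGAN output
    loss = (criterion(disc_out, target_is_real=True) +
            criterion(disc_out, target_is_real=False))
    return loss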
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from Justin Johnson's architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect'):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
super(UnetGenerator, self).__init__()
self.gpu_ids = gpu_ids
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
self.model = unet_block
def forward(self, input):
if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
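# Hedged sketch (editor addition): num_downs=7 matches 128x128 inputs, which
# reach 1x1 at the bottleneck; the output keeps the input resolution.
def _unet_generator_example():
    netG = UnetGenerator(input_nc=3, output_nc=3, num_downs=7)
    return netG(torch.randn(1, 3, 128, 128)).shape  # -> torch.Size([1, 3, 128, 128])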
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
super(NLayerDiscriminator, self).__init__()
self.gpu_ids = gpu_ids
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, input):
if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
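# Hedged sketch (editor addition): with the default n_layers=3 this is the
# classic 70x70 PatchGAN, so a 256x256 input yields a 30x30 grid of per-patch
# logits.
def _patchgan_example():
    netD = NLayerDiscriminator(input_nc=3, n_layers=3)
    return netD(torch.randn(1, 3, 256, 256)).shape  # -> torch.Size([1, 1, 30, 30])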
| 20,202 | 37.118868 | 151 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/vnet.py
|
import torch
from torch import nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
super(ConvBlock, self).__init__()
ops = []
for i in range(n_stages):
            if i == 0:
                input_channel = n_filters_in
            else:
                input_channel = n_filters_out
ops.append(nn.Conv3d(input_channel, n_filters_out, 3, padding=1))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
elif normalization != 'none':
                assert False, 'Unknown normalization: {}'.format(normalization)
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class ResidualConvBlock(nn.Module):
def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
super(ResidualConvBlock, self).__init__()
ops = []
for i in range(n_stages):
if i == 0:
input_channel = n_filters_in
else:
input_channel = n_filters_out
ops.append(nn.Conv3d(input_channel, n_filters_out, 3, padding=1))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
elif normalization != 'none':
                assert False, 'Unknown normalization: {}'.format(normalization)
if i != n_stages-1:
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = (self.conv(x) + x)
x = self.relu(x)
return x
class DownsamplingConvBlock(nn.Module):
def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
super(DownsamplingConvBlock, self).__init__()
        ops = []
        ops.append(nn.Conv3d(n_filters_in, n_filters_out,
                             stride, padding=0, stride=stride))
        if normalization == 'batchnorm':
            ops.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            ops.append(nn.InstanceNorm3d(n_filters_out))
        elif normalization != 'none':
            assert False, 'Unknown normalization: {}'.format(normalization)
        ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class UpsamplingDeconvBlock(nn.Module):
def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
super(UpsamplingDeconvBlock, self).__init__()
        ops = []
        ops.append(nn.ConvTranspose3d(n_filters_in, n_filters_out,
                                      stride, padding=0, stride=stride))
        if normalization == 'batchnorm':
            ops.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            ops.append(nn.InstanceNorm3d(n_filters_out))
        elif normalization != 'none':
            assert False, 'Unknown normalization: {}'.format(normalization)
        ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class Upsampling(nn.Module):
def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
super(Upsampling, self).__init__()
ops = []
        ops.append(nn.Upsample(scale_factor=stride, mode='trilinear', align_corners=False))
ops.append(nn.Conv3d(n_filters_in, n_filters_out, kernel_size=3, padding=1))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
elif normalization != 'none':
            assert False, 'Unknown normalization: {}'.format(normalization)
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class VNet(nn.Module):
def __init__(self, n_channels=3, n_classes=2, n_filters=16, normalization='none', has_dropout=False):
super(VNet, self).__init__()
self.has_dropout = has_dropout
self.block_one = ConvBlock(1, n_channels, n_filters, normalization=normalization)
self.block_one_dw = DownsamplingConvBlock(n_filters, 2 * n_filters, normalization=normalization)
self.block_two = ConvBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
self.block_two_dw = DownsamplingConvBlock(n_filters * 2, n_filters * 4, normalization=normalization)
self.block_three = ConvBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
self.block_three_dw = DownsamplingConvBlock(n_filters * 4, n_filters * 8, normalization=normalization)
self.block_four = ConvBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
self.block_four_dw = DownsamplingConvBlock(n_filters * 8, n_filters * 16, normalization=normalization)
self.block_five = ConvBlock(3, n_filters * 16, n_filters * 16, normalization=normalization)
self.block_five_up = UpsamplingDeconvBlock(n_filters * 16, n_filters * 8, normalization=normalization)
self.block_six = ConvBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
self.block_six_up = UpsamplingDeconvBlock(n_filters * 8, n_filters * 4, normalization=normalization)
self.block_seven = ConvBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
self.block_seven_up = UpsamplingDeconvBlock(n_filters * 4, n_filters * 2, normalization=normalization)
self.block_eight = ConvBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
self.block_eight_up = UpsamplingDeconvBlock(n_filters * 2, n_filters, normalization=normalization)
self.block_nine = ConvBlock(1, n_filters, n_filters, normalization=normalization)
self.out_conv = nn.Conv3d(n_filters, n_classes, 1, padding=0)
self.dropout = nn.Dropout3d(p=0.5, inplace=False)
# self.__init_weight()
def encoder(self, input):
x1 = self.block_one(input)
x1_dw = self.block_one_dw(x1)
x2 = self.block_two(x1_dw)
x2_dw = self.block_two_dw(x2)
x3 = self.block_three(x2_dw)
x3_dw = self.block_three_dw(x3)
x4 = self.block_four(x3_dw)
x4_dw = self.block_four_dw(x4)
x5 = self.block_five(x4_dw)
# x5 = F.dropout3d(x5, p=0.5, training=True)
if self.has_dropout:
x5 = self.dropout(x5)
res = [x1, x2, x3, x4, x5]
return res
def decoder(self, features):
x1 = features[0]
x2 = features[1]
x3 = features[2]
x4 = features[3]
x5 = features[4]
x5_up = self.block_five_up(x5)
x5_up = x5_up + x4
x6 = self.block_six(x5_up)
x6_up = self.block_six_up(x6)
x6_up = x6_up + x3
x7 = self.block_seven(x6_up)
x7_up = self.block_seven_up(x7)
x7_up = x7_up + x2
x8 = self.block_eight(x7_up)
x8_up = self.block_eight_up(x8)
x8_up = x8_up + x1
x9 = self.block_nine(x8_up)
# x9 = F.dropout3d(x9, p=0.5, training=True)
if self.has_dropout:
x9 = self.dropout(x9)
out = self.out_conv(x9)
return out
def forward(self, input, turnoff_drop=False):
if turnoff_drop:
has_dropout = self.has_dropout
self.has_dropout = False
features = self.encoder(input)
out = self.decoder(features)
if turnoff_drop:
self.has_dropout = has_dropout
return out
# def __init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv3d):
# torch.nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, nn.BatchNorm3d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
if __name__ == '__main__':
# compute FLOPS & PARAMETERS
from thop import profile
from thop import clever_format
model = VNet(n_channels=1, n_classes=2)
input = torch.randn(4, 1, 112, 112, 80)
flops, params = profile(model, inputs=(input,))
print(flops, params)
macs, params = clever_format([flops, params], "%.3f")
print(macs, params)
print("VNet have {} paramerters in total".format(sum(x.numel() for x in model.parameters())))
| 9,541 | 35.984496 | 110 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/attention.py
|
import torch.nn as nn
try:
from inplace_abn import InPlaceABN
except ImportError:
InPlaceABN = None
class Conv2dReLU(nn.Sequential):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
padding=0,
stride=1,
use_batchnorm=True,
):
if use_batchnorm == "inplace" and InPlaceABN is None:
raise RuntimeError(
"In order to use `use_batchnorm='inplace'` inplace_abn package must be installed. "
+ "To install see: https://github.com/mapillary/inplace_abn"
)
super().__init__()
conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=not (use_batchnorm),
)
relu = nn.ReLU(inplace=True)
if use_batchnorm == "inplace":
bn = InPlaceABN(out_channels, activation="leaky_relu", activation_param=0.0)
relu = nn.Identity()
elif use_batchnorm and use_batchnorm != "inplace":
bn = nn.BatchNorm2d(out_channels)
else:
bn = nn.Identity()
super(Conv2dReLU, self).__init__(conv, bn, relu)
class SCSEModule(nn.Module):
def __init__(self, in_channels, reduction=16):
super().__init__()
self.cSE = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, in_channels // reduction, 1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels // reduction, in_channels, 1),
nn.Sigmoid(),
)
self.sSE = nn.Sequential(nn.Conv2d(in_channels, 1, 1), nn.Sigmoid())
def forward(self, x):
return x * self.cSE(x) + x * self.sSE(x)
class Activation(nn.Module):
def __init__(self, name, **params):
super().__init__()
if name is None or name == 'identity':
self.activation = nn.Identity(**params)
elif name == 'sigmoid':
self.activation = nn.Sigmoid()
elif name == 'softmax2d':
self.activation = nn.Softmax(dim=1, **params)
elif name == 'softmax':
self.activation = nn.Softmax(**params)
elif name == 'logsoftmax':
self.activation = nn.LogSoftmax(**params)
elif callable(name):
self.activation = name(**params)
else:
raise ValueError('Activation should be callable/sigmoid/softmax/logsoftmax/None; got {}'.format(name))
def forward(self, x):
return self.activation(x)
class Attention(nn.Module):
def __init__(self, name, **params):
super().__init__()
if name is None:
self.attention = nn.Identity(**params)
elif name == 'scse':
self.attention = SCSEModule(**params)
else:
raise ValueError("Attention {} is not implemented".format(name))
def forward(self, x):
return self.attention(x)
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.shape[0], -1)
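# Hedged sketch (editor addition): SCSE recalibrates a feature map without
# changing its shape; channel counts here are illustrative.
def _scse_example():
    import torch
    block = SCSEModule(in_channels=32, reduction=16)
    return block(torch.randn(2, 32, 24, 24)).shape  # -> torch.Size([2, 32, 24, 24])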
| 3,104 | 26.972973 | 114 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/enet.py
|
import torch.nn as nn
import torch
class InitialBlock(nn.Module):
"""The initial block is composed of two branches:
1. a main branch which performs a regular convolution with stride 2;
2. an extension branch which performs max-pooling.
Doing both operations in parallel and concatenating their results
allows for efficient downsampling and expansion. The main branch
outputs 13 feature maps while the extension branch outputs 3, for a
total of 16 feature maps after concatenation.
Keyword arguments:
- in_channels (int): the number of input channels.
    - out_channels (int): the number of output channels.
- bias (bool, optional): Adds a learnable bias to the output if
``True``. Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
bias=False,
relu=True):
super().__init__()
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
        # Main branch - as stated above, the number of output channels for this
        # branch is the total minus the number of input channels, since the
        # remaining channels come from the extension (max-pooling) branch
self.main_branch = nn.Conv2d(
in_channels,
out_channels - in_channels,
kernel_size=3,
stride=2,
padding=1,
bias=bias)
# Extension branch
self.ext_branch = nn.MaxPool2d(3, stride=2, padding=1)
# Initialize batch normalization to be used after concatenation
self.batch_norm = nn.BatchNorm2d(out_channels)
# PReLU layer to apply after concatenating the branches
self.out_activation = activation()
def forward(self, x):
main = self.main_branch(x)
ext = self.ext_branch(x)
# Concatenate branches
out = torch.cat((main, ext), 1)
# Apply batch normalization
out = self.batch_norm(out)
return self.out_activation(out)
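# Hedged sketch (editor addition): for in_channels=3 and out_channels=16 the
# conv branch contributes 16 - 3 = 13 maps and max-pooling the remaining 3.
def _initial_block_example():
    block = InitialBlock(3, 16)
    return block(torch.randn(1, 3, 512, 512)).shape  # -> torch.Size([1, 16, 256, 256])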
class RegularBottleneck(nn.Module):
"""Regular bottlenecks are the main building block of ENet.
Main branch:
1. Shortcut connection.
Extension branch:
1. 1x1 convolution which decreases the number of channels by
``internal_ratio``, also called a projection;
2. regular, dilated or asymmetric convolution;
3. 1x1 convolution which increases the number of channels back to
``channels``, also called an expansion;
4. dropout as a regularizer.
Keyword arguments:
- channels (int): the number of input and output channels.
- internal_ratio (int, optional): a scale factor applied to
``channels`` used to compute the number of
    channels after the projection. E.g. given ``channels`` equal to 128 and
internal_ratio equal to 2 the number of channels after the projection
is 64. Default: 4.
- kernel_size (int, optional): the kernel size of the filters used in
the convolution layer described above in item 2 of the extension
branch. Default: 3.
- padding (int, optional): zero-padding added to both sides of the
input. Default: 0.
- dilation (int, optional): spacing between kernel elements for the
convolution described in item 2 of the extension branch. Default: 1.
    - asymmetric (bool, optional): flags if the convolution described in
    item 2 of the extension branch is asymmetric or not. Default: False.
- dropout_prob (float, optional): probability of an element to be
zeroed. Default: 0 (no dropout).
- bias (bool, optional): Adds a learnable bias to the output if
``True``. Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
channels,
internal_ratio=4,
kernel_size=3,
padding=0,
dilation=1,
asymmetric=False,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
        # Check that the internal_ratio parameter is within the expected range
# [1, channels]
if internal_ratio <= 1 or internal_ratio > channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}."
.format(channels, internal_ratio))
internal_channels = channels // internal_ratio
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
# Main branch - shortcut connection
# Extension branch - 1x1 convolution, followed by a regular, dilated or
# asymmetric convolution, followed by another 1x1 convolution, and,
# finally, a regularizer (spatial dropout). Number of channels is constant.
# 1x1 projection convolution
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
channels,
internal_channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
        # If the convolution is asymmetric we split the main convolution in
        # two. E.g. for a 5x5 asymmetric convolution we have two convolutions:
        # the first is 5x1 and the second is 1x5.
if asymmetric:
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=(kernel_size, 1),
stride=1,
padding=(padding, 0),
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation(),
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=(1, kernel_size),
stride=1,
padding=(0, padding),
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
else:
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=kernel_size,
stride=1,
padding=padding,
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(
internal_channels,
channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(channels), activation())
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after adding the branches
self.out_activation = activation()
def forward(self, x):
# Main branch shortcut
main = x
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = main + ext
return self.out_activation(out)
class DownsamplingBottleneck(nn.Module):
"""Downsampling bottlenecks further downsample the feature map size.
Main branch:
1. max pooling with stride 2; indices are saved to be used for
unpooling later.
Extension branch:
1. 2x2 convolution with stride 2 that decreases the number of channels
by ``internal_ratio``, also called a projection;
2. regular convolution (by default, 3x3);
3. 1x1 convolution which increases the number of channels to
``out_channels``, also called an expansion;
4. dropout as a regularizer.
Keyword arguments:
- in_channels (int): the number of input channels.
- out_channels (int): the number of output channels.
- internal_ratio (int, optional): a scale factor applied to ``channels``
    used to compute the number of channels after the projection. E.g. given
``channels`` equal to 128 and internal_ratio equal to 2 the number of
channels after the projection is 64. Default: 4.
- return_indices (bool, optional): if ``True``, will return the max
indices along with the outputs. Useful when unpooling later.
- dropout_prob (float, optional): probability of an element to be
zeroed. Default: 0 (no dropout).
- bias (bool, optional): Adds a learnable bias to the output if
``True``. Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
internal_ratio=4,
return_indices=False,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
# Store parameters that are needed later
self.return_indices = return_indices
        # Check that the internal_ratio parameter is within the expected range
# [1, channels]
if internal_ratio <= 1 or internal_ratio > in_channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}. "
.format(in_channels, internal_ratio))
internal_channels = in_channels // internal_ratio
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
# Main branch - max pooling followed by feature map (channels) padding
self.main_max1 = nn.MaxPool2d(
2,
stride=2,
return_indices=return_indices)
        # Extension branch - 2x2 projection convolution with stride 2, followed
        # by a regular convolution, followed by a 1x1 expansion convolution.
        # The number of channels is increased to out_channels.
# 2x2 projection convolution with stride 2
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
in_channels,
internal_channels,
kernel_size=2,
stride=2,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
# Convolution
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=3,
stride=1,
padding=1,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(
internal_channels,
out_channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(out_channels), activation())
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_activation = activation()
def forward(self, x):
# Main branch shortcut
        if self.return_indices:
            main, max_indices = self.main_max1(x)
        else:
            main = self.main_max1(x)
            max_indices = None
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Main branch channel padding
n, ch_ext, h, w = ext.size()
ch_main = main.size()[1]
        # Create the zero padding directly on the same device/dtype as the
        # extension branch before concatenating
        padding = torch.zeros(n, ch_ext - ch_main, h, w,
                              device=ext.device, dtype=ext.dtype)
# Concatenate
main = torch.cat((main, padding), 1)
# Add main and extension branches
out = main + ext
return self.out_activation(out), max_indices
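# Hedged sketch (editor addition): as used by ENet, return_indices=True is
# required so the pooling indices can feed the matching UpsamplingBottleneck.
def _downsampling_bottleneck_example():
    block = DownsamplingBottleneck(16, 64, return_indices=True)
    out, indices = block(torch.randn(1, 16, 128, 128))
    return out.shape, indices.shape  # (1, 64, 64, 64) and (1, 16, 64, 64)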
class UpsamplingBottleneck(nn.Module):
"""The upsampling bottlenecks upsample the feature map resolution using max
pooling indices stored from the corresponding downsampling bottleneck.
Main branch:
1. 1x1 convolution with stride 1 that decreases the number of channels by
``internal_ratio``, also called a projection;
2. max unpool layer using the max pool indices from the corresponding
downsampling max pool layer.
Extension branch:
1. 1x1 convolution with stride 1 that decreases the number of channels by
``internal_ratio``, also called a projection;
2. transposed convolution (by default, 3x3);
3. 1x1 convolution which increases the number of channels to
``out_channels``, also called an expansion;
4. dropout as a regularizer.
Keyword arguments:
- in_channels (int): the number of input channels.
- out_channels (int): the number of output channels.
- internal_ratio (int, optional): a scale factor applied to ``in_channels``
    used to compute the number of channels after the projection. E.g. given
``in_channels`` equal to 128 and ``internal_ratio`` equal to 2 the number
of channels after the projection is 64. Default: 4.
- dropout_prob (float, optional): probability of an element to be zeroed.
Default: 0 (no dropout).
- bias (bool, optional): Adds a learnable bias to the output if ``True``.
Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
internal_ratio=4,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
        # Check that the internal_ratio parameter is within the expected range
# [1, channels]
if internal_ratio <= 1 or internal_ratio > in_channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}. "
.format(in_channels, internal_ratio))
internal_channels = in_channels // internal_ratio
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
        # Main branch - 1x1 projection convolution followed by max unpooling
self.main_conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels))
# Remember that the stride is the same as the kernel_size, just like
# the max pooling layers
self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2)
        # Extension branch - 1x1 projection convolution, followed by a
        # transposed convolution, followed by another 1x1 expansion
        # convolution. The number of channels is reduced to out_channels.
# 1x1 projection convolution with stride 1
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
in_channels, internal_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(internal_channels), activation())
# Transposed convolution
self.ext_tconv1 = nn.ConvTranspose2d(
internal_channels,
internal_channels,
kernel_size=2,
stride=2,
bias=bias)
self.ext_tconv1_bnorm = nn.BatchNorm2d(internal_channels)
self.ext_tconv1_activation = activation()
# 1x1 expansion convolution
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels), activation())
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_activation = activation()
def forward(self, x, max_indices, output_size):
# Main branch shortcut
main = self.main_conv1(x)
main = self.main_unpool1(
main, max_indices, output_size=output_size)
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_tconv1(ext, output_size=output_size)
ext = self.ext_tconv1_bnorm(ext)
ext = self.ext_tconv1_activation(ext)
ext = self.ext_conv2(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = main + ext
return self.out_activation(out)
class ENet(nn.Module):
"""Generate the ENet model.
    Keyword arguments:
    - in_channels (int): the number of input channels.
    - num_classes (int): the number of classes to segment.
- encoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the encoder blocks/layers; otherwise, PReLU
is used. Default: False.
- decoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the decoder blocks/layers; otherwise, PReLU
is used. Default: True.
"""
def __init__(self, in_channels, num_classes, encoder_relu=False, decoder_relu=True):
super().__init__()
self.initial_block = InitialBlock(in_channels, 16, relu=encoder_relu)
# Stage 1 - Encoder
self.downsample1_0 = DownsamplingBottleneck(
16,
64,
return_indices=True,
dropout_prob=0.01,
relu=encoder_relu)
self.regular1_1 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_2 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_3 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_4 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
# Stage 2 - Encoder
self.downsample2_0 = DownsamplingBottleneck(
64,
128,
return_indices=True,
dropout_prob=0.1,
relu=encoder_relu)
self.regular2_1 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_2 = RegularBottleneck(
128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_3 = RegularBottleneck(
128,
kernel_size=5,
padding=2,
asymmetric=True,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated2_4 = RegularBottleneck(
128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular2_5 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_6 = RegularBottleneck(
128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_7 = RegularBottleneck(
128,
kernel_size=5,
asymmetric=True,
padding=2,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated2_8 = RegularBottleneck(
128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 3 - Encoder
self.regular3_0 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_1 = RegularBottleneck(
128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric3_2 = RegularBottleneck(
128,
kernel_size=5,
padding=2,
asymmetric=True,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated3_3 = RegularBottleneck(
128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular3_4 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_5 = RegularBottleneck(
128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric3_6 = RegularBottleneck(
128,
kernel_size=5,
asymmetric=True,
padding=2,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated3_7 = RegularBottleneck(
128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 4 - Decoder
self.upsample4_0 = UpsamplingBottleneck(
128, 64, dropout_prob=0.1, relu=decoder_relu)
self.regular4_1 = RegularBottleneck(
64, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.regular4_2 = RegularBottleneck(
64, padding=1, dropout_prob=0.1, relu=decoder_relu)
# Stage 5 - Decoder
self.upsample5_0 = UpsamplingBottleneck(
64, 16, dropout_prob=0.1, relu=decoder_relu)
self.regular5_1 = RegularBottleneck(
16, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.transposed_conv = nn.ConvTranspose2d(
16,
num_classes,
kernel_size=3,
stride=2,
padding=1,
bias=False)
def forward(self, x):
# Initial block
input_size = x.size()
x = self.initial_block(x)
# Stage 1 - Encoder
stage1_input_size = x.size()
x, max_indices1_0 = self.downsample1_0(x)
x = self.regular1_1(x)
x = self.regular1_2(x)
x = self.regular1_3(x)
x = self.regular1_4(x)
# Stage 2 - Encoder
stage2_input_size = x.size()
x, max_indices2_0 = self.downsample2_0(x)
x = self.regular2_1(x)
x = self.dilated2_2(x)
x = self.asymmetric2_3(x)
x = self.dilated2_4(x)
x = self.regular2_5(x)
x = self.dilated2_6(x)
x = self.asymmetric2_7(x)
x = self.dilated2_8(x)
# Stage 3 - Encoder
x = self.regular3_0(x)
x = self.dilated3_1(x)
x = self.asymmetric3_2(x)
x = self.dilated3_3(x)
x = self.regular3_4(x)
x = self.dilated3_5(x)
x = self.asymmetric3_6(x)
x = self.dilated3_7(x)
# Stage 4 - Decoder
x = self.upsample4_0(x, max_indices2_0, output_size=stage2_input_size)
x = self.regular4_1(x)
x = self.regular4_2(x)
# Stage 5 - Decoder
x = self.upsample5_0(x, max_indices1_0, output_size=stage1_input_size)
x = self.regular5_1(x)
x = self.transposed_conv(x, output_size=input_size)
return x
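# Hedged sketch (editor addition): a full forward pass; the final transposed
# convolution restores the input resolution.
def _enet_example():
    net = ENet(in_channels=3, num_classes=12)
    return net(torch.randn(1, 3, 512, 512)).shape  # -> torch.Size([1, 12, 512, 512])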
| 22,927 | 36.281301 | 88 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/unet_3D_dv_semi.py
|
"""
This file is adapted from https://github.com/ozan-oktay/Attention-Gated-Networks
"""
import math
import torch
import torch.nn as nn
from networks.utils import UnetConv3, UnetUp3, UnetUp3_CT, UnetDsv3
import torch.nn.functional as F
from networks.networks_other import init_weights
class unet_3D_dv_semi(nn.Module):
def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3, is_batchnorm=True):
super(unet_3D_dv_semi, self).__init__()
self.is_deconv = is_deconv
self.in_channels = in_channels
self.is_batchnorm = is_batchnorm
self.feature_scale = feature_scale
filters = [64, 128, 256, 512, 1024]
filters = [int(x / self.feature_scale) for x in filters]
# downsampling
self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm, kernel_size=(
3, 3, 3), padding_size=(1, 1, 1))
self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm, kernel_size=(
3, 3, 3), padding_size=(1, 1, 1))
self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm, kernel_size=(
3, 3, 3), padding_size=(1, 1, 1))
self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm, kernel_size=(
3, 3, 3), padding_size=(1, 1, 1))
self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm, kernel_size=(
3, 3, 3), padding_size=(1, 1, 1))
# upsampling
self.up_concat4 = UnetUp3_CT(filters[4], filters[3], is_batchnorm)
self.up_concat3 = UnetUp3_CT(filters[3], filters[2], is_batchnorm)
self.up_concat2 = UnetUp3_CT(filters[2], filters[1], is_batchnorm)
self.up_concat1 = UnetUp3_CT(filters[1], filters[0], is_batchnorm)
# deep supervision
self.dsv4 = UnetDsv3(
in_size=filters[3], out_size=n_classes, scale_factor=8)
self.dsv3 = UnetDsv3(
in_size=filters[2], out_size=n_classes, scale_factor=4)
self.dsv2 = UnetDsv3(
in_size=filters[1], out_size=n_classes, scale_factor=2)
self.dsv1 = nn.Conv3d(
in_channels=filters[0], out_channels=n_classes, kernel_size=1)
self.dropout1 = nn.Dropout3d(p=0.5)
self.dropout2 = nn.Dropout3d(p=0.3)
self.dropout3 = nn.Dropout3d(p=0.2)
self.dropout4 = nn.Dropout3d(p=0.1)
# initialise weights
for m in self.modules():
if isinstance(m, nn.Conv3d):
init_weights(m, init_type='kaiming')
elif isinstance(m, nn.BatchNorm3d):
init_weights(m, init_type='kaiming')
def forward(self, inputs):
conv1 = self.conv1(inputs)
maxpool1 = self.maxpool1(conv1)
conv2 = self.conv2(maxpool1)
maxpool2 = self.maxpool2(conv2)
conv3 = self.conv3(maxpool2)
maxpool3 = self.maxpool3(conv3)
conv4 = self.conv4(maxpool3)
maxpool4 = self.maxpool4(conv4)
center = self.center(maxpool4)
up4 = self.up_concat4(conv4, center)
up4 = self.dropout1(up4)
up3 = self.up_concat3(conv3, up4)
up3 = self.dropout2(up3)
up2 = self.up_concat2(conv2, up3)
up2 = self.dropout3(up2)
up1 = self.up_concat1(conv1, up2)
up1 = self.dropout4(up1)
# Deep Supervision
dsv4 = self.dsv4(up4)
dsv3 = self.dsv3(up3)
dsv2 = self.dsv2(up2)
dsv1 = self.dsv1(up1)
return dsv1, dsv2, dsv3, dsv4
@staticmethod
def apply_argmax_softmax(pred):
        prob = F.softmax(pred, dim=1)  # plain softmax, despite the original `log_p` name
        return prob
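# A minimal smoke-test sketch (shapes and arguments are illustrative): all four
# deep-supervision heads come back at full input resolution, so they can be
# averaged into a single prediction. Averaging is one common choice here, not
# something this file prescribes.
def _demo_unet_3d_dv_semi():
    import torch
    net = unet_3D_dv_semi(n_classes=2, in_channels=1)
    x = torch.randn(1, 1, 64, 64, 64)
    dsv1, dsv2, dsv3, dsv4 = net(x)  # each (1, 2, 64, 64, 64)
    return (dsv1 + dsv2 + dsv3 + dsv4) / 4.0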
| 3,865 | 33.212389 | 104 |
py
|
SSL4MIS
|
SSL4MIS-master/code/networks/unet_3D.py
|
# -*- coding: utf-8 -*-
"""
An implementation of the 3D U-Net paper:
Özgün Çiçek, Ahmed Abdulkadir, Soeren S. Lienkamp, Thomas Brox, Olaf Ronneberger:
3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation.
MICCAI (2) 2016: 424-432
Note that there are some modifications from the original paper, such as
the use of batch normalization, dropout, and leaky relu here.
The implementation is borrowed from: https://github.com/ozan-oktay/Attention-Gated-Networks
"""
import math
import torch.nn as nn
import torch.nn.functional as F
from networks.networks_other import init_weights
from networks.utils import UnetConv3, UnetUp3, UnetUp3_CT
class unet_3D(nn.Module):
def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3, is_batchnorm=True):
super(unet_3D, self).__init__()
self.is_deconv = is_deconv
self.in_channels = in_channels
self.is_batchnorm = is_batchnorm
self.feature_scale = feature_scale
filters = [64, 128, 256, 512, 1024]
filters = [int(x / self.feature_scale) for x in filters]
# downsampling
self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm, kernel_size=(
3, 3, 3), padding_size=(1, 1, 1))
self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm, kernel_size=(
3, 3, 3), padding_size=(1, 1, 1))
self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm, kernel_size=(
3, 3, 3), padding_size=(1, 1, 1))
self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm, kernel_size=(
3, 3, 3), padding_size=(1, 1, 1))
self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm, kernel_size=(
3, 3, 3), padding_size=(1, 1, 1))
# upsampling
self.up_concat4 = UnetUp3_CT(filters[4], filters[3], is_batchnorm)
self.up_concat3 = UnetUp3_CT(filters[3], filters[2], is_batchnorm)
self.up_concat2 = UnetUp3_CT(filters[2], filters[1], is_batchnorm)
self.up_concat1 = UnetUp3_CT(filters[1], filters[0], is_batchnorm)
# final conv (without any concat)
self.final = nn.Conv3d(filters[0], n_classes, 1)
self.dropout1 = nn.Dropout(p=0.3)
self.dropout2 = nn.Dropout(p=0.3)
# initialise weights
for m in self.modules():
if isinstance(m, nn.Conv3d):
init_weights(m, init_type='kaiming')
elif isinstance(m, nn.BatchNorm3d):
init_weights(m, init_type='kaiming')
def forward(self, inputs):
conv1 = self.conv1(inputs)
maxpool1 = self.maxpool1(conv1)
conv2 = self.conv2(maxpool1)
maxpool2 = self.maxpool2(conv2)
conv3 = self.conv3(maxpool2)
maxpool3 = self.maxpool3(conv3)
conv4 = self.conv4(maxpool3)
maxpool4 = self.maxpool4(conv4)
center = self.center(maxpool4)
center = self.dropout1(center)
up4 = self.up_concat4(conv4, center)
up3 = self.up_concat3(conv3, up4)
up2 = self.up_concat2(conv2, up3)
up1 = self.up_concat1(conv1, up2)
up1 = self.dropout2(up1)
final = self.final(up1)
return final
@staticmethod
def apply_argmax_softmax(pred):
        prob = F.softmax(pred, dim=1)  # plain softmax, despite the original `log_p` name
        return prob
| 3,617 | 34.821782 | 104 |
py
|
SSL4MIS
|
SSL4MIS-master/code/augmentations/__init__.py
|
import json
from collections import OrderedDict
from augmentations.ctaugment import *
class StorableCTAugment(CTAugment):
def load_state_dict(self, state):
for k in ["decay", "depth", "th", "rates"]:
assert k in state, "{} not in {}".format(k, state.keys())
setattr(self, k, state[k])
def state_dict(self):
return OrderedDict(
[(k, getattr(self, k)) for k in ["decay", "depth", "th", "rates"]]
)
def get_default_cta():
return StorableCTAugment()
def cta_apply(pil_img, ops):
if ops is None:
return pil_img
for op, args in ops:
pil_img = OPS[op].f(pil_img, *args)
return pil_img
def deserialize(policy_str):
return [OP(f=x[0], bins=x[1]) for x in json.loads(policy_str)]
def stats(cta):
return "\n".join(
"%-16s %s"
% (
k,
" / ".join(
" ".join("%.2f" % x for x in cta.rate_to_p(rate))
for rate in cta.rates[k]
),
)
for k in sorted(OPS.keys())
)
def interleave(x, batch, inverse=False):
"""
TF code
def interleave(x, batch):
s = x.get_shape().as_list()
return tf.reshape(tf.transpose(tf.reshape(x, [-1, batch] + s[1:]), [1, 0] + list(range(2, 1+len(s)))), [-1] + s[1:])
"""
shape = x.shape
axes = [batch, -1] if inverse else [-1, batch]
return x.reshape(*axes, *shape[1:]).transpose(0, 1).reshape(-1, *shape[1:])
def deinterleave(x, batch):
return interleave(x, batch, inverse=True)
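# A quick round-trip sketch (values are illustrative): interleaving a
# (k*batch, ...) tensor and then de-interleaving it with the same batch size
# reproduces the input.
def _demo_interleave_roundtrip():
    import torch
    x = torch.arange(12).reshape(6, 2)
    y = interleave(x, 3)
    assert torch.equal(deinterleave(y, 3), x)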
| 1,560 | 23.777778 | 124 |
py
|
SSL4MIS
|
SSL4MIS-master/code/augmentations/ctaugment.py
|
# https://raw.githubusercontent.com/google-research/fixmatch/master/libml/ctaugment.py
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Control Theory based self-augmentation, modified from https://github.com/vfdev-5/FixMatch-pytorch"""
import random
import torch
from collections import namedtuple
import numpy as np
from scipy.ndimage.interpolation import zoom
from PIL import Image, ImageOps, ImageEnhance, ImageFilter
OPS = {}
OP = namedtuple("OP", ("f", "bins"))
Sample = namedtuple("Sample", ("train", "probe"))
def register(*bins):
def wrap(f):
OPS[f.__name__] = OP(f, bins)
return f
return wrap
class CTAugment(object):
def __init__(self, depth=2, th=0.85, decay=0.99):
self.decay = decay
self.depth = depth
self.th = th
self.rates = {}
for k, op in OPS.items():
self.rates[k] = tuple([np.ones(x, "f") for x in op.bins])
def rate_to_p(self, rate):
p = rate + (1 - self.decay) # Avoid to have all zero.
p = p / p.max()
p[p < self.th] = 0
return p
def policy(self, probe, weak):
num_strong_ops = 11
kl_weak = list(OPS.keys())[num_strong_ops:]
kl_strong = list(OPS.keys())[:num_strong_ops]
if weak:
kl = kl_weak
else:
kl = kl_strong
v = []
if probe:
for _ in range(self.depth):
k = random.choice(kl)
bins = self.rates[k]
rnd = np.random.uniform(0, 1, len(bins))
v.append(OP(k, rnd.tolist()))
return v
for _ in range(self.depth):
vt = []
k = random.choice(kl)
bins = self.rates[k]
rnd = np.random.uniform(0, 1, len(bins))
for r, bin in zip(rnd, bins):
p = self.rate_to_p(bin)
value = np.random.choice(p.shape[0], p=p / p.sum())
vt.append((value + r) / p.shape[0])
v.append(OP(k, vt))
return v
def update_rates(self, policy, proximity):
for k, bins in policy:
for p, rate in zip(bins, self.rates[k]):
p = int(p * len(rate) * 0.999)
rate[p] = rate[p] * self.decay + proximity * (1 - self.decay)
print(f"\t {k} weights updated")
def stats(self):
return "\n".join(
"%-16s %s"
% (
k,
" / ".join(
" ".join("%.2f" % x for x in self.rate_to_p(rate))
for rate in self.rates[k]
),
)
for k in sorted(OPS.keys())
)
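# A minimal sketch of the intended control loop (the proximity value is
# illustrative): draw a policy for a probe sample, apply it to the probe
# elsewhere, then feed back how close the model's prediction on the augmented
# probe was so the bin rates adapt.
def _demo_cta_update():
    cta = CTAugment()
    ops = cta.policy(probe=True, weak=False)  # one strong-augmentation policy
    cta.update_rates(ops, proximity=0.9)      # 1.0 = prediction matched the label
    return cta.stats()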
def _enhance(x, op, level):
return op(x).enhance(0.1 + 1.9 * level)
def _imageop(x, op, level):
return Image.blend(x, op(x), level)
def _filter(x, op, level):
return Image.blend(x, x.filter(op), level)
@register(17)
def autocontrast(x, level):
return _imageop(x, ImageOps.autocontrast, level)
@register(17)
def brightness(x, brightness):
return _enhance(x, ImageEnhance.Brightness, brightness)
@register(17)
def color(x, color):
return _enhance(x, ImageEnhance.Color, color)
@register(17)
def contrast(x, contrast):
return _enhance(x, ImageEnhance.Contrast, contrast)
@register(17)
def equalize(x, level):
return _imageop(x, ImageOps.equalize, level)
@register(17)
def invert(x, level):
return _imageop(x, ImageOps.invert, level)
@register(8)
def posterize(x, level):
level = 1 + int(level * 7.999)
return ImageOps.posterize(x, level)
@register(17)
def solarize(x, th):
th = int(th * 255.999)
return ImageOps.solarize(x, th)
@register(17)
def smooth(x, level):
return _filter(x, ImageFilter.SMOOTH, level)
@register(17)
def blur(x, level):
return _filter(x, ImageFilter.BLUR, level)
@register(17)
def sharpness(x, sharpness):
return _enhance(x, ImageEnhance.Sharpness, sharpness)
# weak after here
@register(17)
def cutout(x, level):
"""Apply cutout to pil_img at the specified level."""
size = 1 + int(level * min(x.size) * 0.499)
    img_height, img_width = x.size  # note: PIL's size is (width, height); the swapped names are used consistently below
    height_loc = np.random.randint(low=img_height // 2, high=img_height)
    width_loc = np.random.randint(low=img_width // 2, high=img_width)
upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2))
lower_coord = (
min(img_height, height_loc + size // 2),
min(img_width, width_loc + size // 2),
)
pixels = x.load() # create the pixel map
for i in range(upper_coord[0], lower_coord[0]): # for every col:
for j in range(upper_coord[1], lower_coord[1]): # For every row
x.putpixel((i, j), 0) # set the color accordingly
return x
@register()
def identity(x):
return x
@register(17, 6)
def rescale(x, scale, method):
s = x.size
scale *= 0.25
crop = (scale * s[0], scale * s[1], s[0] * (1 - scale), s[1] * (1 - scale))
methods = (
        Image.ANTIALIAS,  # same filter as Image.LANCZOS; the ANTIALIAS alias was removed in Pillow 10
Image.BICUBIC,
Image.BILINEAR,
Image.BOX,
Image.HAMMING,
Image.NEAREST,
)
method = methods[int(method * 5.99)]
return x.crop(crop).resize(x.size, method)
@register(17)
def rotate(x, angle):
angle = int(np.round((2 * angle - 1) * 45))
return x.rotate(angle)
@register(17)
def shear_x(x, shear):
shear = (2 * shear - 1) * 0.3
return x.transform(x.size, Image.AFFINE, (1, shear, 0, 0, 1, 0))
@register(17)
def shear_y(x, shear):
shear = (2 * shear - 1) * 0.3
return x.transform(x.size, Image.AFFINE, (1, 0, 0, shear, 1, 0))
@register(17)
def translate_x(x, delta):
delta = (2 * delta - 1) * 0.3
return x.transform(x.size, Image.AFFINE, (1, 0, delta, 0, 1, 0))
@register(17)
def translate_y(x, delta):
delta = (2 * delta - 1) * 0.3
return x.transform(x.size, Image.AFFINE, (1, 0, 0, 0, 1, delta))
| 6,431 | 25.146341 | 103 |
py
|
SSL4MIS
|
SSL4MIS-master/code/dataloaders/acdc_data_processing.py
|
import glob
import os
import h5py
import numpy as np
import SimpleITK as sitk
slice_num = 0
image_paths = sorted(glob.glob("/home/xdluo/data/ACDC/image/*.nii.gz"))
for case in image_paths:
img_itk = sitk.ReadImage(case)
origin = img_itk.GetOrigin()
spacing = img_itk.GetSpacing()
direction = img_itk.GetDirection()
image = sitk.GetArrayFromImage(img_itk)
msk_path = case.replace("image", "label").replace(".nii.gz", "_gt.nii.gz")
if os.path.exists(msk_path):
print(msk_path)
msk_itk = sitk.ReadImage(msk_path)
mask = sitk.GetArrayFromImage(msk_itk)
image = (image - image.min()) / (image.max() - image.min())
print(image.shape)
image = image.astype(np.float32)
item = case.split("/")[-1].split(".")[0]
if image.shape != mask.shape:
print("Error")
print(item)
for slice_ind in range(image.shape[0]):
f = h5py.File(
'/home/xdluo/data/ACDC/data/{}_slice_{}.h5'.format(item, slice_ind), 'w')
f.create_dataset(
'image', data=image[slice_ind], compression="gzip")
f.create_dataset('label', data=mask[slice_ind], compression="gzip")
f.close()
slice_num += 1
print("Converted all ACDC volumes to 2D slices")
print("Total {} slices".format(slice_num))
| 1,353 | 34.631579 | 89 |
py
|
SSL4MIS
|
SSL4MIS-master/code/dataloaders/brats2019.py
|
import os
import torch
import numpy as np
from glob import glob
from torch.utils.data import Dataset
import h5py
import itertools
from torch.utils.data.sampler import Sampler
class BraTS2019(Dataset):
""" BraTS2019 Dataset """
def __init__(self, base_dir=None, split='train', num=None, transform=None):
self._base_dir = base_dir
self.transform = transform
self.sample_list = []
train_path = self._base_dir+'/train.txt'
test_path = self._base_dir+'/val.txt'
if split == 'train':
with open(train_path, 'r') as f:
self.image_list = f.readlines()
elif split == 'test':
with open(test_path, 'r') as f:
self.image_list = f.readlines()
self.image_list = [item.replace('\n', '').split(",")[0] for item in self.image_list]
if num is not None:
self.image_list = self.image_list[:num]
print("total {} samples".format(len(self.image_list)))
def __len__(self):
return len(self.image_list)
def __getitem__(self, idx):
image_name = self.image_list[idx]
h5f = h5py.File(self._base_dir + "/data/{}.h5".format(image_name), 'r')
image = h5f['image'][:]
label = h5f['label'][:]
sample = {'image': image, 'label': label.astype(np.uint8)}
if self.transform:
sample = self.transform(sample)
return sample
class CenterCrop(object):
def __init__(self, output_size):
self.output_size = output_size
def __call__(self, sample):
image, label = sample['image'], sample['label']
# pad the sample if necessary
if label.shape[0] <= self.output_size[0] or label.shape[1] <= self.output_size[1] or label.shape[2] <= \
self.output_size[2]:
pw = max((self.output_size[0] - label.shape[0]) // 2 + 3, 0)
ph = max((self.output_size[1] - label.shape[1]) // 2 + 3, 0)
pd = max((self.output_size[2] - label.shape[2]) // 2 + 3, 0)
image = np.pad(image, [(pw, pw), (ph, ph), (pd, pd)],
mode='constant', constant_values=0)
label = np.pad(label, [(pw, pw), (ph, ph), (pd, pd)],
mode='constant', constant_values=0)
(w, h, d) = image.shape
w1 = int(round((w - self.output_size[0]) / 2.))
h1 = int(round((h - self.output_size[1]) / 2.))
d1 = int(round((d - self.output_size[2]) / 2.))
label = label[w1:w1 + self.output_size[0], h1:h1 +
self.output_size[1], d1:d1 + self.output_size[2]]
image = image[w1:w1 + self.output_size[0], h1:h1 +
self.output_size[1], d1:d1 + self.output_size[2]]
return {'image': image, 'label': label}
class RandomCrop(object):
"""
    Randomly crop the image in a sample
Args:
output_size (int): Desired output size
"""
def __init__(self, output_size, with_sdf=False):
self.output_size = output_size
self.with_sdf = with_sdf
def __call__(self, sample):
image, label = sample['image'], sample['label']
if self.with_sdf:
sdf = sample['sdf']
# pad the sample if necessary
if label.shape[0] <= self.output_size[0] or label.shape[1] <= self.output_size[1] or label.shape[2] <= \
self.output_size[2]:
pw = max((self.output_size[0] - label.shape[0]) // 2 + 3, 0)
ph = max((self.output_size[1] - label.shape[1]) // 2 + 3, 0)
pd = max((self.output_size[2] - label.shape[2]) // 2 + 3, 0)
image = np.pad(image, [(pw, pw), (ph, ph), (pd, pd)],
mode='constant', constant_values=0)
label = np.pad(label, [(pw, pw), (ph, ph), (pd, pd)],
mode='constant', constant_values=0)
if self.with_sdf:
sdf = np.pad(sdf, [(pw, pw), (ph, ph), (pd, pd)],
mode='constant', constant_values=0)
(w, h, d) = image.shape
# if np.random.uniform() > 0.33:
# w1 = np.random.randint((w - self.output_size[0])//4, 3*(w - self.output_size[0])//4)
# h1 = np.random.randint((h - self.output_size[1])//4, 3*(h - self.output_size[1])//4)
# else:
w1 = np.random.randint(0, w - self.output_size[0])
h1 = np.random.randint(0, h - self.output_size[1])
d1 = np.random.randint(0, d - self.output_size[2])
label = label[w1:w1 + self.output_size[0], h1:h1 +
self.output_size[1], d1:d1 + self.output_size[2]]
image = image[w1:w1 + self.output_size[0], h1:h1 +
self.output_size[1], d1:d1 + self.output_size[2]]
if self.with_sdf:
sdf = sdf[w1:w1 + self.output_size[0], h1:h1 +
self.output_size[1], d1:d1 + self.output_size[2]]
return {'image': image, 'label': label, 'sdf': sdf}
else:
return {'image': image, 'label': label}
class RandomRotFlip(object):
"""
    Randomly rotate (by a multiple of 90 degrees) and flip the image and
    label in a sample
"""
def __call__(self, sample):
image, label = sample['image'], sample['label']
k = np.random.randint(0, 4)
image = np.rot90(image, k)
label = np.rot90(label, k)
axis = np.random.randint(0, 2)
image = np.flip(image, axis=axis).copy()
label = np.flip(label, axis=axis).copy()
return {'image': image, 'label': label}
class RandomNoise(object):
def __init__(self, mu=0, sigma=0.1):
self.mu = mu
self.sigma = sigma
def __call__(self, sample):
image, label = sample['image'], sample['label']
noise = np.clip(self.sigma * np.random.randn(
image.shape[0], image.shape[1], image.shape[2]), -2*self.sigma, 2*self.sigma)
noise = noise + self.mu
image = image + noise
return {'image': image, 'label': label}
class CreateOnehotLabel(object):
def __init__(self, num_classes):
self.num_classes = num_classes
def __call__(self, sample):
image, label = sample['image'], sample['label']
onehot_label = np.zeros(
(self.num_classes, label.shape[0], label.shape[1], label.shape[2]), dtype=np.float32)
for i in range(self.num_classes):
onehot_label[i, :, :, :] = (label == i).astype(np.float32)
return {'image': image, 'label': label, 'onehot_label': onehot_label}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
image = sample['image']
image = image.reshape(
1, image.shape[0], image.shape[1], image.shape[2]).astype(np.float32)
if 'onehot_label' in sample:
return {'image': torch.from_numpy(image), 'label': torch.from_numpy(sample['label']).long(),
'onehot_label': torch.from_numpy(sample['onehot_label']).long()}
else:
return {'image': torch.from_numpy(image), 'label': torch.from_numpy(sample['label']).long()}
class TwoStreamBatchSampler(Sampler):
"""Iterate two sets of indices
An 'epoch' is one iteration through the primary indices.
During the epoch, the secondary indices are iterated through
as many times as needed.
"""
def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size):
self.primary_indices = primary_indices
self.secondary_indices = secondary_indices
self.secondary_batch_size = secondary_batch_size
self.primary_batch_size = batch_size - secondary_batch_size
assert len(self.primary_indices) >= self.primary_batch_size > 0
assert len(self.secondary_indices) >= self.secondary_batch_size > 0
def __iter__(self):
primary_iter = iterate_once(self.primary_indices)
secondary_iter = iterate_eternally(self.secondary_indices)
return (
primary_batch + secondary_batch
for (primary_batch, secondary_batch)
in zip(grouper(primary_iter, self.primary_batch_size),
grouper(secondary_iter, self.secondary_batch_size))
)
def __len__(self):
return len(self.primary_indices) // self.primary_batch_size
def iterate_once(iterable):
return np.random.permutation(iterable)
def iterate_eternally(indices):
def infinite_shuffles():
while True:
yield np.random.permutation(indices)
return itertools.chain.from_iterable(infinite_shuffles())
def grouper(iterable, n):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3) --> ABC DEF
args = [iter(iterable)] * n
return zip(*args)
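# A small usage sketch (index counts are illustrative): labeled indices go
# through the primary stream once per epoch while unlabeled indices cycle
# forever, so every batch mixes both kinds of samples.
def _demo_two_stream_sampler():
    labeled_idxs = list(range(16))
    unlabeled_idxs = list(range(16, 80))
    sampler = TwoStreamBatchSampler(labeled_idxs, unlabeled_idxs,
                                    batch_size=4, secondary_batch_size=2)
    return [batch for batch in sampler]  # each batch: 2 labeled + 2 unlabeled indices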
| 8,814 | 36.194093 | 112 |
py
|
SSL4MIS
|
SSL4MIS-master/code/dataloaders/utils.py
|
import os
import torch
import numpy as np
import torch.nn as nn
# import matplotlib.pyplot as plt
from skimage import measure
import scipy.ndimage as nd
def recursive_glob(rootdir='.', suffix=''):
"""Performs recursive glob with given suffix and rootdir
    :param rootdir: the root directory
    :param suffix: the suffix to be searched
"""
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames if filename.endswith(suffix)]
def get_cityscapes_labels():
return np.array([
# [ 0, 0, 0],
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[0, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32]])
def get_pascal_labels():
"""Load the mapping that associates pascal classes with label colors
Returns:
np.ndarray with dimensions (21, 3)
"""
return np.asarray([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
[64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
[0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
[0, 64, 128]])
def encode_segmap(mask):
"""Encode segmentation label images as pascal classes
Args:
mask (np.ndarray): raw segmentation label image of dimension
(M, N, 3), in which the Pascal classes are encoded as colours.
Returns:
(np.ndarray): class map with dimensions (M,N), where the value at
a given location is the integer denoting the class index.
"""
mask = mask.astype(int)
label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
for ii, label in enumerate(get_pascal_labels()):
label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii
label_mask = label_mask.astype(int)
return label_mask
def decode_seg_map_sequence(label_masks, dataset='pascal'):
rgb_masks = []
for label_mask in label_masks:
rgb_mask = decode_segmap(label_mask, dataset)
rgb_masks.append(rgb_mask)
rgb_masks = torch.from_numpy(np.array(rgb_masks).transpose([0, 3, 1, 2]))
return rgb_masks
def decode_segmap(label_mask, dataset, plot=False):
"""Decode segmentation class labels into a color image
Args:
label_mask (np.ndarray): an (M,N) array of integer values denoting
the class label at each spatial location.
plot (bool, optional): whether to show the resulting color image
in a figure.
Returns:
(np.ndarray, optional): the resulting decoded color image.
"""
if dataset == 'pascal':
n_classes = 21
label_colours = get_pascal_labels()
elif dataset == 'cityscapes':
n_classes = 19
label_colours = get_cityscapes_labels()
else:
raise NotImplementedError
r = label_mask.copy()
g = label_mask.copy()
b = label_mask.copy()
for ll in range(0, n_classes):
r[label_mask == ll] = label_colours[ll, 0]
g[label_mask == ll] = label_colours[ll, 1]
b[label_mask == ll] = label_colours[ll, 2]
rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
    if plot:
        import matplotlib.pyplot as plt  # imported lazily; the top-level import is commented out
        plt.imshow(rgb)
        plt.show()
    else:
        return rgb
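# A tiny round-trip sketch for the two helpers above (the colour chosen is
# Pascal class 1): encode an RGB label image to class indices, then decode
# the indices back to colours.
def _demo_segmap_roundtrip():
    rgb = np.zeros((4, 4, 3), dtype=np.uint8)
    rgb[:2] = [128, 0, 0]  # Pascal class 1
    ids = encode_segmap(rgb)  # (4, 4) integer map containing {0, 1}
    return decode_segmap(ids, dataset='pascal')  # (4, 4, 3) floats in [0, 1]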
def generate_param_report(logfile, param):
log_file = open(logfile, 'w')
# for key, val in param.items():
# log_file.write(key + ':' + str(val) + '\n')
log_file.write(str(param))
log_file.close()
def cross_entropy2d(logit, target, ignore_index=255, weight=None, size_average=True, batch_average=True):
n, c, h, w = logit.size()
# logit = logit.permute(0, 2, 3, 1)
target = target.squeeze(1)
if weight is None:
criterion = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, size_average=False)
else:
criterion = nn.CrossEntropyLoss(weight=torch.from_numpy(np.array(weight)).float().cuda(), ignore_index=ignore_index, size_average=False)
loss = criterion(logit, target.long())
if size_average:
loss /= (h * w)
if batch_average:
loss /= n
return loss
def lr_poly(base_lr, iter_, max_iter=100, power=0.9):
return base_lr * ((1 - float(iter_) / max_iter) ** power)
def get_iou(pred, gt, n_classes=21):
total_iou = 0.0
for i in range(len(pred)):
pred_tmp = pred[i]
gt_tmp = gt[i]
intersect = [0] * n_classes
union = [0] * n_classes
for j in range(n_classes):
match = (pred_tmp == j) + (gt_tmp == j)
it = torch.sum(match == 2).item()
un = torch.sum(match > 0).item()
intersect[j] += it
union[j] += un
iou = []
for k in range(n_classes):
if union[k] == 0:
continue
iou.append(intersect[k] / union[k])
img_iou = (sum(iou) / len(iou))
total_iou += img_iou
return total_iou
def get_dice(pred, gt):
total_dice = 0.0
pred = pred.long()
gt = gt.long()
for i in range(len(pred)):
pred_tmp = pred[i]
gt_tmp = gt[i]
dice = 2.0*torch.sum(pred_tmp*gt_tmp).item()/(1.0+torch.sum(pred_tmp**2)+torch.sum(gt_tmp**2)).item()
print(dice)
total_dice += dice
return total_dice
def get_mc_dice(pred, gt, num=2):
# num is the total number of classes, include the background
total_dice = np.zeros(num-1)
pred = pred.long()
gt = gt.long()
for i in range(len(pred)):
for j in range(1, num):
pred_tmp = (pred[i]==j)
gt_tmp = (gt[i]==j)
dice = 2.0*torch.sum(pred_tmp*gt_tmp).item()/(1.0+torch.sum(pred_tmp**2)+torch.sum(gt_tmp**2)).item()
            total_dice[j-1] += dice
return total_dice
def post_processing(prediction):
prediction = nd.binary_fill_holes(prediction)
label_cc, num_cc = measure.label(prediction,return_num=True)
total_cc = np.sum(prediction)
measure.regionprops(label_cc)
for cc in range(1,num_cc+1):
single_cc = (label_cc==cc)
single_vol = np.sum(single_cc)
if single_vol/total_cc<0.2:
prediction[single_cc]=0
return prediction
| 6,731 | 30.311628 | 144 |
py
|
SSL4MIS
|
SSL4MIS-master/code/dataloaders/dataset.py
|
import os
import cv2
import torch
import random
import numpy as np
from glob import glob
from torch.utils.data import Dataset
import h5py
from scipy.ndimage.interpolation import zoom
from torchvision import transforms
import itertools
from scipy import ndimage
from torch.utils.data.sampler import Sampler
import augmentations
from augmentations.ctaugment import OPS
import matplotlib.pyplot as plt
from PIL import Image
class BaseDataSets(Dataset):
def __init__(
self,
base_dir=None,
split="train",
num=None,
transform=None,
ops_weak=None,
ops_strong=None,
):
self._base_dir = base_dir
self.sample_list = []
self.split = split
self.transform = transform
self.ops_weak = ops_weak
self.ops_strong = ops_strong
assert bool(ops_weak) == bool(
ops_strong
), "For using CTAugment learned policies, provide both weak and strong batch augmentation policy"
if self.split == "train":
with open(self._base_dir + "/train_slices.list", "r") as f1:
self.sample_list = f1.readlines()
self.sample_list = [item.replace("\n", "") for item in self.sample_list]
elif self.split == "val":
with open(self._base_dir + "/val.list", "r") as f:
self.sample_list = f.readlines()
self.sample_list = [item.replace("\n", "") for item in self.sample_list]
if num is not None and self.split == "train":
self.sample_list = self.sample_list[:num]
print("total {} samples".format(len(self.sample_list)))
def __len__(self):
return len(self.sample_list)
def __getitem__(self, idx):
case = self.sample_list[idx]
if self.split == "train":
h5f = h5py.File(self._base_dir + "/data/slices/{}.h5".format(case), "r")
else:
h5f = h5py.File(self._base_dir + "/data/{}.h5".format(case), "r")
image = h5f["image"][:]
label = h5f["label"][:]
sample = {"image": image, "label": label}
if self.split == "train":
if None not in (self.ops_weak, self.ops_strong):
sample = self.transform(sample, self.ops_weak, self.ops_strong)
else:
sample = self.transform(sample)
sample["idx"] = idx
return sample
def random_rot_flip(image, label=None):
k = np.random.randint(0, 4)
image = np.rot90(image, k)
axis = np.random.randint(0, 2)
image = np.flip(image, axis=axis).copy()
if label is not None:
label = np.rot90(label, k)
label = np.flip(label, axis=axis).copy()
return image, label
else:
return image
def random_rotate(image, label):
angle = np.random.randint(-20, 20)
image = ndimage.rotate(image, angle, order=0, reshape=False)
label = ndimage.rotate(label, angle, order=0, reshape=False)
return image, label
def color_jitter(image):
if not torch.is_tensor(image):
np_to_tensor = transforms.ToTensor()
image = np_to_tensor(image)
# s is the strength of color distortion.
s = 1.0
jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
return jitter(image)
class CTATransform(object):
def __init__(self, output_size, cta):
self.output_size = output_size
self.cta = cta
def __call__(self, sample, ops_weak, ops_strong):
image, label = sample["image"], sample["label"]
image = self.resize(image)
label = self.resize(label)
to_tensor = transforms.ToTensor()
# fix dimensions
image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0)
label = torch.from_numpy(label.astype(np.uint8))
# apply augmentations
image_weak = augmentations.cta_apply(transforms.ToPILImage()(image), ops_weak)
image_strong = augmentations.cta_apply(image_weak, ops_strong)
label_aug = augmentations.cta_apply(transforms.ToPILImage()(label), ops_weak)
label_aug = to_tensor(label_aug).squeeze(0)
label_aug = torch.round(255 * label_aug).int()
sample = {
"image_weak": to_tensor(image_weak),
"image_strong": to_tensor(image_strong),
"label_aug": label_aug,
}
return sample
def cta_apply(self, pil_img, ops):
if ops is None:
return pil_img
for op, args in ops:
pil_img = OPS[op].f(pil_img, *args)
return pil_img
def resize(self, image):
x, y = image.shape
return zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=0)
class RandomGenerator(object):
def __init__(self, output_size):
self.output_size = output_size
def __call__(self, sample):
image, label = sample["image"], sample["label"]
# ind = random.randrange(0, img.shape[0])
# image = img[ind, ...]
# label = lab[ind, ...]
if random.random() > 0.5:
image, label = random_rot_flip(image, label)
elif random.random() > 0.5:
image, label = random_rotate(image, label)
x, y = image.shape
image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=0)
label = zoom(label, (self.output_size[0] / x, self.output_size[1] / y), order=0)
image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0)
label = torch.from_numpy(label.astype(np.uint8))
sample = {"image": image, "label": label}
return sample
class WeakStrongAugment(object):
"""returns weakly and strongly augmented images
Args:
        output_size (tuple): output size of network
"""
def __init__(self, output_size):
self.output_size = output_size
def __call__(self, sample):
image, label = sample["image"], sample["label"]
image = self.resize(image)
label = self.resize(label)
# weak augmentation is rotation / flip
image_weak, label = random_rot_flip(image, label)
# strong augmentation is color jitter
image_strong = color_jitter(image_weak).type("torch.FloatTensor")
# fix dimensions
image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0)
image_weak = torch.from_numpy(image_weak.astype(np.float32)).unsqueeze(0)
label = torch.from_numpy(label.astype(np.uint8))
sample = {
"image": image,
"image_weak": image_weak,
"image_strong": image_strong,
"label_aug": label,
}
return sample
def resize(self, image):
x, y = image.shape
return zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=0)
class TwoStreamBatchSampler(Sampler):
"""Iterate two sets of indices
An 'epoch' is one iteration through the primary indices.
During the epoch, the secondary indices are iterated through
as many times as needed.
"""
def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size):
self.primary_indices = primary_indices
self.secondary_indices = secondary_indices
self.secondary_batch_size = secondary_batch_size
self.primary_batch_size = batch_size - secondary_batch_size
assert len(self.primary_indices) >= self.primary_batch_size > 0
assert len(self.secondary_indices) >= self.secondary_batch_size > 0
def __iter__(self):
primary_iter = iterate_once(self.primary_indices)
secondary_iter = iterate_eternally(self.secondary_indices)
return (
primary_batch + secondary_batch
for (primary_batch, secondary_batch) in zip(
grouper(primary_iter, self.primary_batch_size),
grouper(secondary_iter, self.secondary_batch_size),
)
)
def __len__(self):
return len(self.primary_indices) // self.primary_batch_size
def iterate_once(iterable):
return np.random.permutation(iterable)
def iterate_eternally(indices):
def infinite_shuffles():
while True:
yield np.random.permutation(indices)
return itertools.chain.from_iterable(infinite_shuffles())
def grouper(iterable, n):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3) --> ABC DEF
args = [iter(iterable)] * n
return zip(*args)
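# A minimal wiring sketch (the path and split sizes are illustrative, not from
# this repository): build a training set with weak/strong augmentation and
# feed it through the two-stream sampler so each batch mixes labeled and
# unlabeled slices.
def _demo_semi_supervised_loader(base_dir="../data/ACDC"):
    db_train = BaseDataSets(base_dir=base_dir, split="train",
                            transform=WeakStrongAugment([256, 256]))
    labeled_idxs = list(range(0, 100))
    unlabeled_idxs = list(range(100, len(db_train)))
    batch_sampler = TwoStreamBatchSampler(labeled_idxs, unlabeled_idxs, 24, 12)
    return torch.utils.data.DataLoader(db_train, batch_sampler=batch_sampler)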
| 8,440 | 32.232283 | 105 |
py
|
SSL4MIS
|
SSL4MIS-master/code/dataloaders/brats_proprecessing.py
|
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from skimage import measure
import nibabel as nib
import SimpleITK as sitk
import glob
def brain_bbox(data, gt):
mask = (data != 0)
brain_voxels = np.where(mask != 0)
minZidx = int(np.min(brain_voxels[0]))
maxZidx = int(np.max(brain_voxels[0]))
minXidx = int(np.min(brain_voxels[1]))
maxXidx = int(np.max(brain_voxels[1]))
minYidx = int(np.min(brain_voxels[2]))
maxYidx = int(np.max(brain_voxels[2]))
data_bboxed = data[minZidx:maxZidx, minXidx:maxXidx, minYidx:maxYidx]
gt_bboxed = gt[minZidx:maxZidx, minXidx:maxXidx, minYidx:maxYidx]
return data_bboxed, gt_bboxed
def volume_bounding_box(data, gt, expend=0, status="train"):
data, gt = brain_bbox(data, gt)
print(data.shape)
mask = (gt != 0)
brain_voxels = np.where(mask != 0)
z, x, y = data.shape
minZidx = int(np.min(brain_voxels[0]))
maxZidx = int(np.max(brain_voxels[0]))
minXidx = int(np.min(brain_voxels[1]))
maxXidx = int(np.max(brain_voxels[1]))
minYidx = int(np.min(brain_voxels[2]))
maxYidx = int(np.max(brain_voxels[2]))
minZidx_jitterd = max(minZidx - expend, 0)
maxZidx_jitterd = min(maxZidx + expend, z)
minXidx_jitterd = max(minXidx - expend, 0)
maxXidx_jitterd = min(maxXidx + expend, x)
minYidx_jitterd = max(minYidx - expend, 0)
maxYidx_jitterd = min(maxYidx + expend, y)
data_bboxed = data[minZidx_jitterd:maxZidx_jitterd,
minXidx_jitterd:maxXidx_jitterd, minYidx_jitterd:maxYidx_jitterd]
print([minZidx, maxZidx, minXidx, maxXidx, minYidx, maxYidx])
print([minZidx_jitterd, maxZidx_jitterd,
minXidx_jitterd, maxXidx_jitterd, minYidx_jitterd, maxYidx_jitterd])
if status == "train":
gt_bboxed = np.zeros_like(data_bboxed, dtype=np.uint8)
gt_bboxed[expend:maxZidx_jitterd-expend, expend:maxXidx_jitterd -
expend, expend:maxYidx_jitterd - expend] = 1
return data_bboxed, gt_bboxed
if status == "test":
gt_bboxed = gt[minZidx_jitterd:maxZidx_jitterd,
minXidx_jitterd:maxXidx_jitterd, minYidx_jitterd:maxYidx_jitterd]
return data_bboxed, gt_bboxed
def itensity_normalize_one_volume(volume):
"""
    normalize the intensity of an nd volume based on the mean and std of the nonzero region
inputs:
volume: the input nd volume
outputs:
out: the normalized nd volume
"""
pixels = volume[volume > 0]
mean = pixels.mean()
std = pixels.std()
out = (volume - mean)/std
out_random = np.random.normal(0, 1, size=volume.shape)
# out[volume == 0] = out_random[volume == 0]
out = out.astype(np.float32)
return out
class MedicalImageDeal(object):
def __init__(self, img, percent=1):
self.img = img
self.percent = percent
@property
def valid_img(self):
from skimage import exposure
cdf = exposure.cumulative_distribution(self.img)
watershed = cdf[1][cdf[0] >= self.percent][0]
return np.clip(self.img, self.img.min(), watershed)
@property
def norm_img(self):
return (self.img - self.img.min()) / (self.img.max() - self.img.min())
all_flair = glob.glob("flair/*_flair.nii.gz")
for p in all_flair:
data = sitk.GetArrayFromImage(sitk.ReadImage(p))
lab = sitk.GetArrayFromImage(sitk.ReadImage(p.replace("flair", "seg")))
img, lab = brain_bbox(data, lab)
img = MedicalImageDeal(img, percent=0.999).valid_img
img = itensity_normalize_one_volume(img)
lab[lab > 0] = 1
uid = p.split("/")[-1]
sitk.WriteImage(sitk.GetImageFromArray(
img), "/media/xdluo/Data/brats19/data/flair/{}".format(uid))
sitk.WriteImage(sitk.GetImageFromArray(
lab), "/media/xdluo/Data/brats19/data/label/{}".format(uid))
| 3,862 | 33.801802 | 88 |
py
|
SSL4MIS
|
SSL4MIS-master/code/utils/losses.py
|
import torch
from torch.nn import functional as F
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
def dice_loss(score, target):
target = target.float()
smooth = 1e-5
intersect = torch.sum(score * target)
y_sum = torch.sum(target * target)
z_sum = torch.sum(score * score)
loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth)
loss = 1 - loss
return loss
def dice_loss1(score, target):
target = target.float()
smooth = 1e-5
intersect = torch.sum(score * target)
y_sum = torch.sum(target)
z_sum = torch.sum(score)
loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth)
loss = 1 - loss
return loss
def entropy_loss(p, C=2):
# p N*C*W*H*D
y1 = -1*torch.sum(p*torch.log(p+1e-6), dim=1) / \
torch.tensor(np.log(C)).cuda()
ent = torch.mean(y1)
return ent
def softmax_dice_loss(input_logits, target_logits):
"""Takes softmax on both sides and returns MSE loss
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
input_softmax = F.softmax(input_logits, dim=1)
target_softmax = F.softmax(target_logits, dim=1)
n = input_logits.shape[1]
dice = 0
for i in range(0, n):
dice += dice_loss1(input_softmax[:, i], target_softmax[:, i])
mean_dice = dice / n
return mean_dice
def entropy_loss_map(p, C=2):
ent = -1*torch.sum(p * torch.log(p + 1e-6), dim=1,
keepdim=True)/torch.tensor(np.log(C)).cuda()
return ent
def softmax_mse_loss(input_logits, target_logits, sigmoid=False):
"""Takes softmax on both sides and returns MSE loss
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
if sigmoid:
input_softmax = torch.sigmoid(input_logits)
target_softmax = torch.sigmoid(target_logits)
else:
input_softmax = F.softmax(input_logits, dim=1)
target_softmax = F.softmax(target_logits, dim=1)
mse_loss = (input_softmax-target_softmax)**2
return mse_loss
def softmax_kl_loss(input_logits, target_logits, sigmoid=False):
"""Takes softmax on both sides and returns KL divergence
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
if sigmoid:
input_log_softmax = torch.log(torch.sigmoid(input_logits))
target_softmax = torch.sigmoid(target_logits)
else:
input_log_softmax = F.log_softmax(input_logits, dim=1)
target_softmax = F.softmax(target_logits, dim=1)
# return F.kl_div(input_log_softmax, target_softmax)
kl_div = F.kl_div(input_log_softmax, target_softmax, reduction='mean')
# mean_kl_div = torch.mean(0.2*kl_div[:,0,...]+0.8*kl_div[:,1,...])
return kl_div
def symmetric_mse_loss(input1, input2):
"""Like F.mse_loss but sends gradients to both directions
Note:
    - Returns the mean over all elements.
- Sends gradients to both input1 and input2.
"""
assert input1.size() == input2.size()
return torch.mean((input1 - input2)**2)
class FocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha, (float, int)):
self.alpha = torch.Tensor([alpha, 1-alpha])
if isinstance(alpha, list):
self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim() > 2:
# N,C,H,W => N,C,H*W
input = input.view(input.size(0), input.size(1), -1)
input = input.transpose(1, 2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1, 1)
logpt = F.log_softmax(input, dim=1)
logpt = logpt.gather(1, target)
logpt = logpt.view(-1)
pt = Variable(logpt.data.exp())
if self.alpha is not None:
if self.alpha.type() != input.data.type():
self.alpha = self.alpha.type_as(input.data)
at = self.alpha.gather(0, target.data.view(-1))
logpt = logpt * Variable(at)
loss = -1 * (1-pt)**self.gamma * logpt
if self.size_average:
return loss.mean()
else:
return loss.sum()
class DiceLoss(nn.Module):
def __init__(self, n_classes):
super(DiceLoss, self).__init__()
self.n_classes = n_classes
def _one_hot_encoder(self, input_tensor):
tensor_list = []
for i in range(self.n_classes):
temp_prob = input_tensor == i * torch.ones_like(input_tensor)
tensor_list.append(temp_prob)
output_tensor = torch.cat(tensor_list, dim=1)
return output_tensor.float()
def _dice_loss(self, score, target):
target = target.float()
smooth = 1e-5
intersect = torch.sum(score * target)
y_sum = torch.sum(target * target)
z_sum = torch.sum(score * score)
loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth)
loss = 1 - loss
return loss
def forward(self, inputs, target, weight=None, softmax=False):
if softmax:
inputs = torch.softmax(inputs, dim=1)
target = self._one_hot_encoder(target)
if weight is None:
weight = [1] * self.n_classes
assert inputs.size() == target.size(), 'predict & target shape do not match'
class_wise_dice = []
loss = 0.0
for i in range(0, self.n_classes):
dice = self._dice_loss(inputs[:, i], target[:, i])
class_wise_dice.append(1.0 - dice.item())
loss += dice * weight[i]
return loss / self.n_classes
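# A short usage sketch for DiceLoss (shapes are illustrative): logits of
# shape (N, C, H, W) against an integer label map of shape (N, 1, H, W),
# with softmax applied inside the loss.
def _demo_dice_loss():
    criterion = DiceLoss(n_classes=4)
    logits = torch.randn(2, 4, 32, 32)
    labels = torch.randint(0, 4, (2, 1, 32, 32)).float()
    return criterion(logits, labels, softmax=True)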
def entropy_minmization(p):
y1 = -1*torch.sum(p*torch.log(p+1e-6), dim=1)
ent = torch.mean(y1)
return ent
def entropy_map(p):
ent_map = -1*torch.sum(p * torch.log(p + 1e-6), dim=1,
keepdim=True)
return ent_map
def compute_kl_loss(p, q):
p_loss = F.kl_div(F.log_softmax(p, dim=-1),
F.softmax(q, dim=-1), reduction='none')
q_loss = F.kl_div(F.log_softmax(q, dim=-1),
F.softmax(p, dim=-1), reduction='none')
# Using function "sum" and "mean" are depending on your task
p_loss = p_loss.mean()
q_loss = q_loss.mean()
loss = (p_loss + q_loss) / 2
return loss
| 6,990 | 30.777273 | 85 |
py
|
SSL4MIS
|
SSL4MIS-master/code/utils/util.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import pickle
import numpy as np
import re
from scipy.ndimage import distance_transform_edt as distance
from skimage import segmentation as skimage_seg
import torch
from torch.utils.data.sampler import Sampler
import torch.distributed as dist
import networks
# many issues with this function
def load_model(path):
"""Loads model and return it without DataParallel table."""
if os.path.isfile(path):
print("=> loading checkpoint '{}'".format(path))
checkpoint = torch.load(path)
for key in checkpoint["state_dict"]:
print(key)
# size of the top layer
N = checkpoint["state_dict"]["decoder.out_conv.bias"].size()
# build skeleton of the model
sob = "sobel.0.weight" in checkpoint["state_dict"].keys()
        model = networks.__dict__[checkpoint["arch"]](sobel=sob, out=int(N[0]))  # this module imports `networks`, not `models`
# deal with a dataparallel table
def rename_key(key):
if not "module" in key:
return key
return "".join(key.split(".module"))
checkpoint["state_dict"] = {
rename_key(key): val for key, val in checkpoint["state_dict"].items()
}
# load weights
model.load_state_dict(checkpoint["state_dict"])
print("Loaded")
else:
model = None
print("=> no checkpoint found at '{}'".format(path))
return model
def load_checkpoint(path, model, optimizer, from_ddp=False):
"""loads previous checkpoint
Args:
path (str): path to checkpoint
model (model): model to restore checkpoint to
optimizer (optimizer): torch optimizer to load optimizer state_dict to
from_ddp (bool, optional): load DistributedDataParallel checkpoint to regular model. Defaults to False.
Returns:
model, optimizer, epoch_num, loss
"""
# load checkpoint
checkpoint = torch.load(path)
# transfer state_dict from checkpoint to model
model.load_state_dict(checkpoint["state_dict"])
# transfer optimizer state_dict from checkpoint to model
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
# track loss
loss = checkpoint["loss"]
return model, optimizer, checkpoint["epoch"], loss.item()
def restore_model(logger, snapshot_path, model, optimizer, model_num=None):
    """wrapper function to read the log dir and restore a previous checkpoint
    Args:
        logger (Logger): logger object (for info output to console)
        snapshot_path (str): path to checkpoint directory
        model (model): model to restore the checkpoint to
        optimizer (optimizer): optimizer to restore the checkpoint to
    Returns:
model, optimizer, start_epoch, performance
"""
try:
# check if there is previous progress to be restored:
logger.info(f"Snapshot path: {snapshot_path}")
iter_num = []
name = "model_iter"
if model_num:
name = model_num
for filename in os.listdir(snapshot_path):
if name in filename:
basename, extension = os.path.splitext(filename)
iter_num.append(int(basename.split("_")[2]))
iter_num = max(iter_num)
for filename in os.listdir(snapshot_path):
if name in filename and str(iter_num) in filename:
model_checkpoint = filename
except Exception as e:
logger.warning(f"Error finding previous checkpoints: {e}")
try:
logger.info(f"Restoring model checkpoint: {model_checkpoint}")
model, optimizer, start_epoch, performance = load_checkpoint(
snapshot_path + "/" + model_checkpoint, model, optimizer
)
logger.info(f"Models restored from iteration {iter_num}")
return model, optimizer, start_epoch, performance
except Exception as e:
logger.warning(f"Unable to restore model checkpoint: {e}, using new model")
def save_checkpoint(epoch, model, optimizer, loss, path):
"""Saves model as checkpoint"""
torch.save(
{
"epoch": epoch,
"state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": loss,
},
path,
)
class UnifLabelSampler(Sampler):
"""Samples elements uniformely accross pseudolabels.
Args:
N (int): size of returned iterator.
images_lists: dict of key (target), value (list of data with this target)
"""
def __init__(self, N, images_lists):
self.N = N
self.images_lists = images_lists
self.indexes = self.generate_indexes_epoch()
def generate_indexes_epoch(self):
size_per_pseudolabel = int(self.N / len(self.images_lists)) + 1
res = np.zeros(size_per_pseudolabel * len(self.images_lists))
for i in range(len(self.images_lists)):
indexes = np.random.choice(
self.images_lists[i],
size_per_pseudolabel,
replace=(len(self.images_lists[i]) <= size_per_pseudolabel),
)
res[i * size_per_pseudolabel : (i + 1) * size_per_pseudolabel] = indexes
np.random.shuffle(res)
return res[: self.N].astype("int")
def __iter__(self):
return iter(self.indexes)
def __len__(self):
return self.N
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def learning_rate_decay(optimizer, t, lr_0):
for param_group in optimizer.param_groups:
lr = lr_0 / np.sqrt(1 + lr_0 * param_group["weight_decay"] * t)
param_group["lr"] = lr
class Logger:
"""Class to update every epoch to keep trace of the results
Methods:
- log() log and save
"""
def __init__(self, path):
self.path = path
self.data = []
def log(self, train_point):
self.data.append(train_point)
with open(os.path.join(self.path), "wb") as fp:
pickle.dump(self.data, fp, -1)
def compute_sdf(img_gt, out_shape):
"""
compute the signed distance map of binary mask
input: segmentation, shape = (batch_size, x, y, z)
output: the Signed Distance Map (SDM)
    sdf(x) =  0                           if x is on the segmentation boundary
             -min_{y in boundary}|x-y|    if x is inside the segmentation
             +min_{y in boundary}|x-y|    if x is outside the segmentation
normalize sdf to [-1,1]
"""
img_gt = img_gt.astype(np.uint8)
normalized_sdf = np.zeros(out_shape)
for b in range(out_shape[0]): # batch size
        posmask = img_gt[b].astype(bool)  # np.bool was removed in NumPy 1.24
if posmask.any():
negmask = ~posmask
posdis = distance(posmask)
negdis = distance(negmask)
boundary = skimage_seg.find_boundaries(posmask, mode="inner").astype(
np.uint8
)
sdf = (negdis - np.min(negdis)) / (np.max(negdis) - np.min(negdis)) - (
posdis - np.min(posdis)
) / (np.max(posdis) - np.min(posdis))
sdf[boundary == 1] = 0
normalized_sdf[b] = sdf
# assert np.min(sdf) == -1.0, print(np.min(posdis), np.max(posdis), np.min(negdis), np.max(negdis))
# assert np.max(sdf) == 1.0, print(np.min(posdis), np.min(negdis), np.max(posdis), np.max(negdis))
return normalized_sdf
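# A tiny sketch of compute_sdf on a toy batch: the map is negative inside the
# foreground cube, positive outside, zero on the boundary, and normalized to
# roughly [-1, 1] per volume.
def _demo_compute_sdf():
    gt = np.zeros((1, 8, 8, 8), dtype=np.uint8)
    gt[0, 2:6, 2:6, 2:6] = 1
    return compute_sdf(gt, gt.shape)  # shape (1, 8, 8, 8)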
# set up process group for distributed computing
def distributed_setup(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
print("setting up dist process group now")
dist.init_process_group("nccl", rank=rank, world_size=world_size)
def load_ddp_to_nddp(state_dict):
    """strip the 'module.' prefix that (Distributed)DataParallel adds to keys"""
    pattern = re.compile(r"^module\.")
    model_dict = {}
    for k, v in state_dict.items():
        model_dict[pattern.sub("", k)] = v
    return model_dict
| 8,190 | 31.121569 | 111 |
py
|
SSL4MIS
|
SSL4MIS-master/code/utils/ramps.py
|
# Copyright (c) 2018, Curious AI Ltd. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Functions for ramping hyperparameters up or down
Each function takes the current training step or epoch, and the
ramp length in the same format, and returns a multiplier between
0 and 1.
"""
import numpy as np
def sigmoid_rampup(current, rampup_length):
"""Exponential rampup from https://arxiv.org/abs/1610.02242"""
if rampup_length == 0:
return 1.0
else:
current = np.clip(current, 0.0, rampup_length)
phase = 1.0 - current / rampup_length
return float(np.exp(-5.0 * phase * phase))
def linear_rampup(current, rampup_length):
"""Linear rampup"""
assert current >= 0 and rampup_length >= 0
if current >= rampup_length:
return 1.0
else:
return current / rampup_length
def cosine_rampdown(current, rampdown_length):
"""Cosine rampdown from https://arxiv.org/abs/1608.03983"""
assert 0 <= current <= rampdown_length
return float(.5 * (np.cos(np.pi * current / rampdown_length) + 1))
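# A typical consistency-weight schedule built from sigmoid_rampup (the 0.1
# ceiling and 200-iteration ramp below are illustrative values, not prescribed
# by this module):
def _demo_consistency_weight(iter_num):
    return 0.1 * sigmoid_rampup(iter_num, 200.0)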
| 1,319 | 30.428571 | 76 |
py
|
SSL4MIS
|
SSL4MIS-master/code/utils/metrics.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2019/12/14 4:41 PM
# @Author : chuyu zhang
# @File : metrics.py
# @Software: PyCharm
import numpy as np
from medpy import metric
def cal_dice(prediction, label, num=2):
total_dice = np.zeros(num-1)
for i in range(1, num):
prediction_tmp = (prediction == i)
label_tmp = (label == i)
        prediction_tmp = prediction_tmp.astype(float)  # np.float was removed in NumPy 1.24
        label_tmp = label_tmp.astype(float)
dice = 2 * np.sum(prediction_tmp * label_tmp) / (np.sum(prediction_tmp) + np.sum(label_tmp))
total_dice[i - 1] += dice
return total_dice
def calculate_metric_percase(pred, gt):
dc = metric.binary.dc(pred, gt)
jc = metric.binary.jc(pred, gt)
hd = metric.binary.hd95(pred, gt)
asd = metric.binary.asd(pred, gt)
return dc, jc, hd, asd
def dice(input, target, ignore_index=None):
smooth = 1.
    # use clone so the in-place masking below does not modify the original tensors
iflat = input.clone().view(-1)
tflat = target.clone().view(-1)
if ignore_index is not None:
mask = tflat == ignore_index
tflat[mask] = 0
iflat[mask] = 0
intersection = (iflat * tflat).sum()
return (2. * intersection + smooth) / (iflat.sum() + tflat.sum() + smooth)
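# A small usage sketch for calculate_metric_percase on binary numpy masks
# (the rectangles are illustrative; medpy's hd95/asd require a non-empty
# foreground in both masks):
def _demo_metric_percase():
    pred = np.zeros((16, 16))
    pred[4:12, 4:12] = 1
    gt = np.zeros((16, 16))
    gt[5:13, 5:13] = 1
    return calculate_metric_percase(pred, gt)  # (dice, jaccard, hd95, asd)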
| 1,289 | 26.446809 | 100 |
py
|
ipfixcol2
|
ipfixcol2-master/src/tools/pcap2flow/pcap2flow.py
|
#!/usr/bin/env python3
"""
Simple tool for replaying NetFlow v5/v9 and IPFIX packets to a collector.
Author(s):
Lukas Hutak <[email protected]>
Date: July 2019
Copyright(c) 2019 CESNET z.s.p.o.
SPDX-License-Identifier: BSD-3-Clause
"""
import argparse
import socket  # used below for address resolution and sending; imported explicitly rather than via scapy
import struct
from scapy.all import *
# Dictionary with Transport Sessions
sessions = {}
def process_pcap():
"""
Open PCAP and send each NetFlow/IPFIX packet to a collector.
:return: None
"""
# Try to open the file
reader = PcapReader(args.file)
cnt_total = 0
cnt_sent = 0
for pkt in reader:
cnt_total += 1
if args.verbose:
print("Processing {}. packet".format(cnt_total))
if process_packet(pkt):
cnt_sent += 1
print("{} of {} packets have been processed and sent "
"over {} Transport Session(s)!".format(cnt_sent, cnt_total, len(sessions)))
def process_packet(pkt):
"""
Extract NetFlow v5/v9 or IPFIX payload and send it as a new packet to a collector.
:param pkt: Scapy packet to process and send
    :return: True if the packet has been sent to the destination. False otherwise.
:rtype: bool
"""
    # Determine IP addresses
    l3_data = pkt.getlayer("IP")
    if not l3_data:
        print("Unable to locate L3 layer. Skipping...")
        return False
ip_src = l3_data.src
ip_dst = l3_data.dst
# Determine protocol of Transport Layer
l4_data = None
proto = None
port_src = None
port_dst = None
    for l4_type in ["UDP", "TCP"]:
        if not l3_data.haslayer(l4_type):
            continue
        l4_data = l3_data.getlayer(l4_type)
        proto = l4_type
port_src = l4_data.sport
port_dst = l4_data.dport
break
if not proto:
if args.verbose:
print("Failed to locate L4 layer. Skipping...")
return False
# Check if the packet contains NetFlow v5/v9 or IPFIX payload
l7_data = l4_data.payload
raw_payload = l7_data.original
version = struct.unpack("!H", raw_payload[:2])[0]
if version not in [5, 9, 10]:
print("Payload doesn't contain NetFlow/IPFIX packet. Skipping...")
return False
# Send the packet
key = (ip_src, ip_dst, proto, port_src, port_dst)
send_packet(key, raw_payload)
return True
def send_packet(key, payload):
"""
Send packet to a collector.
To make sure that packets from different Transport Session (TS) are not mixed together,
the function creates and maintains independent UDP/TCP session for each original TS.
:param key: Identification of the original Transport Session
(src IP, dst IP, proto, src port, dst port)
:param payload: Raw NetFlow/IPFIX message to send
:return: None
"""
ts = sessions.get(key)
if not ts:
# Create a new Transport Session
proto = key[2]
if args.verbose:
print("Creating a new Transport Session for {}".format(key))
if args.proto != proto:
print("WARNING: Original flow packets exported over {proto_orig} "
"({src_ip}:{src_port} -> {dst_ip}:{dst_port}) are now being send over {proto_now}. "
"Collector could reject these flows due to different formatting rules!".format(
proto_orig=proto, proto_now=args.proto, src_ip=key[0], dst_ip=key[1], src_port=key[3],
dst_port=key[4]))
ts = create_socket()
sessions[key] = ts
# Send the packet
ts.sendall(payload)
def create_socket():
"""
Create a new socket and connect it to the collector.
:return: Socket
:rtype: socket.socket
"""
str2proto = {
"UDP": socket.SOCK_DGRAM,
"TCP": socket.SOCK_STREAM
}
family = socket.AF_UNSPEC
if args.v4_only:
family = socket.AF_INET
if args.v6_only:
family = socket.AF_INET6
net_proto = str2proto[args.proto]
s = None
for res in socket.getaddrinfo(args.addr, args.port, family, net_proto):
net_af, net_type, net_proto, net_cname, net_sa = res
try:
s = socket.socket(net_af, net_type, net_proto)
except socket.error as err:
s = None
continue
try:
s.connect(net_sa)
except socket.error as err:
s.close()
s = None
continue
break
if s is None:
raise RuntimeError("Failed to open socket!")
return s
def arg_check_port(value):
"""
Check if port is valid number
:param str value: String to convert
:return: Port
"""
num = int(value)
if num < 0 or num >= 2**16:
raise argparse.ArgumentTypeError("%s is not valid port number" % value)
return num
if __name__ == "__main__":
    # Parse arguments
    parser = argparse.ArgumentParser(
        description="Simple tool for replaying NetFlow v5/v9 and IPFIX packets to a collector.",
    )
    parser.add_argument("-i", dest="file", help="PCAP with NetFlow/IPFIX packets", required=True)
    parser.add_argument("-d", dest="addr", help="Destination IP address or hostname (default: %(default)s)",
                        default="127.0.0.1")
    parser.add_argument("-p", dest="port", help="Destination port number (default: %(default)d)",
                        default=4739, type=arg_check_port)
    parser.add_argument("-t", dest="proto", help="Connection type (default: %(default)s)",
                        default="UDP", choices=["UDP", "TCP"])
    parser.add_argument("-v", dest="verbose", help="Increase verbosity", default=False, action="store_true")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-4", dest="v4_only", help="Force the tool to send flows to an IPv4 address only",
                       default=False, action="store_true")
    group.add_argument("-6", dest="v6_only", help="Force the tool to send flows to an IPv6 address only",
                       default=False, action="store_true")
    args = parser.parse_args()

    # Process the PCAP file
    try:
        process_pcap()
    except Exception as err:
        print("ERROR: {}".format(err))
| 6,107 | 29.237624 | 111 |
py
|
iharm3d
|
iharm3d-master/prob/old_problems/mhdmodes2d/test/test.py
|
################################################################################
# #
#                        2D MHD LINEAR MODES CONVERGENCE                       #
# #
################################################################################
import os
import sys; sys.dont_write_bytecode = True
from subprocess import call
from shutil import copyfile
import glob
import numpy as np
#import h5py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pylab as pl
sys.path.insert(0, '../../../script/')
sys.path.insert(0, '../../../script/analysis/')
import util
import hdf5_to_dict as io
AUTO = False
for arg in sys.argv:
if arg == '-auto':
AUTO = True
RES = [16, 32, 64, 128]
# LOOP OVER EIGENMODES
MODES = [0,1,2,3] # 1,2,3
NAMES = ['ENTROPY', 'SLOW', 'ALFVEN', 'FAST']
NVAR = 8
VARS = ['rho', 'u', 'u1', 'u2', 'u3', 'B1', 'B2', 'B3']
amp = 1.e-4
k1 = 2.*np.pi
k2 = 2.*np.pi
k3 = 2.*np.pi
var0 = np.zeros(NVAR)
var0[0] = 1.
var0[1] = 1.
var0[5] = 1.
L1 = np.zeros([len(MODES), len(RES), NVAR])
powerfits = np.zeros([len(MODES), NVAR])
for n in xrange(len(MODES)):
# EIGENMODES
dvar = np.zeros(NVAR)
if MODES[n] == 0: # ENTROPY
dvar[0] = 1.
if MODES[n] == 1: # SLOW/SOUND
dvar[0] = 0.558104461559
dvar[1] = 0.744139282078
dvar[2] = -0.277124827421
dvar[3] = 0.0630348927707
dvar[5] = -0.164323721928
dvar[6] = 0.164323721928
if MODES[n] == 2: # ALFVEN
dvar[4] = 0.480384461415
dvar[7] = 0.877058019307
if MODES[n] == 3: # FAST
dvar[0] = 0.476395427447
dvar[1] = 0.635193903263
dvar[2] = -0.102965815319
dvar[3] = -0.316873207561
dvar[5] = 0.359559114174
dvar[6] = -0.359559114174
dvar *= amp
# USE DUMPS IN FOLDERS OF GIVEN FORMAT
for m in xrange(len(RES)):
print '../dumps_' + str(RES[m]) + '_' + str(MODES[n])
os.chdir('../dumps_' + str(RES[m]) + '_' + str(MODES[n]))
dfile = np.sort(glob.glob('dump*.h5'))[-1]
hdr = io.load_hdr(dfile)
geom = io.load_geom(hdr, dfile)
dump = io.load_dump(hdr, geom, dfile)
X1 = dump['x'][:,:,0]
X2 = dump['y'][:,:,0]
dvar_code = []
dvar_code.append(dump['RHO'][:,:,0] - var0[0])
dvar_code.append(dump['UU'][:,:,0] - var0[1])
dvar_code.append(dump['U1'][:,:,0] - var0[2])
dvar_code.append(dump['U2'][:,:,0] - var0[3])
dvar_code.append(dump['U3'][:,:,0] - var0[4])
dvar_code.append(dump['B1'][:,:,0] - var0[5])
dvar_code.append(dump['B2'][:,:,0] - var0[6])
dvar_code.append(dump['B3'][:,:,0] - var0[7])
dvar_sol = []
for k in xrange(NVAR):
dvar_sol.append(np.real(dvar[k])*np.cos(k1*X1 + k2*X2))
L1[n][m][k] = np.mean(np.fabs(dvar_code[k] - dvar_sol[k]))
mid = RES[m]/2
# Plot dvar
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
ax.plot(X1[:,mid], dvar_code[k][:,mid], marker='s', label=VARS[k])
ax.plot(X1[:,mid], dvar_sol[k][:,mid], marker='s', label=(VARS[k] + " analytic"))
plt.title(NAMES[MODES[n]] + ' ' + str(RES[m]))
plt.legend(loc=1)
plt.savefig('../test/modes_' + NAMES[MODES[n]] + '_' + str(RES[m]) + '.png', bbox_inches='tight')
# fig = plt.figure(figsize=(16.18,10))
# ax = fig.add_subplot(1,1,1)
# for k in xrange(NVAR):
# if abs(dvar[k]) != 0.:
# ax.plot(X1[:,mid,mid], dvar_sol[k][:,mid,mid], marker='s', label=(VARS[k] + " analytic"))
# plt.title(NAMES[MODES[n]] + ' ' + str(RES[m]) + ' analytic')
# plt.legend(loc=1)
# plt.savefig('../test/modes_' + NAMES[MODES[n]] + '_' + str(RES[m]) + '_ana' + '.png', bbox_inches='tight')
# MEASURE CONVERGENCE
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
powerfits[n,k] = np.polyfit(np.log(RES), np.log(L1[n,:,k]), 1)[0]
os.chdir('../test')
if not AUTO:
# MAKE PLOTS
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
ax.plot(RES, L1[n,:,k], marker='s', label=VARS[k])
ax.plot([RES[0]/2., RES[-1]*2.],
10.*amp*np.asarray([RES[0]/2., RES[-1]*2.])**-2.,
color='k', linestyle='--', label='N^-2')
plt.xscale('log', basex=2); plt.yscale('log')
plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
plt.xlabel('N'); plt.ylabel('L1')
plt.title(NAMES[MODES[n]])
plt.legend(loc=1)
plt.savefig('mhdmodes2d_' + NAMES[MODES[n]] + '.png', bbox_inches='tight')
if AUTO:
data = {}
data['SOL'] = -2.*np.ones([len(MODES), NVAR])  # expected slope -2 (np.zeros here zeroed it out)
data['CODE'] = powerfits
import pickle
pickle.dump(data, open('data.p', 'wb'))
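# Illustrative check (not from the original script): a second-order scheme
# gives L1 ~ C*N**-2, so the slopes stored in 'CODE' should approach -2.
# On exact synthetic data the fit recovers the slope to round-off:
#   >>> np.polyfit(np.log([16., 32., 64.]),
#   ...            np.log(1e-4*np.array([16., 32., 64.])**-2.), 1)[0]
#   -2.0  (up to float round-off)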
| 4,815 | 29.871795 | 112 |
py
|
iharm3d
|
iharm3d-master/prob/old_problems/mhdmodes2d/test/mhdmodes2d.py
|
################################################################################
# #
#                        2D MHD LINEAR MODES CONVERGENCE                       #
# #
################################################################################
import os
import sys; sys.dont_write_bytecode = True
from subprocess import call
from shutil import copyfile
import glob
import numpy as np
#import h5py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pylab as pl
sys.path.insert(0, '../script/')
sys.path.insert(0, '../script/analysis/')
import util
import hdf5_to_dict as io
TMP_DIR = 'TMP'
TMP_BUILD = 'build_tmp.py'
util.safe_remove(TMP_DIR)
AUTO = False
for arg in sys.argv:
if arg == '-auto':
AUTO = True
RES = [16, 32, 64]#, 128]
util.make_dir(TMP_DIR)
os.chdir('../prob/mhdmodes2d/')
copyfile('build.py', TMP_BUILD)
# COMPILE CODE AT MULTIPLE RESOLUTIONS USING SEPARATE BUILD FILE
for n in xrange(len(RES)):
util.change_cparm('N1TOT', RES[n], TMP_BUILD)
util.change_cparm('N2TOT', RES[n], TMP_BUILD)
call(['python', TMP_BUILD, '-dir', TMP_DIR])
call(['cp', os.path.join(os.getcwd(), TMP_DIR, 'bhlight'),
'../../test/' + TMP_DIR + '/bhlight_' + str(RES[n])])
copyfile(os.path.join(os.getcwd(), TMP_DIR, 'param_template.dat'), '../../test/' +
TMP_DIR + '/param_template.dat')
util.safe_remove(TMP_BUILD)
util.safe_remove(TMP_DIR)
os.chdir('../../test/')
# LOOP OVER EIGENMODES
MODES = [1, 2, 3]
NAMES = ['ENTROPY', 'SLOW', 'ALFVEN', 'FAST']
NVAR = 8
VARS = ['rho', 'u', 'u1', 'u2', 'u3', 'B1', 'B2', 'B3']
amp = 1.e-4
k1 = 2.*np.pi
k2 = 2.*np.pi
var0 = np.zeros(NVAR)
var0[0] = 1.
var0[1] = 1.
var0[5] = 1.
L1 = np.zeros([len(MODES), len(RES), NVAR])
powerfits = np.zeros([len(MODES), NVAR])
for n in xrange(len(MODES)):
util.change_rparm('nmode', MODES[n], TMP_DIR + '/param_template.dat')
os.chdir(TMP_DIR)
print os.getcwd()
# EIGENMODES
dvar = np.zeros(NVAR)
if MODES[n] == 0: # ENTROPY
dvar[0] = 1.
if MODES[n] == 1: # SLOW/SOUND
dvar[0] = 0.558104461559
dvar[1] = 0.744139282078
dvar[2] = -0.277124827421
dvar[3] = 0.0630348927707
dvar[5] = -0.164323721928
dvar[6] = 0.164323721928
if MODES[n] == 2: # ALFVEN
dvar[4] = 0.480384461415
dvar[7] = 0.877058019307
if MODES[n] == 3: # FAST
dvar[0] = 0.476395427447
dvar[1] = 0.635193903263
dvar[2] = -0.102965815319
dvar[3] = -0.316873207561
dvar[5] = 0.359559114174
dvar[6] = -0.359559114174
dvar *= amp
# RUN PROBLEM FOR EACH RESOLUTION AND ANALYZE RESULT
for m in xrange(len(RES)):
print ['./bhlight_' + str(RES[m]), '-p', 'param_template.dat']
call(['./bhlight_' + str(RES[m]), '-p', 'param_template.dat'])
dfiles = np.sort(glob.glob('dumps/dump*.h5'))
dump = io.load_dump(dfiles[-1])
X1 = dump['X1'][:,:,0]
X2 = dump['X2'][:,:,0]
dvar_code = []
dvar_code.append(dump['RHO'][:,:,0] - var0[0])
dvar_code.append(dump['UU'][:,:,0] - var0[1])
dvar_code.append(dump['U1'][:,:,0] - var0[2])
dvar_code.append(dump['U2'][:,:,0] - var0[3])
dvar_code.append(dump['U3'][:,:,0] - var0[4])
dvar_code.append(dump['B1'][:,:,0] - var0[5])
dvar_code.append(dump['B2'][:,:,0] - var0[6])
dvar_code.append(dump['B3'][:,:,0] - var0[7])
dvar_sol = []
for k in xrange(NVAR):
dvar_sol.append(np.real(dvar[k])*np.cos(k1*X1 + k2*X2))
if abs(dvar[k]) != 0.:
L1[n][m][k] = np.mean(np.fabs(dvar_code[k] - dvar_sol[k]))
# MEASURE CONVERGENCE
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
powerfits[n,k] = np.polyfit(np.log(RES), np.log(L1[n,:,k]), 1)[0]
os.chdir('../')
if not AUTO:
# MAKE PLOTS
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
ax.plot(RES, L1[n,:,k], marker='s', label=VARS[k])
ax.plot([RES[0]/2., RES[-1]*2.],
10.*amp*np.asarray([RES[0]/2., RES[-1]*2.])**-2.,
color='k', linestyle='--', label='N^-2')
plt.xscale('log', basex=2); plt.yscale('log')
plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
plt.xlabel('N'); plt.ylabel('L1')
plt.title(NAMES[MODES[n]])
plt.legend(loc=1)
plt.savefig('mhdmodes2d_' + NAMES[MODES[n]] + '.png', bbox_inches='tight')
if AUTO:
data = {}
data['SOL'] = -2.*np.ones([len(MODES), NVAR])  # expected slope -2 (np.zeros here zeroed it out)
data['CODE'] = powerfits
import pickle
pickle.dump(data, open('data.p', 'wb'))
# CLEAN UP
util.safe_remove(TMP_DIR)
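# Usage sketch (inferred from the '-auto' handling above): run from the test/
# directory; a plain run saves convergence plots, '-auto' saves pickled slopes:
#   python mhdmodes2d.py          # writes mhdmodes2d_<MODE>.png
#   python mhdmodes2d.py -auto    # writes data.p with 'SOL'/'CODE' arrays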
| 4,670 | 28.942308 | 83 |
py
|
iharm3d
|
iharm3d-master/prob/old_problems/mhdmodes3d/test/test_3D.py
|
################################################################################
# #
#                        3D MHD LINEAR MODES CONVERGENCE                       #
# #
################################################################################
import os
import sys; sys.dont_write_bytecode = True
from subprocess import call
from shutil import copyfile
import glob
import numpy as np
#import h5py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pylab as pl
sys.path.insert(0, '../../../script/')
sys.path.insert(0, '../../../script/analysis/')
import util
import hdf5_to_dict as io
AUTO = False
for arg in sys.argv:
if arg == '-auto':
AUTO = True
RES = [16, 32, 64]#, 128]
# LOOP OVER EIGENMODES
MODES = [0,1,2,3]
NAMES = ['ENTROPY', 'SLOW', 'ALFVEN', 'FAST']
NVAR = 8
VARS = ['rho', 'u', 'u1', 'u2', 'u3', 'B1', 'B2', 'B3']
amp = 1.e-4
k1 = 2.*np.pi
k2 = 2.*np.pi
k3 = 2.*np.pi
var0 = np.zeros(NVAR)
var0[0] = 1.
var0[1] = 1.
# Magnetic field
var0[5] = 1.
var0[6] = 0.
var0[7] = 0.
L1 = np.zeros([len(MODES), len(RES), NVAR])
powerfits = np.zeros([len(MODES), NVAR])
for n in xrange(len(MODES)):
# EIGENMODES
dvar = np.zeros(NVAR)
if MODES[n] == 0: # ENTROPY
dvar[0] = 1.
if MODES[n] == 1: # SLOW/SOUND
dvar[0] = 0.556500332363
dvar[1] = 0.742000443151
dvar[2] = -0.282334999306
dvar[3] = 0.0367010491491
dvar[4] = 0.0367010491491
dvar[5] = -0.195509141461
dvar[6] = 0.0977545707307
dvar[7] = 0.0977545707307
if MODES[n] == 2: # ALFVEN
# dvar[4] = 0.480384461415
# dvar[7] = 0.877058019307
dvar[3] = -0.339683110243
dvar[4] = 0.339683110243
dvar[6] = 0.620173672946
dvar[7] = -0.620173672946
if MODES[n] == 3: # FAST
# dvar[0] = 0.476395427447
# dvar[1] = 0.635193903263
# dvar[2] = -0.102965815319
# dvar[3] = -0.316873207561
# dvar[5] = 0.359559114174
# dvar[6] = -0.359559114174
dvar[0] = 0.481846076323
dvar[1] = 0.642461435098
dvar[2] = -0.0832240462505
dvar[3] = -0.224080007379
dvar[4] = -0.224080007379
dvar[5] = 0.406380545676
dvar[6] = -0.203190272838
dvar[7] = -0.203190272838
dvar *= amp
# USE DUMPS IN FOLDERS OF GIVEN FORMAT
for m in xrange(len(RES)):
print '../dumps_' + str(RES[m]) + '_' + str(MODES[n])
os.chdir('../dumps_' + str(RES[m]) + '_' + str(MODES[n]))
dfile = np.sort(glob.glob('dump*.h5'))[-1]
hdr = io.load_hdr(dfile)
geom = io.load_geom(hdr, dfile)
dump = io.load_dump(hdr, geom, dfile)
X1 = dump['x']
X2 = dump['y']
X3 = dump['z']
dvar_code = []
dvar_code.append(dump['RHO'] - var0[0])
dvar_code.append(dump['UU'] - var0[1])
dvar_code.append(dump['U1'] - var0[2])
dvar_code.append(dump['U2'] - var0[3])
dvar_code.append(dump['U3'] - var0[4])
dvar_code.append(dump['B1'] - var0[5])
dvar_code.append(dump['B2'] - var0[6])
dvar_code.append(dump['B3'] - var0[7])
dvar_sol = []
for k in xrange(NVAR):
dvar_sol.append(np.real(dvar[k])*np.cos(k1*X1 + k2*X2 + k3*X3))
L1[n][m][k] = np.mean(np.fabs(dvar_code[k] - dvar_sol[k]))
mid = RES[m]/2
# Plot slice at each timestep
# for fnum in xrange(len(np.sort(glob.glob('dump*.h5')))):
# dfile = np.sort(glob.glob('dump*.h5'))[fnum]
#
# hdr = io.load_hdr(dfile)
# geom = io.load_geom(hdr, dfile)
# dump = io.load_dump(hdr, geom, dfile)
#
# X1 = dump['x']
# X2 = dump['y']
# X3 = dump['z']
#
# dvar_code = []
# dvar_code.append(dump['RHO'] - var0[0])
# dvar_code.append(dump['UU'] - var0[1])
# dvar_code.append(dump['U1'] - var0[2])
# dvar_code.append(dump['U2'] - var0[3])
# dvar_code.append(dump['U3'] - var0[4])
# dvar_code.append(dump['B1'] - var0[5])
# dvar_code.append(dump['B2'] - var0[6])
# dvar_code.append(dump['B3'] - var0[7])
#
# dvar_plane = []
# for k in xrange(NVAR):
# dvar_plane.append(np.zeros((dump['N1'], dump['N2'])))
# for i in xrange(dump['N1']):
# for j in xrange(dump['N2']):
# dvar_plane[k][i,j] = dvar_code[k][i,j,int(i/2 + j/2)]
#
# # Plot dvar
# for k in xrange(NVAR):
# if abs(dvar[k]) != 0.:
# fig = plt.figure(figsize=(16.18,10))
# ax = fig.add_subplot(1,1,1)
# ax.pcolormesh(X1[:,:,mid], X2[:,:,mid], dvar_code[k][:,:,mid], label=VARS[k])
# #ax.plot(X1[:,mid,mid], dvar_sol[k][:,mid,mid], marker='s', label=(VARS[k] + " analytic"))
# plt.title(NAMES[MODES[n]] + ' ' + VARS[k] + ' ' + str(RES[m]))
# plt.legend(loc=1)
# plt.savefig('../test/modes_' + NAMES[MODES[n]] + '_' + VARS[k] + '_' + str(RES[m]) + '_' + str(fnum) + '.png', bbox_inches='tight')
# MEASURE CONVERGENCE
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
powerfits[n,k] = np.polyfit(np.log(RES), np.log(L1[n,:,k]), 1)[0]
os.chdir('../test')
if not AUTO:
# MAKE PLOTS
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
ax.plot(RES, L1[n,:,k], marker='s', label=VARS[k])
ax.plot([RES[0]/2., RES[-1]*2.],
10.*amp*np.asarray([RES[0]/2., RES[-1]*2.])**-2.,
color='k', linestyle='--', label='N^-2')
plt.xscale('log', basex=2); plt.yscale('log')
plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
plt.xlabel('N'); plt.ylabel('L1')
plt.title(NAMES[MODES[n]])
plt.legend(loc=1)
plt.savefig('mhdmodes3d_' + NAMES[MODES[n]] + '.png', bbox_inches='tight')
if AUTO:
data = {}
data['SOL'] = -2.*np.ones([len(MODES), NVAR])  # expected slope -2 (np.zeros here zeroed it out)
data['CODE'] = powerfits
import pickle
pickle.dump(data, open('data.p', 'wb'))
| 5,945 | 29.182741 | 143 |
py
|
iharm3d
|
iharm3d-master/prob/old_problems/mhdmodes3d/test/mhdmodes3d.py
|
################################################################################
# #
#                        3D MHD LINEAR MODES CONVERGENCE                       #
# #
################################################################################
import os
import sys; sys.dont_write_bytecode = True
from subprocess import call
from shutil import copyfile
import glob
import numpy as np
#import h5py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pylab as pl
sys.path.insert(0, '../../../script/')
sys.path.insert(0, '../../../script/analysis/')
import util
import hdf5_to_dict as io
AUTO = False
for arg in sys.argv:
if arg == '-auto':
AUTO = True
RES = [16, 32, 64]#, 128]
# LOOP OVER EIGENMODES
MODES = [0,1,2,3] # 1,2,3
NAMES = ['ENTROPY', 'SLOW', 'ALFVEN', 'FAST']
NVAR = 8
VARS = ['rho', 'u', 'u1', 'u2', 'u3', 'B1', 'B2', 'B3']
amp = 1.e-4
k1 = 2.*np.pi
k2 = 2.*np.pi
k3 = 2.*np.pi
var0 = np.zeros(NVAR)
var0[0] = 1.
var0[1] = 1.
var0[5] = 1.
L1 = np.zeros([len(MODES), len(RES), NVAR])
powerfits = np.zeros([len(MODES), NVAR])
for n in xrange(len(MODES)):
# EIGENMODES
dvar = np.zeros(NVAR)
if MODES[n] == 0: # ENTROPY
dvar[0] = 1.
if MODES[n] == 1: # SLOW/SOUND
dvar[0] = 0.556500332363
dvar[1] = 0.742000443151
dvar[2] = -0.282334999306
dvar[3] = 0.0367010491491
dvar[4] = 0.0367010491491
dvar[5] = -0.195509141461
dvar[6] = 0.0977545707307
dvar[7] = 0.0977545707307
if MODES[n] == 2: # ALFVEN
dvar[4] = 0.480384461415
dvar[7] = 0.877058019307
if MODES[n] == 3: # FAST
dvar[0] = 0.476395427447
dvar[1] = 0.635193903263
dvar[2] = -0.102965815319
dvar[3] = -0.316873207561
dvar[5] = 0.359559114174
dvar[6] = -0.359559114174
dvar *= amp
# USE DUMPS IN FOLDERS OF GIVEN FORMAT
for m in xrange(len(RES)):
print '../dumps_' + str(RES[m]) + '_' + str(MODES[n])
os.chdir('../dumps_' + str(RES[m]) + '_' + str(MODES[n]))
dfile = np.sort(glob.glob('dump*.h5'))[-1]
hdr = io.load_hdr(dfile)
geom = io.load_geom(hdr, dfile)
dump = io.load_dump(hdr, geom, dfile)
#X1 = dump['X1'][:,:,:]
#X2 = dump['X2'][:,:,:]
#X3 = dump['X3'][:,:,:]
X1 = dump['x'][:,:,:]
X2 = dump['y'][:,:,:]
X3 = dump['z'][:,:,:]
dvar_code = []
dvar_code.append(dump['RHO'][:,:,:] - var0[0])
dvar_code.append(dump['UU'][:,:,:] - var0[1])
dvar_code.append(dump['U1'][:,:,:] - var0[2])
dvar_code.append(dump['U2'][:,:,:] - var0[3])
dvar_code.append(dump['U3'][:,:,:] - var0[4])
dvar_code.append(dump['B1'][:,:,:] - var0[5])
dvar_code.append(dump['B2'][:,:,:] - var0[6])
dvar_code.append(dump['B3'][:,:,:] - var0[7])
dvar_sol = []
for k in xrange(NVAR):
dvar_sol.append(np.real(dvar[k])*np.cos(k1*X1 + k2*X2 + k3*X3))
L1[n][m][k] = np.mean(np.fabs(dvar_code[k] - dvar_sol[k]))
mid = RES[m]/2
# Plot dvar
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
ax.plot(X1[:,mid,mid], dvar_code[k][:,mid,mid], marker='s', label=VARS[k])
ax.plot(X1[:,mid,mid], dvar_sol[k][:,mid,mid], marker='s', label=(VARS[k] + " analytic"))
plt.title(NAMES[MODES[n]] + ' ' + str(RES[m]))
plt.legend(loc=1)
plt.savefig('../test/modes_' + NAMES[MODES[n]] + '_' + str(RES[m]) + '.png', bbox_inches='tight')
# fig = plt.figure(figsize=(16.18,10))
# ax = fig.add_subplot(1,1,1)
# for k in xrange(NVAR):
# if abs(dvar[k]) != 0.:
# ax.plot(X1[:,mid,mid], dvar_sol[k][:,mid,mid], marker='s', label=(VARS[k] + " analytic"))
# plt.title(NAMES[MODES[n]] + ' ' + str(RES[m]) + ' analytic')
# plt.legend(loc=1)
# plt.savefig('../test/modes_' + NAMES[MODES[n]] + '_' + str(RES[m]) + '_ana' + '.png', bbox_inches='tight')
# MEASURE CONVERGENCE
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
powerfits[n,k] = np.polyfit(np.log(RES), np.log(L1[n,:,k]), 1)[0]
os.chdir('../test')
if not AUTO:
# MAKE PLOTS
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
ax.plot(RES, L1[n,:,k], marker='s', label=VARS[k])
ax.plot([RES[0]/2., RES[-1]*2.],
10.*amp*np.asarray([RES[0]/2., RES[-1]*2.])**-2.,
color='k', linestyle='--', label='N^-2')
plt.xscale('log', basex=2); plt.yscale('log')
plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
plt.xlabel('N'); plt.ylabel('L1')
plt.title(NAMES[MODES[n]])
plt.legend(loc=1)
plt.savefig('mhdmodes3d_' + NAMES[MODES[n]] + '.png', bbox_inches='tight')
if AUTO:
data = {}
data['SOL'] = -2.*np.ones([len(MODES), NVAR])  # expected slope -2 (np.zeros here zeroed it out)
data['CODE'] = powerfits
import pickle
pickle.dump(data, open('data.p', 'wb'))
| 5,017 | 29.785276 | 112 |
py
|
iharm3d
|
iharm3d-master/prob/old_problems/mhdmodes3d/test/test_faux2D.py
|
################################################################################
# #
#                    MHD LINEAR MODES CONVERGENCE (FAUX-2D)                    #
# #
################################################################################
import os
import sys; sys.dont_write_bytecode = True
from subprocess import call
from shutil import copyfile
import glob
import numpy as np
#import h5py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pylab as pl
sys.path.insert(0, '../../../script/')
sys.path.insert(0, '../../../script/analysis/')
import util
import hdf5_to_dict as io
AUTO = False
for arg in sys.argv:
if arg == '-auto':
AUTO = True
RES = [16, 32, 64]#, 128]
dir = 1  # wave-propagation axis (1, 2, or 3); note this shadows the builtin dir()
# LOOP OVER EIGENMODES
MODES = [1,2,3] # 1,2,3
NAMES = ['ENTROPY', 'SLOW', 'ALFVEN', 'FAST']
NVAR = 8
VARS = ['rho', 'u', 'u1', 'u2', 'u3', 'B1', 'B2', 'B3']
amp = 1.e-4
k1 = 2.*np.pi
k2 = 2.*np.pi
k3 = 2.*np.pi
var0 = np.zeros(NVAR)
var0[0] = 1.
var0[1] = 1.
# Choose background B-field direction based on propagation direction
if dir == 1:
var0[6] = 1.
elif dir == 2:
var0[7] = 1.
elif dir == 3:
var0[5] = 1.
L1 = np.zeros([len(MODES), len(RES), NVAR])
powerfits = np.zeros([len(MODES), NVAR])
for n in xrange(len(MODES)):
# EIGENMODES FAUX-2D
dvar = np.zeros(NVAR)
if MODES[n] == 0: # ENTROPY
dvar[0] = 1.
if MODES[n] == 1: # SLOW/SOUND
dvar[0] = 0.558104461559
dvar[1] = 0.744139282078
if dir == 1:
dvar[3] = -0.277124827421
dvar[4] = 0.0630348927707
dvar[6] = -0.164323721928
dvar[7] = 0.164323721928
if dir == 2:
dvar[4] = -0.277124827421
dvar[2] = 0.0630348927707
dvar[7] = -0.164323721928
dvar[5] = 0.164323721928
if dir == 3:
dvar[2] = -0.277124827421
dvar[3] = 0.0630348927707
dvar[5] = -0.164323721928
dvar[6] = 0.164323721928
if MODES[n] == 2: # ALFVEN
if dir == 1:
dvar[2] = 0.480384461415
dvar[5] = 0.877058019307
elif dir == 2:
dvar[3] = 0.480384461415
dvar[6] = 0.877058019307
elif dir == 3:
dvar[4] = 0.480384461415
dvar[7] = 0.877058019307
if MODES[n] == 3: # FAST
dvar[0] = 0.476395427447
dvar[1] = 0.635193903263
if dir == 1:
dvar[3] = -0.102965815319
dvar[4] = -0.316873207561
dvar[6] = 0.359559114174
dvar[7] = -0.359559114174
if dir == 2:
dvar[4] = -0.102965815319
dvar[2] = -0.316873207561
dvar[7] = 0.359559114174
dvar[5] = -0.359559114174
if dir == 3:
dvar[2] = -0.102965815319
dvar[3] = -0.316873207561
dvar[5] = 0.359559114174
dvar[6] = -0.359559114174
dvar *= amp
# USE DUMPS IN FOLDERS OF GIVEN FORMAT
for m in xrange(len(RES)):
print '../dumps_' + str(RES[m]) + '_' + str(MODES[n])
os.chdir('../dumps_' + str(RES[m]) + '_' + str(MODES[n]))
dfile = np.sort(glob.glob('dump*.h5'))[-1]
hdr = io.load_hdr(dfile)
geom = io.load_geom(hdr, dfile)
dump = io.load_dump(hdr, geom, dfile)
X1 = dump['x']
X2 = dump['y']
X3 = dump['z']
dvar_code = []
dvar_code.append(dump['RHO'] - var0[0])
dvar_code.append(dump['UU'] - var0[1])
dvar_code.append(dump['U1'] - var0[2])
dvar_code.append(dump['U2'] - var0[3])
dvar_code.append(dump['U3'] - var0[4])
dvar_code.append(dump['B1'] - var0[5])
dvar_code.append(dump['B2'] - var0[6])
dvar_code.append(dump['B3'] - var0[7])
dvar_sol = []
for k in xrange(NVAR):
if dir == 1:
dvar_sol.append(np.real(dvar[k])*np.cos(k1*X2 + k2*X3))
elif dir == 2:
dvar_sol.append(np.real(dvar[k])*np.cos(k1*X1 + k2*X3))
elif dir == 3:
dvar_sol.append(np.real(dvar[k])*np.cos(k1*X1 + k2*X2))
L1[n][m][k] = np.mean(np.fabs(dvar_code[k] - dvar_sol[k]))
mid = RES[m]/2
# geom_loaded = False
# # Plot each file
# if RES[m] == 64:
# for fnum in xrange(len(np.sort(glob.glob('dump*.h5')))):
# dfile = np.sort(glob.glob('dump*.h5'))[fnum]
#
# if not geom_loaded:
# hdr = io.load_hdr(dfile)
# geom = io.load_geom(hdr, dfile)
# dump = io.load_dump(hdr, geom, dfile)
#
# X1 = dump['x']
# X2 = dump['y']
# X3 = dump['z']
#
# dvar_code = []
# dvar_code.append(dump['RHO'] - var0[0])
# dvar_code.append(dump['UU'] - var0[1])
# dvar_code.append(dump['U1'] - var0[2])
# dvar_code.append(dump['U2'] - var0[3])
# dvar_code.append(dump['U3'] - var0[4])
# dvar_code.append(dump['B1'] - var0[5])
# dvar_code.append(dump['B2'] - var0[6])
# dvar_code.append(dump['B3'] - var0[7])
#
# # Plot dvar direct
# for k in xrange(NVAR):
# if abs(dvar[k]) != 0.:
# fig = plt.figure(figsize=(16.18,10))
# ax = fig.add_subplot(1,1,1)
# if dir == 1:
# ax.pcolormesh(X2[mid,:,:], X3[mid,:,:], dvar_code[k][mid,:,:], label=VARS[k])
# elif dir == 2:
# ax.pcolormesh(X1[:,mid,:], X3[:,mid,:], dvar_code[k][:,mid,:], label=VARS[k])
# elif dir == 3:
# ax.pcolormesh(X1[:,:,mid], X2[:,:,mid], dvar_code[k][:,:,mid], label=VARS[k])
# #ax.plot(X1[:,mid,mid], dvar_sol[k][:,mid,mid], marker='s', label=(VARS[k] + " analytic"))
# plt.title(NAMES[MODES[n]] + ' ' + VARS[k] + ' ' + str(RES[m]))
# plt.legend(loc=1)
# plt.savefig('../test/modes_' + NAMES[MODES[n]] + '_' + VARS[k] + '_' + str(RES[m]) + '_' + str(fnum) + '.png', bbox_inches='tight')
# MEASURE CONVERGENCE
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
powerfits[n,k] = np.polyfit(np.log(RES), np.log(L1[n,:,k]), 1)[0]
os.chdir('../test')
if not AUTO:
# MAKE PLOTS
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
ax.plot(RES, L1[n,:,k], marker='s', label=VARS[k])
ax.plot([RES[0]/2., RES[-1]*2.],
10.*amp*np.asarray([RES[0]/2., RES[-1]*2.])**-2.,
color='k', linestyle='--', label='N^-2')
plt.xscale('log', basex=2); plt.yscale('log')
plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
plt.xlabel('N'); plt.ylabel('L1')
plt.title(NAMES[MODES[n]])
plt.legend(loc=1)
plt.savefig('mhdmodes3d_' + NAMES[MODES[n]] + '.png', bbox_inches='tight')
if AUTO:
data = {}
data['SOL'] = -2.*np.ones([len(MODES), NVAR])  # expected slope -2 (np.zeros here zeroed it out)
data['CODE'] = powerfits
import pickle
pickle.dump(data, open('data.p', 'wb'))
| 6,783 | 29.285714 | 145 |
py
|
iharm3d
|
iharm3d-master/prob/old_problems/mhdmodes1d/test/test.py
|
################################################################################
# #
#                        1D MHD LINEAR MODES CONVERGENCE                       #
# #
################################################################################
import os
import sys; sys.dont_write_bytecode = True
from subprocess import call
from shutil import copyfile
import glob
import numpy as np
#import h5py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pylab as pl
sys.path.insert(0, '../../../script/')
sys.path.insert(0, '../../../script/analysis/')
import util
import hdf5_to_dict as io
AUTO = False
for arg in sys.argv:
if arg == '-auto':
AUTO = True
RES = [16, 32, 64, 128, 256]
# LOOP OVER EIGENMODES
MODES = [0,1,2,3] # 1,2,3
NAMES = ['ENTROPY', 'SLOW', 'ALFVEN', 'FAST']
NVAR = 8
VARS = ['rho', 'u', 'u1', 'u2', 'u3', 'B1', 'B2', 'B3']
amp = 1.e-4
k1 = 2.*np.pi
k2 = 2.*np.pi
k3 = 2.*np.pi
var0 = np.zeros(NVAR)
var0[0] = 1.
var0[1] = 1.
var0[5] = 1.
L1 = np.zeros([len(MODES), len(RES), NVAR])
powerfits = np.zeros([len(MODES), NVAR])
for n in xrange(len(MODES)):
# EIGENMODES
dvar = np.zeros(NVAR)
if MODES[n] == 0: # ENTROPY
dvar[0] = 1.
if MODES[n] == 1: # SLOW/SOUND
dvar[0] = 0.580429492464
dvar[1] = 0.773905989952
dvar[2] = -0.253320198552
if MODES[n] == 2: # ALFVEN
dvar[3] = 0.480384461415
dvar[6] = 0.877058019307
if MODES[n] == 3: # FAST
dvar[4] = 0.480384461415
dvar[7] = 0.877058019307
dvar *= amp
# USE DUMPS IN FOLDERS OF GIVEN FORMAT
for m in xrange(len(RES)):
print '../dumps_' + str(RES[m]) + '_' + str(MODES[n])
os.chdir('../dumps_' + str(RES[m]) + '_' + str(MODES[n]))
dfile = np.sort(glob.glob('dump*.h5'))[-1]
hdr = io.load_hdr(dfile)
geom = io.load_geom(hdr, dfile)
dump = io.load_dump(hdr, geom, dfile)
X1 = dump['x'][:,0,0]
X2 = dump['y'][:,0,0]
dvar_code = []
dvar_code.append(dump['RHO'][:,0,0] - var0[0])
dvar_code.append(dump['UU'][:,0,0] - var0[1])
dvar_code.append(dump['U1'][:,0,0] - var0[2])
dvar_code.append(dump['U2'][:,0,0] - var0[3])
dvar_code.append(dump['U3'][:,0,0] - var0[4])
dvar_code.append(dump['B1'][:,0,0] - var0[5])
dvar_code.append(dump['B2'][:,0,0] - var0[6])
dvar_code.append(dump['B3'][:,0,0] - var0[7])
dvar_sol = []
for k in xrange(NVAR):
dvar_sol.append(np.real(dvar[k])*np.cos(k1*X1))
if abs(dvar[k]) != 0.:
L1[n][m][k] = np.mean(np.fabs(dvar_code[k] - dvar_sol[k]))
# Plot dvar
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
ax.plot(X1, dvar_code[k], marker='s', label=VARS[k])
ax.plot(X1, dvar_sol[k], marker='s', label=(VARS[k] + " analytic"))
plt.title(NAMES[MODES[n]] + ' ' + str(RES[m]))
plt.legend(loc=1)
plt.savefig('../test/modes_' + NAMES[MODES[n]] + '_' + str(RES[m]) + '.png', bbox_inches='tight')
# MEASURE CONVERGENCE
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
powerfits[n,k] = np.polyfit(np.log(RES), np.log(L1[n,:,k]), 1)[0]
os.chdir('../test')
if not AUTO:
# MAKE PLOTS
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
ax.plot(RES, L1[n,:,k], marker='s', label=VARS[k])
ax.plot([RES[0]/2., RES[-1]*2.],
10.*amp*np.asarray([RES[0]/2., RES[-1]*2.])**-2.,
color='k', linestyle='--', label='N^-2')
plt.xscale('log', basex=2); plt.yscale('log')
plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
plt.xlabel('N'); plt.ylabel('L1')
plt.title(NAMES[MODES[n]])
plt.legend(loc=1)
plt.savefig('mhdmodes1d_' + NAMES[MODES[n]] + '.png', bbox_inches='tight')
if AUTO:
data = {}
data['SOL'] = -2.*np.ones([len(MODES), NVAR])  # expected slope -2 (np.zeros here zeroed it out)
data['CODE'] = powerfits
import pickle
pickle.dump(data, open('data.p', 'wb'))
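# Note on the pickled payload: 'SOL' holds the expected convergence slope
# (-2 for every mode/variable) and 'CODE' the measured slopes, so an automated
# harness can compare the two arrays directly.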
| 4,145 | 28.404255 | 101 |
py
|
iharm3d
|
iharm3d-master/prob/old_problems/mhdmodes1d/test/mhdmodes1d.py
|
################################################################################
# #
#                        1D MHD LINEAR MODES CONVERGENCE                       #
# #
################################################################################
import os
import sys; sys.dont_write_bytecode = True
from subprocess import call
from shutil import copyfile
import glob
import numpy as np
#import h5py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pylab as pl
sys.path.insert(0, '../script/')
sys.path.insert(0, '../script/analysis/')
import util
import hdf5_to_dict as io
TMP_DIR = 'TMP'
TMP_BUILD = 'build_tmp.py'
util.safe_remove(TMP_DIR)
AUTO = False
for arg in sys.argv:
if arg == '-auto':
AUTO = True
RES = [16, 32, 64, 128, 256, 512, 1024]
util.make_dir(TMP_DIR)
os.chdir('../prob/mhdmodes1d/')
copyfile('build.py', TMP_BUILD)
# COMPILE CODE AT MULTIPLE RESOLUTIONS USING SEPARATE BUILD FILE
for n in xrange(len(RES)):
util.change_cparm('N1TOT', RES[n], TMP_BUILD)
#util.change_cparm('RECONSTRUCTION', 'PARA', TMP_BUILD)
call(['python', TMP_BUILD, '-dir', TMP_DIR])
call(['cp', os.path.join(os.getcwd(), TMP_DIR, 'bhlight'),
'../../test/' + TMP_DIR + '/bhlight_' + str(RES[n])])
copyfile(os.path.join(os.getcwd(), TMP_DIR, 'param.dat'), '../../test/' +
TMP_DIR + '/param.dat')
util.safe_remove(TMP_BUILD)
util.safe_remove(TMP_DIR)
os.chdir('../../test/')
# LOOP OVER EIGENMODES
MODES = [0, 1, 2, 3]
NAMES = ['ENTROPY', 'SLOW', 'ALFVEN', 'FAST']
NVAR = 8
VARS = ['rho', 'u', 'u1', 'u2', 'u3', 'B1', 'B2', 'B3']
amp = 1.e-4
k1 = 2.*np.pi
var0 = np.zeros(NVAR)
var0[0] = 1.
var0[1] = 1.
var0[5] = 1.
L1 = np.zeros([len(MODES), len(RES), NVAR])
powerfits = np.zeros([len(MODES), NVAR])
for n in xrange(len(MODES)):
util.change_rparm('nmode', MODES[n], TMP_DIR + '/param.dat')
os.chdir(TMP_DIR)
print os.getcwd()
# EIGENMODES
dvar = np.zeros(NVAR)
if MODES[n] == 0: # ENTROPY
dvar[0] = 1.
if MODES[n] == 1: # SLOW/SOUND
dvar[0] = 0.580429492464
dvar[1] = 0.773905989952
dvar[2] = -0.253320198552
if MODES[n] == 2: # ALFVEN
dvar[3] = 0.480384461415
dvar[6] = 0.877058019307
if MODES[n] == 3: # FAST
dvar[4] = 0.480384461415
dvar[7] = 0.877058019307
dvar *= amp
# RUN PROBLEM FOR EACH RESOLUTION AND ANALYZE RESULT
for m in xrange(len(RES)):
print ['./bhlight_' + str(RES[m]), '-p', 'param.dat']
call(['./bhlight_' + str(RES[m]), '-p', 'param.dat'])
dfiles = np.sort(glob.glob('dumps/dump*.h5'))
dump = io.load_dump(dfiles[-1])
X1 = dump['X1'][:,0,0]
X2 = dump['X2'][:,0,0]
dvar_code = []
dvar_code.append(dump['RHO'][:,0,0] - var0[0])
dvar_code.append(dump['UU'][:,0,0] - var0[1])
dvar_code.append(dump['U1'][:,0,0] - var0[2])
dvar_code.append(dump['U2'][:,0,0] - var0[3])
dvar_code.append(dump['U3'][:,0,0] - var0[4])
dvar_code.append(dump['B1'][:,0,0] - var0[5])
dvar_code.append(dump['B2'][:,0,0] - var0[6])
dvar_code.append(dump['B3'][:,0,0] - var0[7])
dvar_sol = []
for k in xrange(NVAR):
dvar_sol.append(np.real(dvar[k])*np.cos(k1*X1))
if abs(dvar[k]) != 0.:
L1[n][m][k] = np.mean(np.fabs(dvar_code[k] - dvar_sol[k]))
# MEASURE CONVERGENCE
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
powerfits[n,k] = np.polyfit(np.log(RES), np.log(L1[n,:,k]), 1)[0]
os.chdir('../')
if not AUTO:
# MAKE PLOTS
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
for k in xrange(NVAR):
if abs(dvar[k]) != 0.:
ax.plot(RES, L1[n,:,k], marker='s', label=VARS[k])
ax.plot([RES[0]/2., RES[-1]*2.],
10.*amp*np.asarray([RES[0]/2., RES[-1]*2.])**-2.,
color='k', linestyle='--', label='N^-2')
plt.xscale('log', basex=2); plt.yscale('log')
plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
plt.xlabel('N'); plt.ylabel('L1')
plt.title(NAMES[MODES[n]])
plt.legend(loc=1)
plt.savefig('mhdmodes1d_' + NAMES[MODES[n]] + '.png', bbox_inches='tight')
if AUTO:
data = {}
data['SOL'] = -2.*np.ones([len(MODES), NVAR])  # expected slope -2 (np.zeros here zeroed it out)
data['CODE'] = powerfits
import pickle
pickle.dump(data, open('data.p', 'wb'))
# CLEAN UP
util.safe_remove(TMP_DIR)
| 4,422 | 28.885135 | 80 |
py
|
iharm3d
|
iharm3d-master/script/test/convergence/plot_convergence_bondi.py
|
################################################################################
# #
# BONDI INFLOW CONVERGENCE PLOTS #
# #
################################################################################
from __future__ import print_function, division
import plot as bplt
import util
import hdf5_to_dict as io
import os,sys
import numpy as np
import matplotlib.pyplot as plt
RES = [32, 64, 128, 256]
NVAR = 8
L1 = np.zeros(len(RES))
# RUN PROBLEM FOR EACH RESOLUTION AND ANALYZE RESULT
for m in range(len(RES)):
  os.chdir('../dumps_' + str(RES[m]))
  dfiles = io.get_dumps_list(".")
  hdr, geom, dump0 = io.load_all(dfiles[0])
  dump1 = io.load_dump(dfiles[-1], hdr, geom)
  r = geom['r'][:,hdr['n2']//2,0]
  # print("r_eh is {}".format(hdr['r_eh']))
  imin = 0
  while r[imin] < hdr['r_eh']:
    imin += 1
  rho0 = np.mean(dump0['RHO'][imin:,:,0], axis=1)
  rho1 = np.mean(dump1['RHO'][imin:,:,0], axis=1)
  L1[m] = np.mean(np.fabs(rho1 - rho0))
# MEASURE CONVERGENCE
powerfit = np.polyfit(np.log(RES), np.log(L1), 1)[0]
print("Powerfit: {} L1: {}".format(powerfit, L1))
os.chdir('../plots/')
# MAKE PLOTS
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
ax.plot(RES, L1, marker='s', label='RHO')
amp = 1.0e-3
ax.plot([RES[0]/2., RES[-1]*2.],
10.*amp*np.asarray([RES[0]/2., RES[-1]*2.])**-2.,
color='k', linestyle='--', label='N^-2')
plt.xscale('log', basex=2); plt.yscale('log')
plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
plt.xlabel('N'); plt.ylabel('L1')
plt.title("BONDI")
plt.legend(loc=1)
plt.savefig('bondi.png', bbox_inches='tight')
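# Design note: Bondi inflow is a stationary solution, so the L1 norm of
# rho(last dump) - rho(first dump) outside the horizon measures how well the
# scheme holds the equilibrium; as in the mode tests, the fitted slope of
# log(L1) vs log(N) should approach -2 for a second-order scheme.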
| 1,796 | 25.820896 | 80 |
py
|
iharm3d
|
iharm3d-master/script/test/convergence/plot_convergence_modes.py
|
################################################################################
# #
# MHD MODES CONVERGENCE PLOTS #
# #
################################################################################
from __future__ import print_function, division
import plot as bplt
import util
import hdf5_to_dict as io
import os,sys
import numpy as np
import matplotlib.pyplot as plt
RES = [16,32,64] #,128]
# LOOP OVER EIGENMODES
MODES = [1,2,3]
NAMES = ['ENTROPY', 'SLOW', 'ALFVEN', 'FAST']
NVAR = 8
VARS = ['rho', 'u', 'u1', 'u2', 'u3', 'B1', 'B2', 'B3']
amp = 1.e-4
k1 = 2.*np.pi
k2 = 2.*np.pi
k3 = 2.*np.pi
var0 = np.zeros(NVAR)
var0[0] = 1.
var0[1] = 1.
# Magnetic field
var0[5] = 1.
var0[6] = 0.
var0[7] = 0.
L1 = np.zeros([len(MODES), len(RES), NVAR])
powerfits = np.zeros([len(MODES), NVAR])
for n in range(len(MODES)):
  # EIGENMODES
  dvar = np.zeros(NVAR)
  if MODES[n] == 0: # ENTROPY
    dvar[0] = 1.
  if MODES[n] == 1: # SLOW/SOUND
    dvar[0] = 0.556500332363
    dvar[1] = 0.742000443151
    dvar[2] = -0.282334999306
    dvar[3] = 0.0367010491491
    dvar[4] = 0.0367010491491
    dvar[5] = -0.195509141461
    dvar[6] = 0.0977545707307
    dvar[7] = 0.0977545707307
  if MODES[n] == 2: # ALFVEN
    dvar[3] = -0.339683110243
    dvar[4] = 0.339683110243
    dvar[6] = 0.620173672946
    dvar[7] = -0.620173672946
  if MODES[n] == 3: # FAST
    dvar[0] = 0.481846076323
    dvar[1] = 0.642461435098
    dvar[2] = -0.0832240462505
    dvar[3] = -0.224080007379
    dvar[4] = -0.224080007379
    dvar[5] = 0.406380545676
    dvar[6] = -0.203190272838
    dvar[7] = -0.203190272838
  dvar *= amp

  # USE DUMPS IN FOLDERS OF GIVEN FORMAT
  for m in range(len(RES)):
    os.chdir('../dumps_' + str(RES[m]) + '_' + str(MODES[n]))
    dfile = io.get_dumps_list(".")[-1]
    hdr, geom, dump = io.load_all(dfile)
    X1 = geom['x']
    X2 = geom['y']
    X3 = geom['z']
    dvar_code = []
    dvar_code.append(dump['RHO'] - var0[0])
    dvar_code.append(dump['UU'] - var0[1])
    dvar_code.append(dump['U1'] - var0[2])
    dvar_code.append(dump['U2'] - var0[3])
    dvar_code.append(dump['U3'] - var0[4])
    dvar_code.append(dump['B1'] - var0[5])
    dvar_code.append(dump['B2'] - var0[6])
    dvar_code.append(dump['B3'] - var0[7])
    dvar_sol = []
    for k in range(NVAR):
      dvar_sol.append(np.real(dvar[k])*np.cos(k1*X1 + k2*X2 + k3*X3))
      L1[n][m][k] = np.mean(np.fabs(dvar_code[k] - dvar_sol[k]))
    mid = RES[m]//2  # (unused)

  # MEASURE CONVERGENCE
  for k in range(NVAR):
    if abs(dvar[k]) != 0.:
      powerfits[n,k] = np.polyfit(np.log(RES), np.log(L1[n,:,k]), 1)[0]

  os.chdir('../plots')

  # MAKE PLOTS
  fig = plt.figure(figsize=(16.18,10))
  ax = fig.add_subplot(1,1,1)
  for k in range(NVAR):
    if abs(dvar[k]) != 0.:
      ax.plot(RES, L1[n,:,k], marker='s', label=VARS[k])
  ax.plot([RES[0]/2., RES[-1]*2.],
          10.*amp*np.asarray([RES[0]/2., RES[-1]*2.])**-2.,
          color='k', linestyle='--', label='N^-2')
  plt.xscale('log', basex=2); plt.yscale('log')
  plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
  plt.xlabel('N'); plt.ylabel('L1')
  plt.title(NAMES[MODES[n]])
  plt.legend(loc=1)
  plt.savefig('mhdmodes3d_' + NAMES[MODES[n]] + '.png', bbox_inches='tight')
| 3,453 | 26.412698 | 80 |
py
|
iharm3d
|
iharm3d-master/script/analysis/eht_plot.py
|
################################################################################
# #
# PLOTS OF VARIABLES COMPUTED IN eht_analysis.py #
# #
################################################################################
import matplotlib
import os, sys
import numpy as np
import pickle
import util
import units
from analysis_fns import *
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# For radials
FIGX = 10
FIGY = 10
# For flux plots; per-plot Y dim
PLOTY = 3
SIZE = 40
RADS = True
FLUXES = True
EXTRAS = True
DIAGS = True
OMEGA = False
FLUX_PROF = False
TEMP = False
BSQ = False
MFLUX = False
BFLUX = True
TH_PROFS = True
CFUNCS = True
PSPECS = True
LCS = True
COMPARE = False
PDFS = True
JSQ = True
def i_of(var, coord):
i = 0
while var[i] < coord:
i += 1
i -= 1
return i
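# Equivalent vectorized form (a sketch; the original keeps the plain loop):
# for a sorted 1D array, np.searchsorted returns the first index with
# var[i] >= coord, so the same "last index below coord" is
#   i = int(np.searchsorted(var, coord)) - 1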
# Return the portion of a variable which constitutes quiescence
def qui(avg, vname):
if 'avg_start' in avg:
istart = i_of(avg['t'], avg['avg_start'])
else:
istart = i_of(avg['t'], 5000)
if 'avg_end' in avg:
iend = i_of(avg['t'], avg['avg_end'])
else:
iend = i_of(avg['t'], 10000)
return avg[vname][istart:iend]
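# Usage sketch: np.mean(qui(avg, 'Mdot')) averages Mdot over the quiescent
# window only, i.e. t in [avg_start, avg_end] when the analysis recorded one,
# falling back to t in [5000, 10000].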
def print_av_var(vname, tag=None):
if tag:
print(tag+":")
else:
print(vname+":")
for label,avg in zip(labels,avgs):
if vname in avg:
var_av = np.abs(np.mean(qui(avg,vname)))
var_std = np.std(qui(avg,vname))
print("{}: avg {:.3}, std abs {:.3} rel {:.3}".format(label, var_av, var_std, var_std/var_av))
def plot_multi(ax, iname, varname, varname_pretty, logx=False, logy=False, xlim=None, ylim=None, timelabels=False, label_list=None, linestyle='-'):
if label_list is None: label_list = labels
for i, avg in enumerate(avgs):
if varname in avg:
if avg[iname].size > avg[varname].size:
# Some vars are only to half in theta
ax.plot(avg[iname][:avg[iname].size//2], avg[varname], styles[i]+linestyle, label=label_list[i])
else:
ax.plot(avg[iname][np.nonzero(avg[varname])], avg[varname][np.nonzero(avg[varname])], styles[i]+linestyle, label=label_list[i])
if iname == 't':
startx = (avg['avg_start'] - ti) / (tf - ti)
endx = (avg['avg_end'] - ti) / (tf - ti)
ax.axhline(np.mean(qui(avg, varname)), startx, endx, color=styles[i], linestyle='--')
# Plot additions
if logx: ax.set_xscale('log')
if logy: ax.set_yscale('log')
if ylim is not None: ax.set_ylim(ylim)
if xlim is not None: ax.set_xlim(xlim)
ax.grid(True)
ax.set_ylabel(varname_pretty)
# Defaults and labels for different plot types:
if iname == 't':
if xlim is None:
ax.set_xlim([ti,tf])
if timelabels:
ax.set_xlabel('t/M')
else:
ax.set_xticklabels([])
elif 'freq' in iname:
if timelabels:
ax.set_xlabel('Frequency (1/M)')
else:
ax.set_xticklabels([])
elif 'lambda' in iname:
if timelabels:
ax.set_xlabel('Correlation time (M)')
else:
ax.set_xticklabels([])
elif iname == 'th':
ax.set_xlabel(r"$\theta$")
elif iname == 'r':
ax.set_xlabel("r")
if xlim is None:
ax.set_xlim([0,50]) # For EHT comparison
if logy:
ylim = ax.get_ylim()
if ylim[0] < 1e-5*ylim[1]:
ax.set_ylim([1e-5*ylim[1], ylim[1]])
def plot_temp():
fig, ax = plt.subplots(1,1, figsize=(FIGX, FIGY))
if avgs[0]['r'][-1] > 50:
txlim = [1e0, 1e3]
else:
txlim = [1e0, 1e2]
fit_labs = []
for i,avg in enumerate(avgs):
cgs = units.get_cgs()
# We can't very well plot a temp we don't know
if 'Pg_r' not in avg:
return
avg['Tp_r'] = cgs['MP'] * avg['Pg_r'] / (cgs['KBOL'] * avg['rho_r']) * cgs['CL']**2
# Add the fits. Aaaaaalll the fits
x = avg['r'][i_of(avg['r'], 3):i_of(avg['r'], 30)]
y = avg['Tp_r'][i_of(avg['r'], 3):i_of(avg['r'], 30)]
coeffs = np.polyfit(np.log(x), np.log(y), deg=1)
poly = np.poly1d(coeffs)
yfit = lambda xf: np.exp(poly(np.log(xf)))
avg['r_fit'] = x
avg['Tp_r_fit'] = yfit(x)
fit_lab = r"{:.2g} * r^{:.2g}".format(np.exp(coeffs[1]), coeffs[0])
print(labels[i], " Ti fit: ", fit_lab)
fit_labs.append(fit_lab)
# Plot the profiles themselves
plot_multi(ax, 'r', 'Tp_r', r"$<T_{i}>$", logx=True, xlim=txlim, logy=True)
plot_multi(ax, 'r_fit', 'Tp_r_fit', r"$<T_{i}>$", logx=True, xlim=txlim, logy=True, label_list=fit_labs, linestyle='--')
if len(labels) > 1:
ax.legend(loc='lower right')
else:
ax.set_title(labels[0])
plt.savefig(fname_out + "_Ti.png")
plt.close(fig)
def plot_bsq_rise():
nplot = 1
fig, ax = plt.subplots(nplot,1, figsize=(FIGX, nplot*PLOTY))
for avg in avgs:
if 'B_rt' in avg:
avg['MagE'] = np.mean(avg['B_rt']**2, axis=-1)
plot_multi(ax, 't', 'MagE', r"$<B^2>$", logy=True, xlim=[0,10000])
if len(labels) > 1:
ax.legend(loc='lower right')
else:
ax.set_title(labels[0])
plt.savefig(fname_out + '_bsq_rise.png')
plt.close(fig)
def plot_ravgs():
fig, ax = plt.subplots(3, 3, figsize=(FIGX, FIGY))
for avg in avgs:
if 'beta_r' in avg:
avg['betainv_r'] = 1/avg['beta_r']
if 'Pg_r' in avg:
avg['Tp_r'] = avg['Pg_r'] / avg['rho_r']
if 'B_r' in avg:
avg['sigma_r'] = avg['B_r']**2 / avg['rho_r']
plot_multi(ax[0, 0], 'r', 'rho_r', r"$<\rho>$", logy=True) #, ylim=[1.e-2, 1.e0])
plot_multi(ax[0, 1], 'r', 'Pg_r', r"$<P_g>$", logy=True) #, ylim=[1.e-6, 1.e-2])
plot_multi(ax[0, 2], 'r', 'Ptot_r', r"$<P_{tot}>$", logy=True) #, ylim=[1.e-6, 1.e-2])
plot_multi(ax[1, 0], 'r', 'B_r', r"$<|B|>$", logy=True) #, ylim=[1.e-4, 1.e-1])
plot_multi(ax[1, 1], 'r', 'u^phi_r', r"$<u^{\phi}>$", logy=True) #, ylim=[1.e-3, 1.e1])
plot_multi(ax[1, 2], 'r', 'u_phi_r', r"$<u_{\phi}>$", logy=True) #, ylim=[1.e-3, 1.e1])
plot_multi(ax[2, 0], 'r', 'Tp_r', r"$<T>$", logy=True) #, ylim=[1.e-6, 1.e-2])
plot_multi(ax[2, 1], 'r', 'betainv_r', r"$<\beta^{-1}>$", logy=True) #, ylim=[1.e-2, 1.e1])
plot_multi(ax[2, 2], 'r', 'sigma_r', r"$<\sigma>$", logy=True) #, ylim=[1.e-2, 1.e1])
if len(labels) > 1:
ax[0, -1].legend(loc='upper right')
else:
fig.suptitle(labels[0])
#pad = 0.05
#plt.subplots_adjust(left=pad, right=1-pad/2, bottom=pad, top=1-pad)
plt.subplots_adjust(wspace=0.35)
plt.savefig(fname_out + '_ravgs.png')
plt.close(fig)
def plot_mflux():
fig, ax = plt.subplots(2,2, figsize=(0.66*FIGX, 0.66*FIGY))
for avg in avgs:
if 'outflow_r' in avg:
avg['outflow_r'] /= avg['Mdot_av']
if 'FM_jet_r' in avg:
avg['fm_jet_r'] = avg['FM_jet_r']/avg['Mdot_av']
if 'FM_r' in avg:
avg['fm_r'] = avg['FM_r']/avg['Mdot_av']
plot_multi(ax[0,0], 'r', 'outflow_r', r"$\frac{FM_{out}}{\langle \dot{M} \rangle}$", ylim=[0,3], xlim=[1,30], logx=True)
plot_multi(ax[1,0], 'r', 'fm_jet_r', r"$\frac{FM_{jet}}{\langle \dot{M} \rangle}$", xlim=[1,1000], logx=True)
plot_multi(ax[1,1], 'r', 'fm_r', r"$\frac{FM_{tot}}{\langle \dot{M} \rangle}$", xlim=[1,1000], logx=True)
if len(labels) > 1:
ax[0,0].legend(loc='upper right')
else:
fig.suptitle(labels[0])
plt.savefig(fname_out + '_Mfluxr.png')
plt.close(fig)
def plot_Bflux():
fig, ax = plt.subplots(2,2, figsize=(FIGX, FIGY))
for avg in avgs:
if ('Phi_sph_r' in avg) and ('Phi_mid_r' in avg):
avg['phi_sph_r'] = avg['Phi_sph_r'] / avg['Mdot_av']
avg['phi_mid_r'] = avg['Phi_mid_r'] / avg['Mdot_av']
avg['phi_diff_r'] = avg['phi_sph_r'] - avg['phi_mid_r']
if 'rho_r' in avg:
avg['rho_enc_r'] = np.zeros_like(avg['rho_r'])
for i in range(avg['rho_r'].size):
avg['rho_enc_r'][i] = np.sum(avg['rho_r'][:i])
avg['phi_brnorm_r'] = avg['Phi_mid_r'] / avg['rho_enc_r']
plot_multi(ax[0, 0], 'r', 'phi_sph_r', r"$\frac{\Phi_{sph}}{\langle \dot{M} \rangle}$")
plot_multi(ax[0, 1], 'r', 'phi_mid_r', r"$\frac{\Phi_{mid}}{\langle \dot{M} \rangle}$")
plot_multi(ax[1, 0], 'r', 'phi_diff_r', r"$\frac{\Phi_{sph} - \Phi_{mid}}{\langle \dot{M} \rangle}$")
plot_multi(ax[1, 1], 'r', 'phi_brnorm_r', r"$\frac{\Phi_{mid}}{\rho_{enc}}$")
if len(labels) > 1:
ax[0,0].legend(loc='upper right')
else:
fig.suptitle(labels[0])
pad = 0.05
plt.subplots_adjust(left=pad, right=1-pad/2, bottom=pad, top=1-pad)
plt.savefig(fname_out + '_Bfluxr.png')
plt.close(fig)
def plot_fluxes():
nplot = 7
fig,ax = plt.subplots(nplot, 1, figsize=(FIGX, nplot*PLOTY))
plot_multi(ax[0], 't', 'Mdot', r"$\dot{M}$")
plot_multi(ax[1], 't', 'Phi_b', r"$\Phi$")
plot_multi(ax[2], 't', 'Ldot', r"$\dot{L}$")
for avg in avgs:
if 'Edot' in avg.keys() and 'Mdot' in avg.keys():
avg['MmE'] = avg['Mdot'] - avg['Edot']
plot_multi(ax[3], 't', 'MmE', r"$\dot{M} - \dot{E}$")
for avg in avgs:
if 'LBZ_bg1' in avg.keys():
avg['aLBZ'] = np.abs(avg['LBZ_bg1'])
plot_multi(ax[4], 't', 'aLBZ', "BZ Luminosity")
for avg in avgs:
if 'Lj_bg1' in avg.keys():
avg['aLj'] = np.abs(avg['Lj_bg1'])
plot_multi(ax[5], 't', 'aLj', "Jet Luminosity", timelabels=True)
#plot_multi(ax[6], 't', 'Lum', "Luminosity proxy", timelabels=True)
if len(labels) > 1:
ax[0].legend(loc='upper left')
else:
ax[0].set_title(labels[0])
plt.savefig(fname_out + '_fluxes.png')
plt.close(fig)
for avg in avgs:
if 'Mdot' not in avg.keys():
avg['Mdot_av'] = 1
else:
avg['Mdot_av'] = np.mean(qui(avg,'Mdot'))
nplot = 3
fig, ax = plt.subplots(nplot,1, figsize=(FIGX, nplot*PLOTY))
# plot_multi(ax[0], 't', 'Mdot', r"$\dot{M}$")
# print_av_var('Mdot')
for avg in avgs:
if 'Phi_b' in avg.keys():
avg['phi_b'] = avg['Phi_b']/np.sqrt(avg['Mdot_av'])
plot_multi(ax[0], 't', 'phi_b', r"$\frac{\Phi_{BH}}{\sqrt{\langle \dot{M} \rangle}}$")
print_av_var('phi_b', "Normalized Phi_BH")
for avg in avgs:
if 'Ldot' in avg.keys():
avg['ldot'] = np.fabs(avg['Ldot'])/avg['Mdot_av']
plot_multi(ax[1], 't', 'ldot', r"$\frac{Ldot}{\langle \dot{M} \rangle}$")
print_av_var('ldot', "Normalized Ldot")
# for avg in avgs:
# if 'Edot' in avg.keys():
# avg['mmE'] = (avg['Mdot'] - avg['Edot'])/avg['Mdot_av']
# plot_multi(ax[3], 't', 'mmE', r"$\frac{\dot{M} - \dot{E}}{\langle \dot{M} \rangle}$")
# print_av_var('mmE', "Normalized Mdot-Edot")
for avg in avgs:
if 'Edot' in avg.keys():
avg['edot'] = avg['Edot']/avg['Mdot_av']
plot_multi(ax[2], 't', 'edot', r"$\frac{\dot{E}}{\langle \dot{M} \rangle}$", timelabels=True)
print_av_var('edot', "Normalized Edot")
for avg in avgs:
if 'aLBZ' in avg.keys() and 'Mdot' in avg.keys():
avg['alBZ'] = avg['aLBZ']/avg['Mdot_av']
# plot_multi(ax[5], 't', 'alBZ', r"$\frac{L_{BZ}}{\langle \dot{M} \rangle}$")
# print_av_var('alBZ', "Normalized BZ Jet Power")
for avg in avgs:
if 'aLj' in avg.keys() and 'Mdot' in avg.keys():
avg['alj'] = avg['aLj']/avg['Mdot_av']
# plot_multi(ax[6], 't', 'alj', r"$\frac{L_{jet}}{\langle \dot{M} \rangle}$")
# print_av_var('alj', "Normalized Total Jet Power")
# for avg in avgs:
# if 'Lum' in avg.keys():
# avg['lum'] = np.fabs(avg['Lum'])/avg['Mdot_av']
# plot_multi(ax[7], 't', 'lum', r"$\frac{Lum}{|\dot{M}|}$", timelabels=True)
# print_av_var('lum', "Normalized Luminosity Proxy")
if len(labels) > 1:
ax[0].legend(loc='upper left')
else:
ax[0].set_title(labels[0])
plt.savefig(fname_out + '_normfluxes.png')
plt.close(fig)
def plot_pspecs():
spec_keys = ['mdot', 'phi', 'edot', 'ldot', 'alBZ', 'alj', 'lightcurve']
pretty_keys = [r"$\dot{M}$",
r"$\frac{\Phi_{BH}}{\sqrt{\langle \dot{M} \rangle}}$",
r"$\frac{\dot{E}}{\langle \dot{M} \rangle}$",
r"$\frac{\dot{L}}{\langle \dot{M} \rangle}$",
r"$\frac{L_{BZ}}{\langle \dot{M} \rangle}$",
r"$\frac{L_{jet}}{\langle \dot{M} \rangle}$",
r"ipole lightcurve"
]
nplot = len(spec_keys)
fig, ax = plt.subplots((nplot+1)//2, 2, figsize=(1.5*FIGX, nplot*PLOTY))
for avg in avgs:
for key in spec_keys:
# Use the diag version if available for higher time res
if 'diags' in avg and avg['diags'] is not None and key in avg['diags']:
avg[key+'_pspec'], avg[key+'_ps_freq'] = pspec(avg['diags'][key], avg['diags']['t'])
elif key in avg:
avg[key+'_pspec'], avg[key + '_ps_freq'] = pspec(avg[key], avg['t'])
# If these happened add a normalized version
if key+'_pspec' in avg:
avg[key+'_pspec_f2'] = avg[key+'_pspec'] * avg[key + '_ps_freq']**2
for i,key in enumerate(spec_keys):
if key+'_pspec' in avgs[-1]:
# psmax = np.max(avgs[-1][key+'_pspec'])
# fmax = np.max(avgs[-1][key + '_ps_freq'])
plot_multi(ax[i//2, i%2], key+'_ps_freq', key+'_pspec_f2', pretty_keys[i],
logx=True, #xlim=[1e-5 * fmax, fmax],
logy=True, #ylim=[1e-8 * psmax, psmax],
timelabels=True)
if len(labels) > 1:
ax[0, 0].legend(loc='upper right')
else:
fig.suptitle(labels[0])
plt.savefig(fname_out + '_pspecs.png')
plt.close(fig)
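# Design note: the '_pspec_f2' arrays weight the power spectrum by f**2, which
# flattens the steep red-noise slope typical of these time series so that
# spectral breaks stand out on the log-log axes above.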
def plot_lcs():
nplot = 1
fig,ax = plt.subplots(nplot,1, figsize=(FIGX, nplot*PLOTY))
for avg,fname in zip(avgs,fnames):
fpaths = [os.path.join(os.path.dirname(fname), "163", "m_1_1_20", "lightcurve.dat"),
os.path.join(os.path.dirname(fname), "17", "m_1_1_20", "lightcurve.dat")]
for fpath in fpaths:
print(fpath)
if os.path.exists(fpath):
print("Found ",fpath)
cols = np.loadtxt(fpath).transpose()
# Truncate or pad the lightcurve to match the analysis time axis
f_len = cols.shape[1]
t_len = avg['t'].size
if f_len >= t_len:
avg['lightcurve'] = cols[2][:t_len]
avg['lightcurve_pol'] = cols[1][:t_len]
elif f_len < t_len:
avg['lightcurve'] = np.zeros(t_len)
avg['lightcurve_pol'] = np.zeros(t_len)
avg['lightcurve'][:f_len] = cols[2]
avg['lightcurve'][f_len:] = avg['lightcurve'][f_len-1]
avg['lightcurve_pol'][:f_len] = cols[1]
avg['lightcurve_pol'][f_len:] = avg['lightcurve_pol'][f_len-1]
plot_multi(ax, 't', 'lightcurve', r"ipole lightcurve", timelabels=True)
print_av_var('lightcurve', "Lightcurve from ipole")
if len(labels) > 1:
ax.legend(loc='upper left')
else:
fig.suptitle(labels[0])
plt.savefig(fname_out + '_lcs.png')
plt.close(fig)
def plot_extras():
nplot = 2
fig, ax = plt.subplots(nplot,1, figsize=(FIGX, nplot*PLOTY))
# Efficiency explicitly as a percentage
for i,avg in enumerate(avgs):
if 'Edot' in avg.keys():
avg['Eff'] = (avg['Mdot'] + avg['Edot'])/avg['Mdot_av']*100
plot_multi(ax[0], 't', 'Eff', "Efficiency (%)", ylim=[-10,200])
plot_multi(ax[1], 't', 'Edot', r"$\dot{E}$", timelabels=True)
if len(labels) > 1:
ax[0].legend(loc='upper left')
else:
ax[0].set_title(labels[0])
plt.savefig(fname_out + '_extras.png')
plt.close(fig)
def plot_diags():
nplot = 7
fig, ax = plt.subplots(nplot,1, figsize=(FIGX, nplot*PLOTY))
ax = ax.flatten()
plot_multi(ax[0], 't', 'Etot', "Total E")
plot_multi(ax[1], 't', 'sigma_max', r"$\sigma_{max}$")
plot_multi(ax[2], 't', 'betainv_max', r"$\beta^{-1}_{max}$")
plot_multi(ax[3], 't', 'Theta_max', r"$\Theta_{max}$")
plot_multi(ax[4], 't', 'rho_min', r"$\rho_{min}$")
plot_multi(ax[5], 't', 'U_min', r"$U_{min}$")
# TODO include HARM's own diagnostics somehow? Re-insert just this one?
plot_multi(ax[6], 't_d', 'divbmax_d', "max divB", timelabels=True)
if len(labels) > 1:
ax[1].legend(loc='lower right')
else:
ax[1].set_title(labels[0])
plt.savefig(fname_out + '_diagnostics.png')
plt.close(fig)
def plot_omega():
# Omega
fig, ax = plt.subplots(2,1, figsize=(FIGX, FIGY))
# Renormalize omega to omega/Omega_H for plotting
for avg in avgs:
if 'omega_hth' in avg.keys(): #Then both are
avg['omega_hth'] *= 4/avg['a']
avg['omega_av_hth'] *= 4/avg['a']
plot_multi(ax[0], 'th_5', 'omega_hth', r"$\omega_f$/$\Omega_H$ (EH, single shell)", ylim=[-1,2])
plot_multi(ax[1], 'th_5', 'omega_av_hth', r"$\omega_f$/$\Omega_H$ (EH, 5-zone average)", ylim=[-1,2])
# Legend
if len(labels) > 1:
ax[0].legend(loc='lower left')
else:
ax[0].set_title(labels[0])
# Horizontal guidelines
for a in ax.flatten():
a.axhline(0.5, linestyle='--', color='k')
plt.savefig(fname_out + '_omega.png')
plt.close(fig)
def plot_th_profs():
# Resolution-dependence of values in midplane
fig, ax = plt.subplots(1,2, figsize=(FIGX, FIGY/3))
plot_multi(ax[0], 'th_eh', 'betainv_25_th', r"$\beta^{-1} (r = 25)$", logy=True)
plot_multi(ax[1], 'th_eh', 'sigma_25_th', r"$\sigma (r = 25)$", logy=True)
# Legend
if len(labels) > 1:
ax[0].legend(loc='lower left')
else:
ax[0].set_title(labels[0])
plt.savefig(fname_out + '_th_profs.png')
plt.close(fig)
def plot_cfs():
# Correlation functions in midplane
fig, ax = plt.subplots(2, 2, figsize=(FIGX, FIGY))
for avg in avgs:
if np.max(avg['rho_cf_rphi']) > 2:
del avg['rho_cf_rphi'], avg['betainv_cf_rphi'], avg['rho_cf_10_phi'], avg['betainv_cf_10_phi']
if 'rho_cf_rphi' in avg:
avg['rho_cl_r'] = corr_length(avg['rho_cf_rphi'])
avg['rho_cf_10_phi'] = avg['rho_cf_rphi'][i_of(avg['r'], 10)]
if 'betainv_cf_rphi' in avg:
avg['betainv_cl_r'] = corr_length(avg['betainv_cf_rphi'])
avg['betainv_cf_10_phi'] = avg['betainv_cf_rphi'][i_of(avg['r'], 10)]
plot_multi(ax[0, 0], 'phi', 'rho_cf_10_phi', r"$\bar{R}(\rho) (r = 10)$", xlim=[0, np.pi])
plot_multi(ax[0, 1], 'phi', 'betainv_cf_10_phi', r"$\bar{R}(\beta^{-1}) (r = 10)$", xlim=[0, np.pi])
plot_multi(ax[1, 0], 'r', 'rho_cl_r', r"$\lambda (\rho, r)$", logx=True, xlim=[1, 500])
plot_multi(ax[1, 1], 'r', 'betainv_cl_r', r"$\lambda (\beta^{-1}, r)$", logx=True, xlim=[1, 500])
# Legend
if len(labels) > 1:
ax[0,1].legend(loc='lower left')
else:
fig.suptitle(labels[0])
plt.savefig(fname_out + '_cls.png')
plt.close(fig)
def plot_flux_profs():
# NOTE: this routine appears bitrotted: it references 'geom', 'iBZ', and 'avg',
# which are not defined in this scope, and 'fig'/'ax' were never created
# (added below so the plotting calls at least have axes). Single-run only.
fig, ax = plt.subplots(2, 2, figsize=(FIGX, FIGY))
# For converting to theta
Xgeom = np.zeros((4,1,geom['n2']))
Xgeom[1] = avg['r'][iBZ]
Xgeom[2] = avg['th_100']
to_th = 1/dxdX_to_KS(Xgeom, Met.FMKS, geom)[2,2,1]
for avg in avgs:
if 'FE_100_th' in avg.keys(): # Then all are
avg['FE_100_th'] *= to_th
avg['FE_Fl_100_th'] *= to_th
avg['FE_EM_100_th'] *= to_th
plot_multi(ax[0,0], 'th_100', 'FE_100_th', r"$\frac{dFE}{d\theta}$ ($r = 100$)")
plot_multi(ax[0,1], 'th_100', 'FE_Fl_100_th', r"$\frac{dFE_{Fl}}{d\theta}$ ($r = 100$)")
plot_multi(ax[1,0], 'th_100', 'FE_EM_100_th', r"$\frac{dFE_{EM}}{d\theta}$ ($r = 100$)")
# Legend
ax[0,0].legend(loc='lower left')
plt.savefig(fname_out + '_flux_profs.png')
plt.close(fig)
def plot_var_compare():
nplotsy, nplotsx = 2,2
fig, ax = plt.subplots(nplotsy, nplotsx, figsize=(FIGX, FIGY))
for i,vname in enumerate(['phi_b', 'ldot', 'edot', 'lightcurve']):
stddevs = [np.std(qui(avg,vname))/np.abs(np.mean(qui(avg,vname))) for avg in avgs if vname in avg.keys()]
n2s = [int(fname.split("x")[1]) for fname in fnames]
axis = ax[i//nplotsy, i % nplotsx]
axis.plot(n2s, stddevs, marker='o', color='k')
axis.set_xscale('log')
axis.set_xlabel(r"$N_{\theta}$")
axis.set_ylim([0,None])
axis.set_title("Relative variance of "+vname)
plt.savefig(fname_out + '_var_compare.png')
plt.close(fig)
def plot_pdfs():
nplotsy, nplotsx = 2, 1
fig, ax = plt.subplots(nplotsy, nplotsx, figsize=(FIGX, FIGY))
pdf_vars = []
for avg in avgs:
avg['pdf_bins'] = np.linspace(-3.5, 3.5, 200)
for var in avg:
if var[-4:] == '_pdf' and var not in pdf_vars:
pdf_vars.append(var)
for i,var in enumerate(pdf_vars):
plot_multi(ax[i], 'pdf_bins', var, var)
# Legend
if len(labels) > 1:
ax[0].legend(loc='upper right')
else:
fig.suptitle(labels[0])
plt.savefig(fname_out + '_pdfs.png')
plt.close(fig)
def plot_jsq():
nplotsy, nplotsx = 1, 1
fig, ax = plt.subplots(nplotsy, nplotsx, figsize=(FIGX, PLOTY))
for avg in avgs:
if 'Jsqtot_rt' in avg:
avg['Jsqtot_t'] = np.sum(avg['Jsqtot_rt'], axis=-1)
plot_multi(ax, 't', 'Jsqtot_t', r"Total $J^2$ on grid", timelabels=True)
print_av_var('Jsqtot_t', "Total J^2")
plt.savefig(fname_out + '_jsq.png')
plt.close(fig)
if __name__ == "__main__":
if len(sys.argv) < 3:
util.warn('Format: python eht_plot.py analysis_output [analysis_output ...] [labels_list] image_name')
sys.exit()
# All interior arguments are files to overplot, except possibly the last
if len(sys.argv) < 4:
last_file = -1
else:
last_file = -2
fnames = sys.argv[1:last_file]
avgs = []
for filename in fnames:
# Encoding arg is for python2 numpy bytestrings
avgs.append(pickle.load(open(filename,'rb'), encoding = 'latin1'))
# Split the labels, or use the output name as a label
if len(sys.argv) > 3:
labels = sys.argv[-2].split(",")
else:
labels = [sys.argv[-1].replace("_"," ")]
if len(labels) < len(avgs):
util.warn("Too few labels!")
sys.exit()
fname_out = sys.argv[-1]
# For time plots. Also take MAD/SANE for axes?
#ti = avgs[0]['t'][0]
nt = avgs[0]['t'].size
ti = avgs[0]['t'][nt//2]
tf = avgs[0]['t'][-1]
# Default styles
if len(avgs) > 1:
styles = ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
else:
styles = ['k']
if RADS: plot_ravgs()
if FLUXES: plot_fluxes()
if EXTRAS: plot_extras()
if DIAGS: plot_diags()
if OMEGA: plot_omega()
if BSQ: plot_bsq_rise()
if TEMP: plot_temp()
if MFLUX: plot_mflux()
if BFLUX: plot_Bflux()
if TH_PROFS: plot_th_profs()
if LCS: plot_lcs()
if CFUNCS: plot_cfs()
if PSPECS: plot_pspecs()
if PDFS: plot_pdfs()
if JSQ: plot_jsq()
if len(avgs) == 1:
if FLUX_PROF: plot_flux_profs()
else:
if COMPARE: plot_var_compare()
| 22,053 | 31.101892 | 147 |
py
|
iharm3d
|
iharm3d-master/script/analysis/plot_diff.py
|
################################################################################
# #
# PLOT DIFFERENCES IN TWO FILES #
# #
################################################################################
from __future__ import print_function, division
import plot as bplt
import util
import hdf5_to_dict as io
import os,sys
import numpy as np
import matplotlib.pyplot as plt
USEARRSPACE=True
NLINES = 20
SIZE = 600
FIGX = 20
FIGY = 16
dump1file = sys.argv[1]
dump2file = sys.argv[2]
imname = sys.argv[3]
hdr, geom, dump1 = io.load_all(dump1file, derived_vars=False)
# Hopefully this fails for dumps that shouldn't be compared
dump2 = io.load_dump(dump2file, hdr, geom, derived_vars=False)
N1 = hdr['n1']; N2 = hdr['n2']; N3 = hdr['n3']
log_floor = -60
# TODO properly option log, rel, lim
def plot_diff_xy(ax, var, rel=False, lim=None):
if rel:
if lim is not None:
bplt.plot_xy(ax, geom, np.abs((dump1[var] - dump2[var])/dump1[var]), vmin=0, vmax=lim, label=var, cbar=False, arrayspace=USEARRSPACE)
else:
bplt.plot_xy(ax, geom, np.abs((dump1[var] - dump2[var])/dump1[var]), label=var, cbar=False, arrayspace=USEARRSPACE)
else:
if lim is not None:
bplt.plot_xy(ax, geom, np.log10(np.abs(dump1[var] - dump2[var])), vmin=log_floor, vmax=lim, label=var, cbar=False, arrayspace=USEARRSPACE)
else:
bplt.plot_xy(ax, geom, np.log10(np.abs(dump1[var] - dump2[var])), vmin=log_floor, vmax=0, label=var, cbar=False, arrayspace=USEARRSPACE)
def plot_diff_xz(ax, var, rel=False, lim=None):
if rel:
if lim is not None:
bplt.plot_xz(ax, geom, np.abs((dump1[var] - dump2[var])/dump1[var]), vmin=0, vmax=lim, label=var, cbar=False, arrayspace=USEARRSPACE)
else:
bplt.plot_xz(ax, geom, np.abs((dump1[var] - dump2[var])/dump1[var]), label=var, cbar=False, arrayspace=USEARRSPACE)
else:
if lim is not None:
bplt.plot_xz(ax, geom, np.log10(np.abs(dump1[var] - dump2[var])), vmin=log_floor, vmax=lim, label=var, cbar=False, arrayspace=USEARRSPACE)
else:
bplt.plot_xz(ax, geom, np.log10(np.abs(dump1[var] - dump2[var])), vmin=log_floor, vmax=0, label=var, cbar=False, arrayspace=USEARRSPACE)
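# The xy/xz pair above differ only in the plotter they call; one way to do the
# TODO (a sketch, not used below -- assumes the bplt plotters accept vmax=None):
#   def plot_diff(plotter, ax, var, rel=False, lim=None):
#       if rel:
#           err = np.abs((dump1[var] - dump2[var]) / dump1[var])
#           plotter(ax, geom, err, vmin=0, vmax=lim, label=var,
#                   cbar=False, arrayspace=USEARRSPACE)
#       else:
#           err = np.log10(np.abs(dump1[var] - dump2[var]))
#           plotter(ax, geom, err, vmin=log_floor, vmax=(lim if lim is not None else 0),
#                   label=var, cbar=False, arrayspace=USEARRSPACE)
#   plot_diff(bplt.plot_xy, ax, 'RHO')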
# Plot the difference
nxplot = 4
nyplot = 3
vars = list(hdr['prim_names'])+['fail','divB']
fig = plt.figure(figsize=(FIGX, FIGY))
for i,name in enumerate(vars):
ax = plt.subplot(nyplot, nxplot, i+1)
plot_diff_xy(ax, name)
ax.set_xlabel('')
ax.set_ylabel('')
plt.tight_layout()
plt.savefig(imname+"_xy.png", dpi=100)
plt.close(fig)
fig = plt.figure(figsize=(FIGX, FIGY))
for i,name in enumerate(vars):
ax = plt.subplot(nyplot, nxplot, i+1)
plot_diff_xz(ax, name)
ax.set_xlabel('')
ax.set_ylabel('')
plt.tight_layout()
plt.savefig(imname+"_xz.png", dpi=100)
plt.close(fig)
| 3,051 | 32.911111 | 150 |
py
|
iharm3d
|
iharm3d-master/script/analysis/eht_analysis.py
|
################################################################################
# #
# CALCULATE TIME-DEPENDENT AND TIME-AVERAGED QUANTITIES #
# #
################################################################################
from __future__ import print_function, division
from analysis_fns import *
import hdf5_to_dict as io
import util
import os, sys
import multiprocessing
import psutil
import pickle
import itertools
import numpy as np
# Option to calculate fluxes at (just inside) r = 5
# This reduces interference from floors
floor_workaround_flux = False
# Option to ignore accretion at high magnetization (funnel)
# This also reduces interference from floors
floor_workaround_funnel = False
# Whether to calculate each expensive set of variables
# Once calculated, results are merged into each subsequent output file
calc_ravgs = True
calc_basic = True
calc_jet_profile = False
calc_jet_cuts = True
calc_lumproxy = True
calc_etot = True
calc_efluxes = False
calc_outfluxes = False
calc_pdfs = True
pdf_nbins = 200
if len(sys.argv) < 2:
util.warn('Format: python eht_analysis.py /path/to/dumps [start time] [start radial averages] [stop radial averages] [stop time]')
sys.exit()
# This doesn't seem like the _right_ way to do optional args
# Skips everything before tstart, averages between tavg_start and tavg_end
tstart = None
tavg_start = None
tavg_end = None
tend = None
if sys.argv[1] == "-d":
debug = True
path = sys.argv[2]
if len(sys.argv) > 3:
tstart = float(sys.argv[3])
if len(sys.argv) > 4:
tavg_start = float(sys.argv[4])
if len(sys.argv) > 5:
tavg_end = float(sys.argv[5])
if len(sys.argv) > 6:
tend = float(sys.argv[6])
else:
debug = False
path = sys.argv[1]
if len(sys.argv) > 2:
tstart = float(sys.argv[2])
if len(sys.argv) > 3:
tavg_start = float(sys.argv[3])
if len(sys.argv) > 4:
tavg_end = float(sys.argv[4])
if len(sys.argv) > 5:
tend = float(sys.argv[5])
dumps = io.get_dumps_list(path)
ND = len(dumps)
hdr = io.load_hdr(dumps[0])
geom = io.load_geom(hdr, path)
if tstart is None:
tstart = 0.
# If the time after which to average wasn't given, just use the back half of dumps
if tavg_start is None:
tavg_start = io.get_dump_time(dumps[ND//2]) - 0.1
# Sometimes we don't know times (i.e. above will be 0) but want averages
# We always want to average over all dumps in these cases
if tavg_start < 0.:
tavg_start = 0.
if tavg_end is None:
tavg_end = io.get_dump_time(dumps[-1])
if tavg_end == 0.:
tavg_end = float(ND)
if tend is None:
tend = io.get_dump_time(dumps[-1])
if tend == 0.:
tend = float(ND)
# Leave several extra zones if using MKS3 coordinates
if geom['metric'] == "MKS3":
iEH = i_of(geom, hdr['r_eh'])+4
else:
iEH = i_of(geom, hdr['r_eh'])
if floor_workaround_flux:
iF = i_of(geom, 5) # Measure fluxes at r=5M
else:
iF = iEH
# Max radius when computing "total" energy
iEmax = i_of(geom, 40)
# BZ luminosity
# 100M seems like the standard measuring spot (or at least, BHAC does it that way)
# L_BZ seems constant* after that, but much higher within ~50M
if geom['r_out'] < 100 or geom['r'][-1,geom['n2']//2,0] < 100: # If in theory or practice the sim is small...
iBZ = i_of(geom, 40) # most SANEs
else:
iBZ = i_of(geom, 100) # most MADs
jmin, jmax = get_j_vals(geom)
print("Running from t={} to {}, averaging from {} to {}".format(tstart, tend, tavg_start, tavg_end))
print("Using EH at zone {}, Fluxes at zone {}, Emax within zone {}, L_BZ at zone {}".format(iEH, iF, iEmax, iBZ))
def avg_dump(n):
out = {}
out['t'] = io.get_dump_time(dumps[n])
# When we don't know times, fudge
if out['t'] == 0 and n != 0:
out['t'] = n
if out['t'] < tstart or out['t'] > tend:
#print("Loaded {} / {}: {} (SKIPPED)".format((n+1), len(dumps), out['t']))
# Still return the time
return out
else:
print("Loaded {} / {}: {}".format((n+1), len(dumps), out['t']))
dump = io.load_dump(dumps[n], hdr, geom, extras=False)
# EHT Radial profiles: special fn for profile, averaged over phi, 1/3 theta, time
if calc_ravgs:
for var in ['rho', 'Theta', 'B', 'Pg', 'Ptot', 'beta', 'u^phi', 'u_phi', 'sigma', 'FM']:
out[var+'_rt'] = eht_profile(geom, d_fns[var](dump), jmin, jmax)
out[var+'_jet_rt'] = eht_profile(geom, d_fns[var](dump), 0, jmin) + eht_profile(geom, d_fns[var](dump), jmax, geom['n2'])
if out['t'] >= tavg_start and out['t'] <= tavg_end:
out[var+'_r'] = out[var+'_rt']
out[var+'_jet_r'] = out[var+'_jet_rt']
if out['t'] >= tavg_start and out['t'] <= tavg_end:
# CORRELATION FUNCTION
for var in ['rho', 'betainv']:
Rvar = corr_midplane(geom, d_fns[var](dump))
out[var+'_cf_rphi'] = Rvar
out[var+'_cf_10_phi'] = Rvar[i_of(geom,10),:]
# THETA AVERAGES
for var in ['betainv', 'sigma']:
out[var+'_25_th'] = theta_av(geom, d_fns[var](dump), i_of(geom, 25), 5, fold=False)
# These are divided averages, not average of division, so not amenable to d_fns
Fcov01, Fcov13 = Fcov(geom, dump, 0, 1), Fcov(geom, dump, 1, 3)
out['omega_hth'] = theta_av(geom, Fcov01, iEH, 1) / theta_av(geom, Fcov13, iEH, 1)
out['omega_av_hth'] = theta_av(geom, Fcov01, iEH, 5) / theta_av(geom, Fcov13, iEH, 5)
# This produces much worse results
#out['omega_alt_hth'] = theta_av(Fcov(dump, 0, 2), iEH, 1) / theta_av(Fcov(dump, 2, 3), iEH, 1)
#out['omega_alt_av_hth'] = theta_av(Fcov(dump, 0, 2), iEH-2, 5) / theta_av(Fcov(dump, 2, 3), iEH-2, 5)
if calc_basic:
# FIELD STRENGTHS
# The HARM B_unit is sqrt(4pi)*c*sqrt(rho) which has caused issues:
#norm = np.sqrt(4*np.pi) # This is what I believe matches T,N,M '11 and Narayan '12
norm = 1 # This is what the EHT comparison uses?
if geom['mixed_metrics']:
# When different, B1 will be in the _vector_ coordinates. Must perform the integral in those instead of zone coords
# Some gymnastics were done to keep in-memory size small
dxEH = np.einsum("i,...ij->...j", np.array([0, geom['dx1'], geom['dx2'], geom['dx3']]), np.linalg.inv(geom['vec_to_grid'][iEH,:,:,:]))
out['Phi_b'] = 0.5*norm * np.sum( np.fabs(dump['B1'][iEH,:,:]) * geom['gdet_vec'][iEH,:,None]*dxEH[:,None,2]*dxEH[:,None,3], axis=(0,1) )
else:
out['Phi_sph_r'] = 0.5*norm*sum_shell(geom, np.fabs(dump['B1']))
out['Phi_b'] = out['Phi_sph_r'][iEH]
out['Phi_mid_r'] = np.zeros_like(out['Phi_sph_r'])
for i in range(geom['n1']):
out['Phi_mid_r'][i] = norm*sum_plane(geom, -dump['B2'], within=i)
# FLUXES
# Radial profiles of Mdot and Edot, and their particular values
# EHT code-comparison normalization has all these values positive
for var,flux in [['Edot','FE'],['Mdot','FM'],['Ldot','FL']]:
if out['t'] >= tavg_start and out['t'] <= tavg_end:
out[flux+'_r'] = sum_shell(geom, d_fns[flux](dump))
out[var] = sum_shell(geom, d_fns[flux](dump), at_zone=iF)
# Mdot and Edot are defined inward
out['Mdot'] *= -1
out['Edot'] *= -1
# Maxima (for gauging floors)
for var in ['sigma', 'betainv', 'Theta']:
out[var+'_max'] = np.max(d_fns[var](dump))
# Minima
for var in ['rho', 'U']:
out[var+'_min'] = np.min(d_fns[var](dump))
# TODO KEL? plot in "floor space"? Full set of energy ratios?
# Profiles of different fluxes to gauge jet power calculations
if calc_jet_profile:
for var in ['rho', 'bsq', 'FM', 'FE', 'FE_EM', 'FE_Fl', 'FL', 'FL_EM', 'FL_Fl', 'betagamma', 'Be_nob', 'Be_b', 'mu']:
out[var+'_100_tht'] = np.sum(d_fns[var](dump)[iBZ], axis=-1)
if out['t'] >= tavg_start and out['t'] <= tavg_end:
out[var+'_100_th'] = out[var+'_100_tht']
out[var+'_100_thphi'] = d_fns[var](dump)[iBZ,:,:]
out[var+'_rth'] = d_fns[var](dump).mean(axis=-1)
# Blandford-Znajek Luminosity L_BZ
# This is a lot of luminosities!
if calc_jet_cuts:
# TODO cut on phi/t averages? -- needs 2-pass cut...
cuts = {'sigma1' : lambda dump : (d_fns['sigma'](dump) > 1),
#'sigma10' : lambda dump : (d_fns['sigma'](dump) > 10),
'Be_b0' : lambda dump : (d_fns['Be_b'](dump) > 0.02),
'Be_b1' : lambda dump : (d_fns['Be_b'](dump) > 1),
'Be_nob0' : lambda dump : (d_fns['Be_nob'](dump) > 0.02),
'Be_nob1' : lambda dump : (d_fns['Be_nob'](dump) > 1),
#'mu1' : lambda dump : (d_fns['mu'](dump) > 1),
#'mu2' : lambda dump : (d_fns['mu'](dump) > 2),
#'mu3' : lambda dump : (d_fns['mu'](dump) > 3),
'bg1' : lambda dump : (d_fns['betagamma'](dump) > 1.0),
'bg05' : lambda dump : (d_fns['betagamma'](dump) > 0.5),
'allp' : lambda dump : (d_fns['FE'](dump) > 0)}
# Terminology:
# LBZ = E&M energy only, any cut
# Lj = full E flux, any cut
# Ltot = Lj_allp = full luminosity wherever it is positive
for lum,flux in [['LBZ', 'FE_EM'], ['Lj', 'FE']]:
for cut in cuts.keys():
out[lum+'_'+cut+'_rt'] = sum_shell(geom, d_fns[flux](dump), mask=cuts[cut](dump))
if out['t'] >= tavg_start and out['t'] <= tavg_end:
out[lum+'_'+cut+'_r'] = out[lum+'_'+cut+'_rt']
out[lum+'_'+cut] = out[lum+'_'+cut+'_rt'][iBZ]
if calc_lumproxy:
rho, Pg, B = d_fns['rho'](dump), d_fns['Pg'](dump), d_fns['B'](dump)
# See EHT code comparison paper
j = rho**3 / Pg**2 * np.exp(-0.2 * (rho**2 / ( B * Pg**2))**(1./3.))
out['Lum_rt'] = eht_profile(geom, j, jmin, jmax)
if calc_etot:
# Total energy and current, summed by shells to allow cuts on radius
for tot_name, var_name in [['Etot', 'JE0']]:
out[tot_name+'_rt'] = sum_shell(geom, d_fns[var_name](dump))
for tot_name, var_name in [['Jsqtot', 'jsq']]:
out[tot_name+'_rt'] = sum_shell(geom, d_fns[var_name](geom, dump))
if calc_efluxes:
# Conserved (maybe; in steady state) 2D energy flux
for var in ['JE0', 'JE1', 'JE2']:
out[var+'_rt'] = sum_shell(geom, d_fns[var](dump))
if out['t'] >= tavg_start and out['t'] <= tavg_end:
out[var+'_rth'] = d_fns[var](dump).mean(axis=-1)
# Total outflowing portions of variables
if calc_outfluxes:
for name,var in [['outflow', 'FM'], ['outEflow', 'FE']]:
var_temp = d_fns[var](dump)
out[name+'_rt'] = sum_shell(geom, var_temp, mask=(var_temp > 0))
if out['t'] >= tavg_start and out['t'] <= tavg_end:
out[name+'_r'] = out[name+'_rt']
if calc_pdfs:
for var in ['betainv', 'rho']:
out[var+'_pdf'], _ = np.histogram(np.log10(d_fns[var](dump)),
bins=pdf_nbins, range=(-3.5, 3.5),
weights=np.repeat(geom['gdet'], geom['n3']).reshape((geom['n1'], geom['n2'], geom['n3'])),
density=True)
dump.clear()
del dump
return out
def merge_dict(n, out, out_full):
# Merge the output dicts
# TODO write to an HDF5 file incrementally?
for key in list(out.keys()):
if key not in out_full:
if key[-3:] == '_rt':
out_full[key] = np.zeros((ND, hdr['n1']))
elif key[-5:] == '_htht':
out_full[key] = np.zeros((ND, hdr['n2']//2))
elif key[-4:] == '_tht':
out_full[key] = np.zeros((ND, hdr['n2']))
elif key[-5:] == '_rtht':
out_full[key] = np.zeros((ND, hdr['n1'], hdr['n2']))
elif key[-7:] == '_thphit':
out_full[key] = np.zeros((ND, hdr['n2'], hdr['n3']))
elif key[-5:] == '_pdft':
out_full[key] = np.zeros((ND, pdf_nbins))
elif (key[-2:] == '_r' or key[-4:] == '_hth' or key[-3:] == '_th' or key[-4:] == '_phi' or
key[-4:] == '_rth' or key[-5:] == '_rphi' or key[-6:] == '_thphi' or key[-4:] == '_pdf'):
out_full[key] = np.zeros_like(out[key])
else:
out_full[key] = np.zeros(ND)
if (key[-2:] == '_r' or key[-4:] == '_hth' or key[-3:] == '_th' or key[-4:] == '_phi' or
key[-4:] == '_rth' or key[-5:] == '_rphi' or key[-6:] == '_thphi' or key[-4:] == '_pdf'):
# Weight the average correctly for _us_. Full weighting will be done on merge w/'avg_w'
if my_avg_range > 0:
out_full[key] += out[key]/my_avg_range
else:
out_full[key][n] = out[key]
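# e.g. a '_rt' key accumulates per-dump radial profiles into shape (ND, n1),
# while the matching '_r' key is summed with weight 1/my_avg_range so that the
# result, rescaled by 'avg_w' when chunks are combined, is a time average over
# [tavg_start, tavg_end].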
# TODO this, properly, some other day
if ND < 200:
nstart, nmin, nmax, nend = 0, 0, ND-1, ND-1
elif ND < 300:
nstart, nmin, nmax, nend = 0, ND//2, ND-1, ND-1
else:
nstart, nmin, nmax, nend = int(tstart)//5, int(tavg_start)//5, int(tavg_end)//5, int(tend)//5
full_avg_range = nmax - nmin
if nmin < nstart: nmin = nstart
if nmin > nend: nmin = nend
if nmax < nstart: nmax = nstart
if nmax > nend: nmax = nend
my_avg_range = nmax - nmin
print("nstart = {}, nmin = {}, nmax = {} nend = {}".format(nstart,nmin,nmax,nend))
# Make a dict for merged variables, throw in what we know now to make merging easier
out_full = {}
for key in ['a', 'gam', 'gam_e', 'gam_p']:
out_full[key] = hdr[key]
# Toss in the common geom lists and our weight in the overall average
out_full['r'] = geom['r'][:,hdr['n2']//2,0]
# For quick angular plots. Note most will need geometry to convert from dX2 to dth
out_full['th_eh'] = geom['th'][iEH,:,0]
out_full['th_bz'] = geom['th'][iBZ,:,0]
out_full['phi'] = geom['phi'][0,hdr['n2']//2,:]
out_full['avg_start'] = tavg_start
out_full['avg_end'] = tavg_end
out_full['avg_w'] = my_avg_range / full_avg_range
print("Will weight averages by {}".format(out_full['avg_w']))
# Fill the output dict with all per-dump or averaged stuff
# Hopefully in a way that doesn't keep too much of it around in memory
nthreads = util.calc_nthreads(hdr, pad=0.2)
util.iter_parallel(avg_dump, merge_dict, out_full, ND, nthreads)
# Add divBmax from HARM's own diagnostic output, if available. We can recompute the rest, but not this
diag = io.load_log(path)
if diag is not None:
out_full['t_d'] = diag['t']
out_full['divbmax_d'] = diag['divbmax']
# Deduce the name of the output file
if tstart > 0 or tend < 10000:
outfname = "eht_out_{}_{}.p".format(tstart,tend)
else:
outfname = "eht_out.p"
# See if there's anything already there we're not calculating, and import it
if os.path.exists(outfname):
with open(outfname, "rb") as prev_file:
out_old = pickle.load(prev_file)
for key in out_old:
if key not in out_full:
out_full[key] = out_old[key]
# OUTPUT
with open(outfname, "wb") as outf:
print("Writing {}".format(outfname))
pickle.dump(out_full, outf)
| 14,716 | 37.225974 | 143 |
py
|
iharm3d
|
iharm3d-master/script/analysis/analysis_fns.py
|
# Convenient analysis functions for physical calculations and averages
# Meant to be imported "from analysis_fns import *" for convenience
import numpy as np
import scipy.fftpack as fft
# Define a dict of names, coupled with the functions required to obtain their variables.
# That way, we only need to specify lists and final operations in eht_analysis,
# AND don't need to cart all these things around in memory
d_fns = {'rho': lambda dump: dump['RHO'],
'bsq': lambda dump: dump['bsq'],
'sigma': lambda dump: dump['bsq'] / dump['RHO'],
'U': lambda dump: dump['UU'],
'u_t': lambda dump: dump['ucov'][:, :, :, 0],
'u_phi': lambda dump: dump['ucov'][:, :, :, 3],
'u^phi': lambda dump: dump['ucon'][:, :, :, 3],
'FM': lambda dump: dump['RHO'] * dump['ucon'][:, :, :, 1],
'FE': lambda dump: -T_mixed(dump, 1, 0),
'FE_EM': lambda dump: -TEM_mixed(dump, 1, 0),
'FE_Fl': lambda dump: -TFl_mixed(dump, 1, 0),
'FL': lambda dump: T_mixed(dump, 1, 3),
'FL_EM': lambda dump: TEM_mixed(dump, 1, 3),
'FL_Fl': lambda dump: TFl_mixed(dump, 1, 3),
'Be_b': lambda dump: bernoulli(dump, with_B=True),
'Be_nob': lambda dump: bernoulli(dump, with_B=False),
'Pg': lambda dump: (dump['hdr']['gam'] - 1.) * dump['UU'],
'Pb': lambda dump: dump['bsq'] / 2,
'Ptot': lambda dump: d_fns['Pg'](dump) + d_fns['Pb'](dump),
'beta': lambda dump: dump['beta'],
'betainv': lambda dump: 1/dump['beta'],
'jcon': lambda dump: dump['jcon'],
# TODO TODO TODO take geom everywhere or nowhere
'jcov': lambda geom, dump: jcov(geom, dump),
'jsq': lambda geom, dump: jsq(geom, dump),
'B': lambda dump: np.sqrt(dump['bsq']),
'betagamma': lambda dump: np.sqrt((d_fns['FE_EM'](dump) + d_fns['FE_Fl'](dump))/d_fns['FM'](dump) - 1),
'Theta': lambda dump: (dump['hdr']['gam'] - 1) * dump['UU'] / dump['RHO'],
'Thetap': lambda dump: (dump['hdr']['gam_p'] - 1) * (dump['UU']) / dump['RHO'],
'Thetae': lambda dump: (dump['hdr']['gam_e'] - 1) * (dump['UU']) / dump['RHO'],
'gamma': lambda geom, dump: get_gamma(geom, dump),
'JE0': lambda dump: T_mixed(dump, 0, 0),
'JE1': lambda dump: T_mixed(dump, 1, 0),
'JE2': lambda dump: T_mixed(dump, 2, 0)
}
# Additions I'm unsure of or which are useless
#'rur' : lambda dump: geom['r']*dump['ucon'][:,:,:,1],
#'mu' : lambda dump: (d_fns['FE'](dump) + d_fns['FM'](dump)) / d_fns['FM'](dump),
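# Usage sketch: derived quantities are computed on demand rather than stored,
# e.g. sigma = d_fns['sigma'](dump) or FE_r = sum_shell(geom, d_fns['FE'](dump)).
# Note the entries flagged by the TODO above ('jcov', 'jsq', 'gamma') take
# (geom, dump) rather than just (dump).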
## Physics functions ##
# These are separated to make them faster
def T_con(geom, dump, i, j):
gam = dump['hdr']['gam']
return ( (dump['RHO'] + gam*dump['UU'] + dump['bsq'])*dump['ucon'][:,:,:,i]*dump['ucon'][:,:,:,j] +
((gam-1)*dump['UU'] + dump['bsq']/2)*geom['gcon'][:,:,None,i,j] - dump['bcon'][:,:,:,i]*dump['bcon'][:,:,:,j] )
def T_cov(geom, dump, i, j):
gam = dump['hdr']['gam']
return ( (dump['RHO'] + gam*dump['UU'] + dump['bsq'])*dump['ucov'][:,:,:,i]*dump['ucov'][:,:,:,j] +
((gam-1)*dump['UU'] + dump['bsq']/2)*geom['gcov'][:,:,None,i,j] - dump['bcov'][:,:,:,i]*dump['bcov'][:,:,:,j] )
def T_mixed(dump, i, j):
gam = dump['hdr']['gam']
if i != j:
return ( (dump['RHO'] + gam*dump['UU'] + dump['bsq'])*dump['ucon'][:,:,:,i]*dump['ucov'][:,:,:,j] +
- dump['bcon'][:,:,:,i]*dump['bcov'][:,:,:,j] )
else:
return ( (dump['RHO'] + gam*dump['UU'] + dump['bsq']) * dump['ucon'][:,:,:,i]*dump['ucov'][:,:,:,j] +
(gam-1)*dump['UU'] + dump['bsq']/2 - dump['bcon'][:,:,:,i]*dump['bcov'][:,:,:,j] )
def TEM_mixed(dump, i, j):
if i != j:
return dump['bsq'][:,:,:]*dump['ucon'][:,:,:,i]*dump['ucov'][:,:,:,j] - dump['bcon'][:,:,:,i]*dump['bcov'][:,:,:,j]
else:
return dump['bsq'][:,:,:]*dump['ucon'][:,:,:,i]*dump['ucov'][:,:,:,j] + dump['bsq']/2 - dump['bcon'][:,:,:,i]*dump['bcov'][:,:,:,j]
def TFl_mixed(dump, i, j):
gam = dump['hdr']['gam']
if i != j:
return (dump['RHO'] + dump['hdr']['gam']*dump['UU'])*dump['ucon'][:,:,:,i]*dump['ucov'][:,:,:,j]
else:
return (dump['RHO'] + dump['hdr']['gam']*dump['UU'])*dump['ucon'][:,:,:,i]*dump['ucov'][:,:,:,j] + (gam-1)*dump['UU']
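# For orientation: the fluxes tabulated in d_fns above are components of these
# tensors, e.g. FE = -T^1_0 = -T_mixed(dump, 1, 0) and FL = T^1_3 =
# T_mixed(dump, 1, 3); summing either over a shell (sum_shell below) gives the
# Edot and Ldot diagnostics in eht_analysis.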
# Return the i,j component of contravarient Maxwell tensor
# TODO there's a computationally easier way to do this:
# Pre-populate an antisym ndarray and einsum
# Same below
def Fcon(geom, dump, i, j):
NDIM = dump['hdr']['n_dim']
Fconij = np.zeros_like(dump['RHO'])
if i != j:
for mu in range(NDIM):
for nu in range(NDIM):
Fconij[:, :, :] += _antisym(i, j, mu, nu) * dump['ucov'][:, :, :, mu] * dump['bcov'][:, :, :, nu]
# Specify we want gdet in the vectors' coordinate system (this matters for KORAL dump files)
# TODO is normalization correct?
return Fconij*geom['gdet'][:,:,None]
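# A sketch of the einsum approach suggested above (untested; assumes an
# itertools import): build the Levi-Civita array once, then contract,
#   lc = np.zeros((4, 4, 4, 4))
#   for ind in itertools.product(range(4), repeat=4):
#       lc[ind] = _antisym(*ind)
#   Fcon_all = np.einsum("ijmn,...m,...n->...ij", lc, dump['ucov'], dump['bcov'])
# then scale by geom['gdet'] as above.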
def Fcov(geom, dump, i, j):
NDIM = dump['hdr']['n_dim']
Fcovij = np.zeros_like(dump['RHO'])
for mu in range(NDIM):
for nu in range(NDIM):
Fcovij += Fcon(geom, dump, mu, nu)*geom['gcov'][:,:,None,mu,i]*geom['gcov'][:,:,None,nu,j]
return Fcovij
def bernoulli(dump, with_B=False):
if with_B:
return -T_mixed(dump,0,0) / (dump['RHO']*dump['ucon'][:,:,:,0]) - 1
else:
return -(1 + dump['hdr']['gam']*dump['UU']/dump['RHO'])*dump['ucov'][:,:,:,0] - 1
# This is in zone metric!
def lower(geom, vec):
return np.einsum("...i,...ij->...j", vec, geom['gcov'][:,:,None,:,:])
def to_zone_coords(geom, vec):
return np.einsum("...i,...ij->...j", vec, geom['vec_to_grid'][:,:,None,:,:])
# Compute 4-vectors given fluid state
# Always returns vectors in the _grid_ coordinate system, to simplify analysis
def get_state(hdr, geom, dump, return_gamma=False):
ucon = np.zeros([hdr['n1'],hdr['n2'],hdr['n3'],hdr['n_dim']])
ucov = np.zeros_like(ucon)
bcon = np.zeros_like(ucon)
bcov = np.zeros_like(ucon)
# Aliases to make the below more readable
if geom['mixed_metrics']:
# Make sure these are in the vector metric if mixing
gcov = geom['gcov_vec']
gcon = geom['gcon_vec']
alpha = geom['lapse_vec']
else:
gcov = geom['gcov']
gcon = geom['gcon']
alpha = geom['lapse']
B1 = dump['B1']
B2 = dump['B2']
B3 = dump['B3']
gamma = get_gamma(geom, dump)
ucon[:,:,:,0] = gamma/(alpha[:,:,None])
ucon[:,:,:,1] = dump['U1'] - gamma*alpha[:,:,None]*gcon[:,:,None,0,1]
ucon[:,:,:,2] = dump['U2'] - gamma*alpha[:,:,None]*gcon[:,:,None,0,2]
ucon[:,:,:,3] = dump['U3'] - gamma*alpha[:,:,None]*gcon[:,:,None,0,3]
ucov = np.einsum("...i,...ij->...j", ucon, gcov[:,:,None,:,:])
bcon[:,:,:,0] = B1*ucov[:,:,:,1] + B2*ucov[:,:,:,2] + B3*ucov[:,:,:,3]
bcon[:,:,:,1] = (B1 + bcon[:,:,:,0]*ucon[:,:,:,1])/ucon[:,:,:,0]
bcon[:,:,:,2] = (B2 + bcon[:,:,:,0]*ucon[:,:,:,2])/ucon[:,:,:,0]
bcon[:,:,:,3] = (B3 + bcon[:,:,:,0]*ucon[:,:,:,3])/ucon[:,:,:,0]
if geom['mixed_metrics']:
# Convert all 4-vectors to zone coordinates
ucon = np.einsum("...i,...ij->...j", ucon, geom['vec_to_grid'][:,:,None,:,:])
ucov = np.einsum("...i,...ij->...j", ucon, geom['gcov'][:,:,None,:,:]) # Lower with _zone_ metric
bcon = np.einsum("...i,...ij->...j", bcon, geom['vec_to_grid'][:,:,None,:,:])
bcov = np.einsum("...i,...ij->...j", bcon, geom['gcov'][:,:,None,:,:])
else:
# Already have ucov in this case
bcov = np.einsum("...i,...ij->...j", bcon, gcov[:,:,None,:,:])
if return_gamma:
return ucon, ucov, bcon, bcov, gamma
else:
return ucon, ucov, bcon, bcov
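# Sketch of typical use: reconstruct the 4-vectors, then derive e.g.
#   ucon, ucov, bcon, bcov = get_state(hdr, geom, dump)
#   bsq = np.sum(bcon*bcov, axis=-1)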
def get_gamma(geom, dump):
# Aliases to make the below more readable
if geom['mixed_metrics']:
# Make sure this is in the vector metric if mixing
gcov = geom['gcov_vec']
else:
gcov = geom['gcov']
U1 = dump['U1']
U2 = dump['U2']
U3 = dump['U3']
qsq = (gcov[:,:,None,1,1]*U1**2 + gcov[:,:,None,2,2]*U2**2 +
gcov[:,:,None,3,3]*U3**2 + 2.*(gcov[:,:,None,1,2]*U1*U2 +
gcov[:,:,None,1,3]*U1*U3 +
gcov[:,:,None,2,3]*U2*U3))
return np.sqrt(1. + qsq)
def jcov(geom, dump):
return np.einsum("...i,...ij->...j", dump['jcon'], geom['gcov'][:,:,None,:,:])
def jsq(geom, dump):
return np.sum(dump['jcon']*jcov(geom, dump), axis=-1)
# Decide where to measure fluxes
def i_of(geom, rcoord):
i = 0
while geom['r'][i,geom['n2']//2,0] < rcoord:
i += 1
i -= 1
return i
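# e.g. i_of(geom, 5.) returns the index of the last zone with midplane r < 5M;
# eht_analysis uses this to place the flux measurement zones (iEH, iF, iBZ).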
## Correlation functions/lengths ##
def corr_midplane(geom, var, norm=True, at_i1=None):
if at_i1 is None:
at_i1 = range(geom['n1'])
jmin = geom['n2']//2-1
jmax = geom['n2']//2+1
R = np.zeros((len(at_i1), geom['n3']))
# TODO is there a way to vectorize over R? Also, are we going to average over adjacent r ever?
for i1 in at_i1:
# Average over small angle around midplane
var_phi = np.mean(var[i1, jmin:jmax, :], axis=0)
# Calculate autocorrelation
var_phi_normal = (var_phi - np.mean(var_phi))/np.std(var_phi)
var_corr = fft.ifft(np.abs(fft.fft(var_phi_normal))**2)
R[i1] = np.real(var_corr)/(var_corr.size)
if norm:
normR = R[:,0]
for k in range(geom['n3']):
R[:, k] /= normR
return R
# TODO needs work... EE, ME, CL are not defined in this module; they are
# assumed to be the cgs constants 'QE', 'ME', 'CL' from units.get_cgs()
def jnu_inv(nu, Thetae, Ne, B, theta):
  K2 = 2.*Thetae**2  # Asymptotic form of K_2(1/Thetae) for large Thetae
  nuc = EE * B / (2. * np.pi * ME * CL)
  nus = (2./9.) * nuc * Thetae**2 * np.sin(theta)
  x = nu/nus
  f = (x**(1./2.) + 2.**(11./12.) * x**(1./6.))**2
  j = (np.sqrt(2.) * np.pi * EE**2 * Ne * nus / (3. * CL * K2)) * f * np.exp(-x**(1./3.))
  # Cut off the synchrotron emissivity far above the critical frequency
  j[nu > 1.e12*nus] = 0.
  return j / nu**2
def corr_midplane_direct(geom, var, norm=True):
jmin = geom['n2']//2-1
jmax = geom['n2']//2+1
var_norm = np.ones((geom['n1'], 2, geom['n3']))
# Normalize radii separately
for i in range(geom['n1']):
vmean = np.mean(var[i,jmin:jmax,:])
var_norm[i,:,:] = var[i,jmin:jmax,:] - vmean
R = np.ones((geom['n1'], geom['n3']))
for k in range(geom['n3']):
R[:, k] = np.sum(var_norm*np.roll(var_norm, k, axis=-1)*geom['dx3'], axis=(1,2))/2
if norm:
normR = R[:, 0]
for k in range(geom['n3']):
R[:, k] /= normR
return R
def corr_length(R):
# TODO this can be done with a one-liner, I know it
lam = np.zeros(R.shape[0])
for i in range(R.shape[0]):
k = 0
while k < R.shape[1] and R[i, k] >= R[i, 0] / np.exp(1):
k += 1
lam[i] = k*(2*np.pi/R.shape[1])
return lam
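# Intended pipeline (a sketch matching the eht_analysis calls):
#   R = corr_midplane(geom, dump['RHO'])   # normalized autocorrelation in phi
#   lam = corr_length(R)                   # e-folding angle lambda(r)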
## Power Spectra ##
def pspec(var, t, window=0.33, half_overlap=False, bin="fib"):
if not np.any(var[var.size // 2:]):
return np.zeros_like(var), np.zeros_like(var)
data = var[var.size // 2:]
data = data[np.nonzero(data)] - np.mean(data[np.nonzero(data)])
if window < 1:
window = int(window * data.size)
print("FFT window is ", window)
sample_time = (t[-1] - t[0]) / t.size
print("Sampling time is {}".format(sample_time))
out_freq = np.abs(np.fft.fftfreq(window, sample_time))
if half_overlap:
# Hanning w/50% overlap
spacing = (window // 2)
nsamples = data.size // spacing
out = np.zeros(window)
for i in range(nsamples - 1):
windowed = np.hanning(window) * data[i * spacing:(i + window//spacing) * spacing]
out += np.abs(np.fft.fft(windowed)) ** 2
# TODO binning?
freqs = out_freq
  else:
    # Hamming no overlap, like comparison paper
    nsamples = data.size // window
    for i in range(nsamples):
      windowed = np.hamming(window) * data[i * window:(i + 1) * window]
      power = np.abs(fft.fft(windowed)) ** 2
      # Bin data, declare accumulator output when we know its size
      if bin == "fib":
        # Binning changes the segment length, so allocate for the binned form
        power, freqs = fib_bin(power, out_freq)
        if i == 0:
          out = np.zeros_like(np.array(power))
      else:
        freqs = out_freq
        if i == 0:
          out = np.zeros(window)
      out += power
    print("PSD using ", nsamples, " segments.")
    out /= nsamples
    out_freq = freqs
return out, out_freq
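# Sketch of intended use on an analysis pickle (hypothetical variable choice):
#   power, freq = pspec(avg['Mdot'], avg['t'])
#   plt.loglog(freq[1:], power[1:])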
def fib_bin(data, freqs):
# Fibonacci binning. Why is this a thing.
j = 0
fib_a = 1
fib_b = 1
pspec = []
pspec_freq = []
while j + fib_b < data.size:
pspec.append(np.mean(data[j:j + fib_b]))
pspec_freq.append(np.mean(freqs[j:j + fib_b]))
j = j + fib_b
fib_c = fib_a + fib_b
fib_a = fib_b
fib_b = fib_c
return np.array(pspec), np.array(pspec_freq)
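# The bin widths follow the Fibonacci sequence, so frequency resolution
# degrades roughly geometrically: e.g. for 8 samples the bins are data[0:1],
# data[1:3], data[3:6] (widths 1, 2, 3).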
## Sums and Averages ##
# Var must be a 3D array i.e. a grid scalar
# TODO could maybe be made faster with 'where' but also harder to get right
def sum_shell(geom, var, at_zone=None, mask=None):
integrand = var * geom['gdet'][:, :, None]*geom['dx2']*geom['dx3']
if mask is not None:
integrand *= mask
if at_zone is not None:
return np.sum(integrand[at_zone,:,:], axis=(0,1))
else:
return np.sum(integrand, axis=(1,2))
def sum_plane(geom, var, within=None):
jmin = geom['n2']//2-1
jmax = geom['n2']//2+1
if within is not None:
return np.sum(var[:within,jmin:jmax,:] * geom['gdet'][:within,jmin:jmax,None]*geom['dx1']*geom['dx3']) / (jmax-jmin)
else:
return np.sum(var[:,jmin:jmax,:] * geom['gdet'][:,jmin:jmax,None]*geom['dx1']*geom['dx3']) / (jmax-jmin)
def sum_vol(geom, var, within=None):
if within is not None:
return np.sum(var[:within,:,:] * geom['gdet'][:within,:,None]*geom['dx1']*geom['dx2']*geom['dx3'])
else:
return np.sum(var * geom['gdet'][:,:,None]*geom['dx1']*geom['dx2']*geom['dx3'])
def eht_vol(geom, var, jmin, jmax, outside=None):
if outside is not None:
return np.sum(var[outside:,jmin:jmax,:] * geom['gdet'][outside:,jmin:jmax,None]*geom['dx1']*geom['dx2']*geom['dx3'])
else:
return np.sum(var[:,jmin:jmax,:] * geom['gdet'][:,jmin:jmax,None]*geom['dx1']*geom['dx2']*geom['dx3'])
# TODO can I cache the volume instead of passing these?
def get_j_vals(geom):
THMIN = np.pi/3.
THMAX = 2.*np.pi/3.
# Calculate jmin, jmax for EHT radial profiles
ths = geom['th'][-1,:,0]
for n in range(len(ths)):
if ths[n] > THMIN:
jmin = n
break
for n in range(len(ths)):
if ths[n] > THMAX:
jmax = n
break
return jmin, jmax
# TODO can I cache the volume instead of passing these?
def eht_profile(geom, var, jmin, jmax):
return ( (var[:,jmin:jmax,:] * geom['gdet'][:,jmin:jmax,None]*geom['dx2']*geom['dx3']).sum(axis=(1,2)) /
((geom['gdet'][:,jmin:jmax]*geom['dx2']).sum(axis=1)*2*np.pi) )
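# e.g. eht_profile(geom, dump['RHO'], jmin, jmax) is the <rho>(r) profile
# averaged over phi and the third of theta about the midplane, with jmin, jmax
# from get_j_vals above.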
def theta_av(geom, var, start, zones_to_av=1, use_gdet=False, fold=True):
# Sum theta from each pole to equator and take overall mean
N2 = geom['n2']
if use_gdet:
return (var[start:start+zones_to_av,:N2//2,:] * geom['gdet'][start:start+zones_to_av,:N2//2,None]*geom['dx1']*geom['dx3'] +
var[start:start+zones_to_av,:N2//2-1:-1,:] * geom['gdet'][start:start+zones_to_av,:N2//2-1:-1,None]*geom['dx1']*geom['dx3']).sum(axis=(0,2))\
/((geom['gdet'][start:start+zones_to_av,:N2//2]*geom['dx1']).sum(axis=0)*2*np.pi)
else:
if fold:
return (var[start:start+zones_to_av,:N2//2,:].mean(axis=(0,2)) + var[start:start+zones_to_av,:N2//2-1:-1,:].mean(axis=(0,2))) / 2
else:
return var[start:start+zones_to_av,:,:].mean(axis=(0,2))
## Internal functions ##
# Completely antisymmetric 4D symbol
# TODO cache? Is this validation necessary?
def _antisym(a, b, c, d):
# Check for valid permutation
if (a < 0 or a > 3): return 100
if (b < 0 or b > 3): return 100
if (c < 0 or c > 3): return 100
if (d < 0 or d > 3): return 100
# Entries different?
if (a == b or a == c or a == d or
b == c or b == d or c == d):
return 0
return _pp([a, b, c, d])
# Due to Norm Hardy; good for general n
def _pp(P):
v = np.zeros_like(P)
p = 0
for j in range(len(P)):
if (v[j]):
p += 1
else:
x = j
while True:
x = P[x]
v[x] = 1
if x == j:
break
if p % 2 == 0:
return 1
else:
return -1
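# e.g. _pp([0, 1, 2, 3]) == 1 (identity, even) and _pp([1, 0, 2, 3]) == -1 (one
# transposition), so _antisym(0, 1, 2, 3) == 1 while any repeated index gives 0.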
| 15,889 | 33.393939 | 155 |
py
|
iharm3d
|
iharm3d-master/script/analysis/units.py
|
## Handle adding units to quantities. Work in progress
import numpy as np
cgs = {
'CL' : 2.99792458e10,
'QE' : 4.80320680e-10,
'ME' : 9.1093826e-28,
'MP' : 1.67262171e-24,
'MN' : 1.67492728e-24,
'HPL' : 6.6260693e-27,
'HBAR' : 1.0545717e-27,
'KBOL' : 1.3806505e-16,
'GNEWT' : 6.6742e-8,
'SIG' : 5.670400e-5,
'AR' : 7.5657e-15,
'THOMSON' : 0.665245873e-24,
'JY' : 1.e-23,
'PC' : 3.085678e18,
'AU' : 1.49597870691e13,
'MSOLAR' : 1.989e33,
'RSOLAR' : 6.96e10,
'LSOLAR' : 3.827e33
}
def get_cgs():
return cgs
# Get M87 units. Pass tp_over_te=None to get non-constant-frac units
def get_units_M87(M_unit, tp_over_te=3):
L_unit = 9.15766e+14
T_unit = L_unit / cgs['CL']
return _get_all_units(M_unit, L_unit, T_unit, tp_over_te)
# Internal method for all the well-defined units
def _get_all_units(M_unit, L_unit, T_unit, tp_over_te, gam=4/3):
out = {}
out['M_unit'] = M_unit
out['L_unit'] = L_unit
out['T_unit'] = T_unit
RHO_unit = M_unit / (L_unit ** 3)
out['RHO_unit'] = RHO_unit
  out['U_unit'] = RHO_unit*cgs['CL']**2
out['B_unit'] = cgs['CL']*np.sqrt(4. * np.pi * RHO_unit)
out['Ne_unit'] = RHO_unit/(cgs['MP'] + cgs['ME'])
if tp_over_te is not None:
out['Thetae_unit'] = (gam-1.)*cgs['MP']/cgs['ME']/(1. + tp_over_te)
else:
out['Thetae_unit'] = cgs['MP']/cgs['ME']
return out
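# Example (hypothetical M_unit value): convert a code-units field strength to Gauss,
#   u = get_units_M87(M_unit=1.e28)
#   B_cgs = B_code * u['B_unit']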
| 1,325 | 23.555556 | 71 |
py
|
iharm3d
|
iharm3d-master/script/analysis/defs.py
|
# Definitions of enums and slices used throughout the code
from enum import Enum
class Met(Enum):
"""Enum of the metrics/coordinate systems supported by HARM"""
MINKOWSKI = 0
MKS = 1
#MMKS = 2 # TODO put back support?
FMKS = 3
# Exotic metrics from KORAL et al
EKS = 4
MKS3 = 5
# For conversions, etc
KS = 6
class Loci(Enum):
"""Location enumerated value.
Locations are defined by:
^ theta
|----------------------
| |
| |
|FACE1 CENT |
| |
|CORN FACE2 |
-------------------------> R
With FACE3 as the plane in phi"""
FACE1 = 0
FACE2 = 1
FACE3 = 2
CENT = 3
CORN = 4
class Var(Enum):
"""All possible variables HARM supports. May not all be used in a given run"""
RHO = 0
UU = 1
U1 = 2
U2 = 3
U3 = 4
B1 = 5
B2 = 6
B3 = 7
KTOT = 8
KEL = 9
class Slices:
"""These slices can be constructed easily and define the bulk (all physical) fluid zones,
separately from the ghost zones used for MPI syncing and boundary conditions
Careful not to use the slices on arrays which are themselves slices of the whole! (TODO fix this requirement?)
"""
def __init__(self, G):
# Slices to represent variables, to add to below for picking out e.g. bulk of RHO
self.allv = (slice(None),)
self.RHO = (Var.RHO.value,)
self.UU = (Var.UU.value,)
self.U1 = (Var.U1.value,)
self.U2 = (Var.U2.value,)
self.U3 = (Var.U3.value,)
self.B1 = (Var.B1.value,)
self.B2 = (Var.B2.value,)
self.B3 = (Var.B3.value,)
self.KTOT = (Var.KTOT.value,)
self.KEL = (Var.KEL.value,)
# Single slices for putting together operations in bounds.py. May be replaced by loopy kernels
# Name single slices for character count
ng = G.NG
self.a = slice(None)
self.b = slice(ng, -ng)
self.bulk = (self.b, self.b, self.b)
self.all = (slice(None),slice(None),slice(None))
# "Halo" of 1 zone
self.bh1 = slice(ng - 1, -ng + 1)
self.bulkh1 = (self.bh1, self.bh1, self.bh1)
# For manual finite-differencing. Probably very slow
self.diffr1 = (slice(ng + 1, -ng + 1), self.b, self.b)
self.diffr2 = (self.b, slice(ng + 1, -ng + 1), self.b)
self.diffr3 = (self.b, self.b, slice(ng + 1, -ng + 1))
# Name boundaries slices for readability
# Left side
self.ghostl = slice(0, ng)
self.boundl = slice(ng, 2*ng)
self.boundl_r = slice(2 * ng, ng, -1) # Reverse
self.boundl_o = slice(ng, ng + 1) # Outflow (1-zone slice for replication)
# Right side
self.ghostr = slice(-ng, None)
self.boundr = slice(-2 * ng, -ng)
self.boundr_r = slice(-ng, -2 * ng, -1)
self.boundr_o = slice(-ng - 1, -ng)
def geom_slc(self, slc):
return slc[:2] + (None,)
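# e.g. with slc = Slices(G), an array P of shape (nvar,) + grid shape can be
# indexed as P[slc.RHO + slc.bulk] to select the physical zones of RHO,
# excluding the NG-deep ghost halo on each side.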
class Shapes:
def __init__(self, G):
# Shapes for allocation
self.geom_scalar = (G.GN[1], G.GN[2])
self.geom_vector = (G.NDIM,) + self.geom_scalar
self.geom_tensor = (G.NDIM,) + self.geom_vector
self.grid_scalar = (G.GN[1], G.GN[2], G.GN[3])
self.grid_vector = (G.NDIM,) + self.grid_scalar
self.grid_tensor = (G.NDIM,) + self.grid_vector
self.bulk_scalar = (G.N[1], G.N[2], G.N[3])
self.bulk_vector = (G.NDIM,) + self.bulk_scalar
self.bulk_tensor = (G.NDIM,) + self.bulk_vector
| 3,601 | 29.525424 | 114 |
py
|
iharm3d
|
iharm3d-master/script/analysis/movie.py
|
################################################################################
# #
# GENERATE MOVIES FROM SIMULATION OUTPUT #
# #
################################################################################
import hdf5_to_dict as io
import plot as bplt
from analysis_fns import *
import util
from luminosity_th_study import overlay_thphi_contours
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os, sys
import pickle
import numpy as np
# Movie size in inches. Keep 16/9 for standard-size movies
FIGX = 12
FIGY = FIGX*9/16
# For plotting debug, "array-space" plots
# Certain plots can override this below
USEARRSPACE = False
LOG_MDOT = False
LOG_PHI = False
# Load diagnostic data from post-processing (eht_out.p)
diag_post = True
def plot(n):
imname = os.path.join(frame_dir, 'frame_%08d.png' % n)
tdump = io.get_dump_time(files[n])
if (tstart is not None and tdump < tstart) or (tend is not None and tdump > tend):
return
print("{} / {}".format((n+1),len(files)))
fig = plt.figure(figsize=(FIGX, FIGY))
if movie_type not in ["simplest", "simpler", "simple"]:
dump = io.load_dump(files[n], hdr, geom, derived_vars=True, extras=False)
#fig.suptitle("t = %d"%dump['t']) # TODO put this at the bottom somehow?
else:
# Simple movies don't need derived vars
dump = io.load_dump(files[n], hdr, geom, derived_vars=False, extras=False)
# Put the somewhat crazy rho values from KORAL dumps back in plottable range
if np.max(dump['RHO']) < 1e-10:
dump['RHO'] *= 1e15
# Zoom in for small problems
if geom['r'][-1,0,0] > 100:
window = [-100,100,-100,100]
nlines = 20
rho_l, rho_h = -3, 2
iBZ = i_of(geom,100) # most MADs
rBZ = 100
else:
window = [-50,50,-50,50]
nlines = 5
rho_l, rho_h = -4, 1
iBZ = i_of(geom,40) # most SANEs
rBZ = 40
if movie_type == "simplest":
# Simplest movie: just RHO
ax_slc = [fig.add_subplot(1,2,1), fig.add_subplot(1,2,2)]
bplt.plot_xz(ax_slc[0], geom, np.log10(dump['RHO']),
label="", vmin=rho_l, vmax=rho_h, window=window,
xlabel=False, ylabel=False, xticks=False, yticks=False,
cbar=False, cmap='jet')
bplt.plot_xy(ax_slc[1], geom, np.log10(dump['RHO']),
label="", vmin=rho_l-0.5, vmax=rho_h-0.5, window=window,
xlabel=False, ylabel=False, xticks=False, yticks=False,
cbar=False, cmap='jet')
pad = 0.0
plt.subplots_adjust(hspace=0, wspace=0, left=pad, right=1-pad, bottom=pad, top=1-pad)
elif movie_type == "simpler":
# Simpler movie: RHO and phi
gs = gridspec.GridSpec(2, 2, height_ratios=[6, 1], width_ratios=[16,17])
ax_slc = [fig.add_subplot(gs[0,0]), fig.add_subplot(gs[0,1])]
ax_flux = [fig.add_subplot(gs[1,:])]
bplt.plot_slices(ax_slc[0], ax_slc[1], geom, dump, np.log10(dump['RHO']),
label=r"$\log_{10}(\rho)$", vmin=rho_l, vmax=rho_h, window=window,
overlay_field=False, cmap='jet')
bplt.diag_plot(ax_flux[0], diag, 'Phi_b', dump['t'], ylabel=r"$\phi_{BH}$", logy=LOG_PHI, xlabel=False)
elif movie_type == "simple":
# Simple movie: RHO mdot phi
gs = gridspec.GridSpec(3, 2, height_ratios=[4, 1, 1])
ax_slc = [fig.add_subplot(gs[0,0]), fig.add_subplot(gs[0,1])]
ax_flux = [fig.add_subplot(gs[1,:]), fig.add_subplot(gs[2,:])]
bplt.plot_slices(ax_slc[0], ax_slc[1], geom, dump, np.log10(dump['RHO']),
label=r"$\log_{10}(\rho)$", vmin=rho_l, vmax=rho_h, window=window, cmap='jet')
bplt.diag_plot(ax_flux[0], diag, 'Mdot', dump['t'], ylabel=r"$\dot{M}$", logy=LOG_MDOT)
bplt.diag_plot(ax_flux[1], diag, 'Phi_b', dump['t'], ylabel=r"$\phi_{BH}$", logy=LOG_PHI)
elif movie_type == "radial":
# TODO just record these in analysis output...
rho_r = eht_profile(geom, dump['RHO'], jmin, jmax)
B_r = eht_profile(geom, np.sqrt(dump['bsq']), jmin, jmax)
uphi_r = eht_profile(geom, dump['ucon'][:,:,:,3], jmin, jmax)
Pg = (hdr['gam']-1.)*dump['UU']
Pb = dump['bsq']/2
Pg_r = eht_profile(geom, Pg, jmin, jmax)
Ptot_r = eht_profile(geom, Pg + Pb, jmin, jmax)
betainv_r = eht_profile(geom, Pb/Pg, jmin, jmax)
ax_slc = lambda i: fig.add_subplot(2, 3, i)
bplt.radial_plot(ax_slc(1), geom, rho_r, ylabel=r"$<\rho>$", logy=True, ylim=[1.e-2, 1.e0])
bplt.radial_plot(ax_slc(2), geom, Pg_r, ylabel=r"$<P_g>$", logy=True, ylim=[1.e-6, 1.e-2])
bplt.radial_plot(ax_slc(3), geom, B_r, ylabel=r"$<|B|>$", logy=True, ylim=[1.e-4, 1.e-1])
bplt.radial_plot(ax_slc(4), geom, uphi_r, ylabel=r"$<u^{\phi}>$", logy=True, ylim=[1.e-3, 1.e1])
bplt.radial_plot(ax_slc(5), geom, Ptot_r, ylabel=r"$<P_{tot}>$", logy=True, ylim=[1.e-6, 1.e-2])
bplt.radial_plot(ax_slc(6), geom, betainv_r, ylabel=r"$<\beta^{-1}>$", logy=True, ylim=[1.e-2, 1.e1])
elif movie_type == "fluxes_cap":
axes = [fig.add_subplot(2, 2, i) for i in range(1,5)]
bplt.plot_thphi(axes[0], geom, np.log10(d_fns['FE'](dump)[iBZ,:,:]), iBZ, vmin=-8, vmax=-4, label =r"FE $\theta-\phi$ slice")
bplt.plot_thphi(axes[1], geom, np.log10(d_fns['FM'](dump)[iBZ,:,:]), iBZ, vmin=-8, vmax=-4, label =r"FM $\theta-\phi$ slice")
bplt.plot_thphi(axes[2], geom, np.log10(d_fns['FL'](dump)[iBZ,:,:]), iBZ, vmin=-8, vmax=-4, label =r"FL $\theta-\phi$ slice")
    bplt.plot_thphi(axes[3], geom, np.log10(dump['RHO'][iBZ,:,:]), iBZ, vmin=-4, vmax=1, label=r"$\rho$ $\theta-\phi$ slice")
for i,axis in enumerate(axes):
if i == 0:
overlay_thphi_contours(axis, geom, diag, legend=True)
else:
overlay_thphi_contours(axis, geom, diag)
max_th = geom['n2']//2
x = bplt.loop_phi(geom['x'][iBZ,:max_th,:])
y = bplt.loop_phi(geom['y'][iBZ,:max_th,:])
prep = lambda var : bplt.loop_phi(var[:max_th,:])
axis.contour(x,y, prep(geom['th'][iBZ]), [1.0], colors='k')
axis.contour(x,y, prep(d_fns['betagamma'](dump)[iBZ]), [1.0], colors='k')
axis.contour(x,y, prep(d_fns['sigma'](dump)[iBZ]), [1.0], colors='xkcd:green')
axis.contour(x,y, prep(d_fns['FE'](dump)[iBZ]), [0.0], colors='xkcd:pink')
axis.contour(x,y, prep(d_fns['Be_nob'](dump)[iBZ]), [0.02], colors='xkcd:red')
axis.contour(x,y, prep(d_fns['mu'](dump)[iBZ]), [2.0], colors='xkcd:blue')
elif movie_type == "rho_cap":
# Note cmaps are different between left 2 and right plot, due to the latter being far away from EH
bplt.plot_slices(fig.add_subplot(1,3,1), fig.add_subplot(1,3,2), geom, dump, np.log10(dump['RHO']),
label=r"$\log_{10}(\rho)$", vmin=-3, vmax=2, cmap='jet')
bplt.overlay_contours(fig.add_subplot(1,3,1), geom, geom['r'], [rBZ], color='k')
bplt.plot_thphi(fig.add_subplot(1,3,3), geom, np.log10(dump['RHO'][iBZ,:,:]), iBZ, vmin=-4, vmax=1, label=r"$\log_{10}(\rho)$ $\theta-\phi$ slice r="+str(rBZ))
elif movie_type == "funnel_wall":
rKH = 20
iKH = i_of(geom, rKH)
win=[0,rBZ/2,0,rBZ]
gs = gridspec.GridSpec(1, 3, width_ratios=[1,1,1])
axes = [fig.add_subplot(gs[0,i]) for i in range(3)]
bplt.plot_xz(axes[0], geom, np.log10(dump['RHO']),
label=r"$\log_{10}(\rho)$", vmin=-3, vmax=2, cmap='jet', window=win, shading='flat')
bplt.plot_xz(axes[1], geom, np.log10(dump['ucon'][:,:,:,3]),
label=r"$\log_{10}(u^{\phi})$", vmin=-3, vmax=0, cmap='Reds', window=win, cbar=False, shading='flat')
bplt.plot_xz(axes[1], geom, np.log10(-dump['ucon'][:,:,:,3]),
label=r"$\log_{10}(u^{\phi})$", vmin=-3, vmax=0, cmap='Blues', window=win, cbar=False, shading='flat')
    bplt.plot_xz(axes[2], geom, np.log10(dump['ucov'][:,:,:,3]),
                 label=r"$\log_{10}(u_{\phi})$", vmin=-3, vmax=3, window=win, shading='flat')
for axis in axes:
bplt.overlay_field(axis, geom, dump, nlines=nlines*4)
# bplt.plot_thphi(axes[2], geom, np.log10(dump['RHO'][iKH,:,:]), iKH,
# label=r"$\log_{10}(\rho)$ $\theta-\phi$ slice r="+str(rKH), vmin=-4, vmax=1, cmap='jet', shading='flat')
elif movie_type == "kh_radii":
if True: # Half-theta (one jet) switch
awindow = [0,1,0.5,1]
bwindow = [0,rBZ/2,0,rBZ]
else:
awindow = [0,1,0,1]
bwindow = [0,rBZ/2,-rBZ/2,rBZ/2]
rlevels = [10, 20, 40, 80]
axes = [fig.add_subplot(2,3,1), fig.add_subplot(2,3,2), fig.add_subplot(2,3,4), fig.add_subplot(2,3,5)]
bigaxis = fig.add_subplot(1,3,3)
for ax,rlevel in zip(axes, rlevels):
bplt.plot_thphi(ax, geom, np.log10(dump['RHO'][i_of(geom, rlevel),:,:]), i_of(geom, rlevel),
label=r"$\log_{10}(\rho) (r = "+str(rlevel)+")$", vmin=-3, vmax=2, cmap='jet', shading='flat',
arrayspace=True, window=awindow)
# bplt.plot_xz(bigaxis, geom, np.log10(dump['RHO']), label=r"$\log_{10}(\rho) (\phi slice)$",
# vmin=-3, vmax=2, cmap='jet', shading='flat', window=bwindow)
bplt.plot_xz(bigaxis, geom, np.log10(dump['ucon'][:,:,:,3]),
label="", vmin=-3, vmax=0, cmap='Reds', window=bwindow, cbar=False, shading='flat')
bplt.plot_xz(bigaxis, geom, np.log10(-dump['ucon'][:,:,:,3]),
label=r"$\log_{10}(u^{\phi})$", vmin=-3, vmax=0, cmap='Blues', window=bwindow, shading='flat')
bplt.overlay_field(bigaxis, geom, dump)
bplt.overlay_contours(bigaxis, geom, geom['r'][:,:,0], levels=rlevels, color='r')
else: # All other movie types share a layout
ax_slc = lambda i: fig.add_subplot(2, 4, i)
ax_flux = lambda i: fig.add_subplot(4, 2, i)
if movie_type == "traditional":
# Usual movie: RHO beta fluxes
# CUTS
bplt.plot_slices(ax_slc(1), ax_slc(2), geom, dump, np.log10(dump['RHO']),
label=r"$\log_{10}(\rho)$", vmin=-3, vmax=2, cmap='jet')
bplt.plot_slices(ax_slc(5), ax_slc(6), geom, dump, np.log10(dump['beta']),
label=r"$\beta$", vmin=-2, vmax=2, cmap='RdBu_r')
# FLUXES
bplt.diag_plot(ax_flux(2), diag, 'Mdot', dump['t'], ylabel=r"$\dot{M}$", logy=LOG_MDOT)
bplt.diag_plot(ax_flux(4), diag, 'Phi_b', dump['t'], ylabel=r"$\phi_{BH}$", logy=LOG_PHI)
# Mixins:
# Zoomed in RHO
bplt.plot_slices(ax_slc(7), ax_slc(8), geom, dump, np.log10(dump['RHO']),
label=r"$\log_{10}(\rho)$", vmin=-3, vmax=2, window=[-10,10,-10,10], field_overlay=False)
# Bsq
# bplt.plot_slices(ax_slc[6], ax_slc[7], geom, dump, np.log10(dump['bsq']),
# label=r"$b^2$", vmin=-5, vmax=0, cmap='Blues')
# Failures: all failed zones, one per nonzero pflag
# bplt.plot_slices(ax_slc[6], ax_slc[7], geom, dump, dump['fail'] != 0,
# label="Failed zones", vmin=0, vmax=20, cmap='Reds', int=True) #, arrspace=True)
# 2D histograms
# bplt.hist_2d(ax_slc[6], np.log10(dump['RHO']), np.log10(dump['UU']),r"$\log_{10}(\rho)$", r"$\log_{10}(U)$", logcolor=True)
# bplt.hist_2d(ax_slc[7], np.log10(dump['UU']), np.log10(dump['bsq']),r"$\log_{10}(U)$", r"$b^2$", logcolor=True)
# Extra fluxes:
# bplt.diag_plot(ax_flux[1], diag, dump, 'edot', r"\dot{E}", logy=LOG_PHI)
elif movie_type == "e_ratio":
# Energy ratios: difficult places to integrate, with failures
      bplt.plot_slices(ax_slc(1), ax_slc(2), geom, dump, np.log10(dump['UU']/dump['RHO']),
                       label=r"$\log_{10}(U / \rho)$", vmin=-3, vmax=3, average=True)
      bplt.plot_slices(ax_slc(3), ax_slc(4), geom, dump, np.log10(dump['bsq']/dump['RHO']),
                       label=r"$\log_{10}(b^2 / \rho)$", vmin=-3, vmax=3, average=True)
      bplt.plot_slices(ax_slc(5), ax_slc(6), geom, dump, np.log10(1/dump['beta']),
                       label=r"$\beta^{-1}$", vmin=-3, vmax=3, average=True)
      bplt.plot_slices(ax_slc(7), ax_slc(8), geom, dump, dump['fail'] != 0,
                       label="Failures", vmin=0, vmax=20, cmap='Reds', int=True) #, arrspace=True)
elif movie_type == "conservation":
# Continuity plots to verify local conservation of energy, angular + linear momentum
# Integrated T01: continuity for momentum conservation
bplt.plot_slices(ax_slc[0], ax_slc[1], geom, dump, Tmixed(dump, 1, 0),
label=r"$T^1_0$ Integrated", vmin=0, vmax=600, arrspace=True, integrate=True)
# integrated T00: continuity plot for energy conservation
bplt.plot_slices(ax_slc[4], ax_slc[5], geom, dump, np.abs(Tmixed(dump, 0, 0)),
label=r"$T^0_0$ Integrated", vmin=0, vmax=3000, arrspace=True, integrate=True)
# Usual fluxes for reference
bplt.diag_plot(ax_flux[1], diag, 'Mdot', dump['t'], ylabel=r"$\dot{M}$", logy=LOG_MDOT)
#bplt.diag_plot(ax_flux[3], diag, 'Phi_b', dump['t'], ylabel=r"$\phi_{BH}$", logy=LOG_PHI)
# Radial conservation plots
E_r = sum_shell(geom,Tmixed(geom, dump, 0,0))
Ang_r = sum_shell(geom,Tmixed(geom, dump, 0,3))
mass_r = sum_shell(dump['ucon'][:,:,:,0]*dump['RHO'])
# TODO arrange legend better -- add labels when radial/diag plotting
bplt.radial_plot(ax_flux[3], geom, np.abs(E_r), 'Conserved vars at R', ylim=(0,1000), rlim=(0,20), arrayspace=True)
bplt.radial_plot(ax_flux[3], geom, np.abs(Ang_r)/10, '', ylim=(0,1000), rlim=(0,20), col='r', arrayspace=True)
bplt.radial_plot(ax_flux[3], geom, np.abs(mass_r), '', ylim=(0,1000), rlim=(0,20), col='b', arrayspace=True)
# Radial energy accretion rate
Edot_r = sum_shell(geom, Tmixed(geom, dump,1,0))
bplt.radial_plot(ax_flux[5], geom, np.abs(Edot_r), 'Edot at R', ylim=(0,200), rlim=(0,20), arrayspace=True)
# Radial integrated failures
bplt.radial_plot(ax_flux[7], geom, (dump['fail'] != 0).sum(axis=(1,2)), 'Fails at R', arrayspace=True, rlim=[0,50], ylim=[0,1000])
elif movie_type == "floors":
# TODO add measures of all floors' efficacy. Record ceilings in header or extras?
bplt.plot_slices(ax_flux[0], ax_flux[1], geom, dump['bsq']/dump['RHO'] - 100,
vmin=-100, vmax=100, cmap='RdBu_r')
bplt.diag_plot(ax, diag, dump, 'sigma_max', 'sigma_max')
elif movie_type in d_fns: # Hail mary for plotting new functions one at a time
axes = [fig.add_subplot(1,2,1), fig.add_subplot(1,2,2)]
win=[l*2 for l in window]
var = d_fns[movie_type](dump)
bplt.plot_slices(axes[0], axes[1], geom, dump, np.log10(var), vmin=-3, vmax=3, cmap='Reds', window=win)
bplt.plot_slices(axes[0], axes[1], geom, dump, np.log10(-var), vmin=-3, vmax=3, cmap='Blues', window=win)
else:
print("Movie type not known!")
exit(1)
# Extra padding for crowded 4x2 plots
pad = 0.03
plt.subplots_adjust(left=pad, right=1-pad, bottom=pad, top=1-pad)
plt.savefig(imname, dpi=1920/FIGX) # TODO the group projector is like 4:3 man
plt.close(fig)
dump.clear()
del dump
if __name__ == "__main__":
  # PROCESS ARGUMENTS
  tstart = None
  tend = None
  if sys.argv[1] == '-d':
debug = True
movie_type = sys.argv[2]
path = sys.argv[3]
if len(sys.argv) > 4:
tstart = float(sys.argv[4])
if len(sys.argv) > 5:
tend = float(sys.argv[5])
else:
debug = False
movie_type = sys.argv[1]
path = sys.argv[2]
if len(sys.argv) > 3:
tstart = float(sys.argv[3])
if len(sys.argv) > 4:
tend = float(sys.argv[4])
# LOAD FILES
files = io.get_dumps_list(path)
if len(files) == 0:
util.warn("INVALID PATH TO DUMP FOLDER")
sys.exit(1)
frame_dir = "frames_"+movie_type
util.make_dir(frame_dir)
hdr = io.load_hdr(files[0])
geom = io.load_geom(hdr, path)
jmin, jmax = get_j_vals(geom)
#print("jmin: {} jmax: {}".format(jmin, jmax))
if diag_post:
# Load fluxes from post-analysis: more flexible
diag = pickle.load(open("eht_out.p", 'rb'))
else:
# Load diagnostics from HARM itself
diag = io.load_log(path)
nthreads = util.calc_nthreads(hdr, pad=0.3)
if debug:
# Run sequentially to make backtraces work
for i in range(len(files)):
plot(i)
else:
util.run_parallel(plot, len(files), nthreads)
| 16,449 | 46.40634 | 163 |
py
|
iharm3d
|
iharm3d-master/script/analysis/movie_compare.py
|
################################################################################
# #
# GENERATE MOVIES COMPARING 2 SIMULATIONS' OUTPUT #
# #
################################################################################
# Local
import hdf5_to_dict as io
import plot as bplt
from analysis_fns import *
import util
# System
import sys; sys.dont_write_bytecode = True
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import pickle
# Movie size in ~inches. Keep 16/9 for standard-size movies
FIGX = 10
FIGY = 8
#FIGY = FIGX*9/16
def plot(n):
imname = os.path.join(frame_dir, 'frame_%08d.png' % n)
tdump = io.get_dump_time(files1[n])
if (tstart is not None and tdump < tstart) or (tend is not None and tdump > tend):
return
# Don't calculate b/ucon/cov/e- stuff unless we need it below
dump1 = io.load_dump(files1[n], hdr1, geom1, derived_vars = False, extras = False)
dump2 = io.load_dump(files2[n], hdr2, geom2, derived_vars = False, extras = False)
fig = plt.figure(figsize=(FIGX, FIGY))
  # Keep same parameters between plots, even for SANE/MAD
rho_l, rho_h = -3, 2
window = [-20,20,-20,20]
nlines1 = 20
nlines2 = 5
# But BZ stuff is done individually
if hdr1['r_out'] < 100:
iBZ1 = i_of(geom1,40) # most SANEs
rBZ1 = 40
else:
iBZ1 = i_of(geom1,100) # most MADs
rBZ1 = 100
if hdr2['r_out'] < 100:
iBZ2 = i_of(geom2,40)
rBZ2 = 40
else:
iBZ2 = i_of(geom2,100)
rBZ2 = 100
if movie_type == "simplest":
# Simplest movie: just RHO
gs = gridspec.GridSpec(1, 2)
ax_slc = [plt.subplot(gs[0]), plt.subplot(gs[1])]
    bplt.plot_xz(ax_slc[0], geom1, np.log10(dump1['RHO']), label=r"$\log_{10}(\rho)$, MAD",
                 ylabel=False, vmin=rho_l, vmax=rho_h, window=window, half_cut=True, cmap='jet')
    bplt.overlay_field(ax_slc[0], geom1, dump1, nlines1)
    bplt.plot_xz(ax_slc[1], geom2, np.log10(dump2['RHO']), label=r"$\log_{10}(\rho)$, SANE",
                 ylabel=False, vmin=rho_l, vmax=rho_h, window=window, half_cut=True, cmap='jet')
    bplt.overlay_field(ax_slc[1], geom2, dump2, nlines2)
elif movie_type == "simpler":
# Simpler movie: RHO and phi
gs = gridspec.GridSpec(2, 2, height_ratios=[5, 1])
ax_slc = [plt.subplot(gs[0,0]), plt.subplot(gs[0,1])]
ax_flux = [plt.subplot(gs[1,:])]
    bplt.plot_xz(ax_slc[0], geom1, np.log10(dump1['RHO']), label=r"$\log_{10}(\rho)$, MAD",
                 ylabel=False, vmin=rho_l, vmax=rho_h, window=window, cmap='jet')
    bplt.overlay_field(ax_slc[0], geom1, dump1, nlines1)
    bplt.plot_xz(ax_slc[1], geom2, np.log10(dump2['RHO']), label=r"$\log_{10}(\rho)$, SANE",
                 ylabel=False, vmin=rho_l, vmax=rho_h, window=window, cmap='jet')
    bplt.overlay_field(ax_slc[1], geom2, dump2, nlines2)
# This is way too custom
ax = ax_flux[0]; ylim=[0,80]
slc1 = np.where((diag1['phi'] > ylim[0]) & (diag1['phi'] < ylim[1]))
slc2 = np.where((diag2['phi'] > ylim[0]) & (diag2['phi'] < ylim[1]))
ax.plot(diag1['t'][slc1], diag1['phi'][slc1], 'r', label="MAD")
ax.plot(diag2['t'][slc2], diag2['phi'][slc2], 'b', label="SANE")
ax.set_xlim([diag1['t'][0], diag1['t'][-1]])
ax.axvline(dump1['t'], color='r')
ax.set_ylim(ylim)
ax.set_ylabel(r"$\phi_{BH}$")
ax.legend(loc=2)
elif movie_type == "rho_cap":
axes = [plt.subplot(2,3,i) for i in range(1,7)]
bplt.plot_slices(axes[0], axes[1], geom1, dump1, np.log10(dump1['RHO']),
label=r"$\log_{10}(\rho) (1)$", vmin=-3, vmax=2, cmap='jet')
bplt.overlay_contours(axes[0], geom1, geom1['r'], [rBZ1], color='k')
bplt.plot_thphi(axes[2], geom1, np.log10(dump1['RHO'][iBZ1,:,:]), iBZ1, vmin=-4, vmax=1,
label=r"$\log_{10}(\rho)$ $\theta-\phi$ slice r="+str(rBZ1)+" (1)")
bplt.plot_slices(axes[3], axes[4], geom2, dump2, np.log10(dump2['RHO']),
label=r"$\log_{10}(\rho) (2)$", vmin=-3, vmax=2, cmap='jet')
bplt.overlay_contours(axes[3], geom2, geom2['r'], [rBZ2], color='k')
bplt.plot_thphi(axes[5], geom2, np.log10(dump2['RHO'][iBZ2,:,:]), iBZ2, vmin=-4, vmax=1,
label=r"$\log_{10}(\rho)$ $\theta-\phi$ slice r="+str(rBZ2)+" (2)")
pad = 0.05
plt.subplots_adjust(left=pad, right=1-pad, bottom=2*pad, top=1-pad)
plt.savefig(imname, dpi=1920/FIGX)
plt.close(fig)
if __name__ == "__main__":
  # PROCESS ARGUMENTS
  tstart = None
  tend = None
  if sys.argv[1] == '-d':
debug = True
movie_type = sys.argv[2]
path1 = sys.argv[3]
path2 = sys.argv[4]
if len(sys.argv) > 5:
tstart = float(sys.argv[5])
if len(sys.argv) > 6:
tend = float(sys.argv[6])
else:
debug = False
movie_type = sys.argv[1]
path1 = sys.argv[2]
path2 = sys.argv[3]
if len(sys.argv) > 4:
tstart = float(sys.argv[4])
if len(sys.argv) > 5:
tend = float(sys.argv[5])
# LOAD FILES
files1 = io.get_dumps_list(path1)
files2 = io.get_dumps_list(path2)
if len(files1) == 0 or len(files2) == 0:
util.warn("INVALID PATH TO DUMP FOLDER")
sys.exit(1)
frame_dir = "frames_compare_"+movie_type
util.make_dir(frame_dir)
hdr1 = io.load_hdr(files1[0])
hdr2 = io.load_hdr(files2[0])
geom1 = io.load_geom(hdr1, path1)
geom2 = io.load_geom(hdr2, path2)
# TODO diags from post?
# Load diagnostics from HARM itself
diag1 = io.load_log(path1)
diag2 = io.load_log(path2)
nthreads = util.calc_nthreads(hdr1)
if debug:
for i in range(len(files1)):
plot(i)
else:
util.run_parallel(plot, len(files1), nthreads)
| 5,780 | 34.466258 | 96 |
py
|
iharm3d
|
iharm3d-master/script/analysis/quick_plot.py
|
################################################################################
# #
# PLOT ONE PRIMITIVE #
# #
################################################################################
import hdf5_to_dict as io
import plot as bplt
from analysis_fns import *
import units
import matplotlib
import matplotlib.pyplot as plt
import sys
import numpy as np
from scipy.signal import convolve2d
# TODO parse lots of options I set here
USEARRSPACE=False
UNITS=False
SIZE = 100
window=[-SIZE,SIZE,-SIZE,SIZE]
#window=[-SIZE/4,SIZE/4,0,SIZE]
FIGX = 10
FIGY = 10
dumpfile = sys.argv[1]
if len(sys.argv) > 3:
gridfile = sys.argv[2]
var = sys.argv[3]
elif len(sys.argv) > 2:
gridfile = None
var = sys.argv[2]
# Optionally take extra name
name = sys.argv[-1]
if UNITS and var not in ['Tp']:
M_unit = float(sys.argv[-1])
if gridfile is not None:
hdr = io.load_hdr(dumpfile)
geom = io.load_geom(hdr, gridfile)
dump = io.load_dump(dumpfile, hdr, geom)
else:
# Assumes gridfile in same directory
hdr,geom,dump = io.load_all(dumpfile)
# If we're plotting a derived variable, calculate + add it
if var in ['jcov', 'jsq']:
  dump['jcov'] = np.einsum("...i,...ij->...j", dump['jcon'], geom['gcov'][:,:,None,:,:])
dump['jsq'] = np.sum(dump['jcon']*dump['jcov'], axis=-1)
elif var in ['divE2D']:
JE1g, JE2g = T_mixed(dump, 1,0).mean(axis=-1)*geom['gdet'], T_mixed(dump, 2,0).mean(axis=-1)*geom['gdet']
face_JE1 = 0.5*(JE1g[:-1,:] + JE1g[1:,:])
face_JE2 = 0.5*(JE2g[:,:-1] + JE2g[:,1:])
divJE = (face_JE1[1:,1:-1] - face_JE1[:-1,1:-1]) / geom['dx1'] + (face_JE2[1:-1,1:] - face_JE2[1:-1,:-1]) / geom['dx2']
dump[var] = np.zeros_like(dump['RHO'])
dump[var][1:-1,1:-1,0] = divJE
dump[var] /= np.sqrt(T_mixed(dump, 1,0)**2 + T_mixed(dump, 2,0)**2 + T_mixed(dump, 3,0)**2)*geom['gdet'][:,:,None]
elif var in ['divB2D']:
B1g, B2g = dump['B1'].mean(axis=-1)*geom['gdet'], dump['B2'].mean(axis=-1)*geom['gdet']
corner_B1 = 0.5*(B1g[:,1:] + B1g[:,:-1])
corner_B2 = 0.5*(B2g[1:,:] + B2g[:-1,:])
divB = (corner_B1[1:,:] - corner_B1[:-1,:]) / geom['dx1'] + (corner_B2[:,1:] - corner_B2[:,:-1]) / geom['dx2']
dump[var] = np.zeros_like(dump['RHO'])
dump[var][:-1,:-1,0] = divB
dump[var] /= np.sqrt(dump['B1']**2 + dump['B2']**2 + dump['B3']**2)*geom['gdet'][:,:,None]
elif var in ['divB3D']:
B1g, B2g, B3g = dump['B1']*geom['gdet'][:,:,None], dump['B2']*geom['gdet'][:,:,None], dump['B3']*geom['gdet'][:,:,None]
corner_B1 = 0.25*(B1g[:,1:,1:] + B1g[:,1:,:-1] + B1g[:,:-1,1:] + B1g[:,:-1,:-1])
corner_B2 = 0.25*(B2g[1:,:,1:] + B2g[1:,:,:-1] + B2g[:-1,:,1:] + B2g[:-1,:,:-1])
corner_B3 = 0.25*(B3g[1:,1:,:] + B3g[1:,:-1,:] + B3g[:-1,1:,:] + B3g[:-1,:-1,:])
divB = (corner_B1[1:,:,:] - corner_B1[:-1,:,:]) / geom['dx1'] + (corner_B2[:,1:,:] - corner_B2[:,:-1,:]) / geom['dx2'] + (corner_B3[:,:,1:] - corner_B3[:,:,:-1]) / geom['dx3']
dump[var] = np.zeros_like(dump['RHO'])
dump[var][:-1,:-1,:-1] = divB
dump[var] /= np.sqrt(dump['B1']**2 + dump['B2']**2 + dump['B3']**2)*geom['gdet'][:,:,None]
elif var[-4:] == "_pdf":
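  # Volume-weighted (gdet) histogram of log10(var): a normalized PDF over 200 bins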
var_og = var[:-4]
dump[var_og] = d_fns[var_og](dump)
dump[var], dump[var+'_bins'] = np.histogram(np.log10(dump[var_og]), bins=200, range=(-3.5,3.5), weights=np.repeat(geom['gdet'], geom['n3']).reshape(dump[var_og].shape), density=True)
elif var not in dump:
dump[var] = d_fns[var](dump)
# Add units after all calculations, manually
if UNITS:
  # cgs constants are needed in both branches (assumes the dict provides 'KBOL', 'ME', 'CL')
  cgs = units.get_cgs()
  if var in ['Tp']:
    dump[var] /= cgs['KBOL']
  else:
    unit = units.get_units_M87(M_unit, tp_over_te=3)
    if var in ['bsq']:
      dump[var] *= unit['B_unit']**2
    elif var in ['B']:
      dump[var] *= unit['B_unit']
    elif var in ['Ne']:
      dump[var] = dump['RHO'] * unit['Ne_unit']
    elif var in ['Te']:
      dump[var] = cgs['ME'] * cgs['CL']**2 * unit['Thetae_unit'] * dump['UU']/dump['RHO']
    elif var in ['Thetae']:
      # TODO non-const te
      dump[var] = unit['Thetae_unit'] * dump['UU']/dump['RHO']
fig = plt.figure(figsize=(FIGX, FIGY))
# Treat PDFs separately
if var[-4:] == "_pdf":
plt.plot(dump[var+'_bins'][:-1], dump[var])
plt.title("PDF of "+var[:-4])
plt.xlabel("Log10 value")
plt.ylabel("Probability")
plt.savefig(name+".png", dpi=100)
plt.close(fig)
  sys.exit()
# Plot XY differently for vectors, scalars
if var in ['jcon','ucon','ucov','bcon','bcov']:
axes = [plt.subplot(2, 2, i) for i in range(1,5)]
for n in range(4):
bplt.plot_xy(axes[n], geom, np.log10(dump[var][:,:,:,n]), arrayspace=USEARRSPACE, window=window)
elif var not in ['divE2D', 'divB2D']:
# TODO allow specifying vmin/max, average from command line or above
ax = plt.subplot(1, 1, 1)
bplt.plot_xy(ax, geom, dump[var], arrayspace=USEARRSPACE, window=window, vmin=1e10, vmax=1e12)
plt.tight_layout()
plt.savefig(name+"_xy.png", dpi=100)
plt.close(fig)
fig = plt.figure(figsize=(FIGX, FIGY))
# Plot XZ
if var in ['jcon', 'ucon', 'ucov', 'bcon', 'bcov']:
axes = [plt.subplot(2, 2, i) for i in range(1, 5)]
for n in range(4):
bplt.plot_xz(axes[n], geom, np.log10(dump[var][:,:,:,n]), arrayspace=USEARRSPACE, window=window)
elif var in ['divB2D', 'divE2D', 'divE2D_face', 'divB3D']:
ax = plt.subplot(1, 1, 1)
bplt.plot_xz(ax, geom, np.log10(np.abs(dump[var])), arrayspace=USEARRSPACE, window=window, vmin=-6, vmax=0)
if var in ['divE2D', 'divE2D_face']:
#JE1 = -T_mixed(dump, 1,0)
#JE2 = -T_mixed(dump, 2,0)
JE1 = dump['ucon'][:,:,:,1]
JE2 = dump['ucon'][:,:,:,2]
bplt.overlay_flowlines(ax, geom, JE1, JE2, nlines=20, arrayspace=USEARRSPACE)
#bplt.overlay_quiver(ax, geom, JE1, JE2)
else:
bplt.overlay_field(ax, geom, dump, nlines=20, arrayspace=USEARRSPACE)
else:
ax = plt.subplot(1, 1, 1)
bplt.plot_xz(ax, geom, np.log10(dump[var]), vmin=-3, vmax=1, arrayspace=USEARRSPACE, window=window)
norm = np.sqrt(dump['ucon'][:,:,0,1]**2 + dump['ucon'][:,:,0,2]**2)*geom['gdet']
JF1 = dump['ucon'][:,:,:,1] #/norm
JF2 = dump['ucon'][:,:,:,2] #/norm
#bplt.overlay_quiver(ax, geom, dump, JF1, JF2, cadence=96, norm=15)
bplt.overlay_flowlines(ax, geom, JF1, JF2, nlines=100, arrayspace=USEARRSPACE, reverse=True)
plt.tight_layout()
plt.savefig(name+"_xz.png", dpi=100)
plt.close(fig)
| 6,425 | 37.023669 | 184 |
py
|
iharm3d
|
iharm3d-master/script/analysis/luminosity_th_study.py
|
#!/usr/bin/env python3
import os, sys
import pickle
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import util
import hdf5_to_dict as io
import plot as bplt
from analysis_fns import *
from defs import Met, Loci
from coordinates import dxdX_to_KS, dxdX_KS_to
FIGX=15
FIGY=15
# Decide where to measure fluxes
def i_of(geom, rcoord):
i = 0
while geom['r'][i,geom['n2']//2,0] < rcoord:
i += 1
i -= 1
return i
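# Find where a theta profile crosses the value 'cut', by locating sign changes
# between adjacent zones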
def cut_pos(var, cut):
if var.ndim > 2:
var_X2 = bplt.flatten_xz(var, average=True)[iBZ,:hdr['n2']//2]
else:
var_X2 = var
var_cut = np.where( np.logical_or(
np.logical_and(var_X2[:-1] > cut, var_X2[1:] < cut),
np.logical_and(var_X2[:-1] < cut, var_X2[1:] > cut)))
return var_cut
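# Overlay the jet/wind boundary criteria used throughout this script
# (theta = 1, betagamma = 1, sigma = 1, FE = 0, Be = 0.02, mu = 2) on a
# theta-phi slice near r = 40 or r = 100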
def overlay_thphi_contours(ax, geom, avg, legend=False):
if geom['r_out'] < 100:
iBZ = i_of(geom, 40)
else:
iBZ = i_of(geom, 100)
max_th = geom['n2']//2
x = bplt.loop_phi(geom['x'][iBZ,:max_th,:])
y = bplt.loop_phi(geom['y'][iBZ,:max_th,:])
prep = lambda var : bplt.loop_phi(var[:max_th,:])
cntrs = []
cntrs.append(ax.contour(x,y, prep(geom['th'][iBZ,:,:]), [1.0], colors='k'))
cntrs.append(ax.contour(x,y, prep(avg['betagamma_100_thphi']), [1.0], colors='k'))
cntrs.append(ax.contour(x,y, prep(avg['bsq_100_thphi']/avg['rho_100_thphi']), [1.0], colors='xkcd:green'))
cntrs.append(ax.contour(x,y, prep(avg['FE_100_thphi']), [0.0], colors='xkcd:pink'))
cntrs.append(ax.contour(x,y, prep(avg['Be_nob_100_thphi']), [0.02], colors='xkcd:red'))
cntrs.append(ax.contour(x,y, prep(avg['mu_100_thphi']), [2.0], colors='xkcd:blue'))
clegends = [cnt.legend_elements()[0][0] for cnt in cntrs]
if legend: ax.legend(clegends, [r"$\theta$ = 1", r"$\beta\gamma$ = 1", r"$\sigma$ = 1", r"FE = 0", r"Be = 0.02", r"$\mu$ = 2"])
def overlay_rth_contours(ax, geom, avg, legend=False):
cntrs = []
cntrs.append(bplt.overlay_contours(ax, geom, geom['th'][:,:,0], [1.0, np.pi-1.0], color='k'))
cntrs.append(bplt.overlay_contours(ax, geom, avg['betagamma_rth'], [1.0], color='k'))
cntrs.append(bplt.overlay_contours(ax, geom, avg['bsq_rth']/avg['rho_rth'], [1.0], color='xkcd:green'))
cntrs.append(bplt.overlay_contours(ax, geom, avg['FE_rth'], [0.0], color='xkcd:pink'))
cntrs.append(bplt.overlay_contours(ax, geom, avg['Be_nob_rth'], [0.02], color='xkcd:red'))
cntrs.append(bplt.overlay_contours(ax, geom, avg['mu_rth'], [2.0], color='xkcd:blue'))
clegends = [cnt.legend_elements()[0][0] for cnt in cntrs]
if legend: ax.legend(clegends, [r"$\theta$ = 1", r"$\beta\gamma$ = 1", r"$\sigma$ = 1", r"FE = 0", r"Be = 0.02", r"$\mu$ = 2"], loc='upper right')
def overlay_th_contours(ax, avg):
th_cut1 = cut_pos(avg['th100'][:hdr['n2']//2], 1.0)
bg_cut1 = cut_pos(np.mean(avg['betagamma_100_thphi'], axis=-1)[:hdr['n2']//2], 1.0)
sigma_cut1 = cut_pos(np.mean(avg['bsq_100_thphi']/avg['rho_100_thphi'],axis=-1)[:hdr['n2']//2], 1.0)
fe_cut0 = cut_pos(np.mean(avg['FE_100_thphi'],axis=-1)[:hdr['n2']//2], 0.0)
be_nob0_cut = cut_pos(avg['Be_nob_100_th'][:hdr['n2']//2]/hdr['n3'], 0.02)
mu_cut2 = cut_pos(avg['mu_100_thphi'][:hdr['n2']//2]/hdr['n3'], 2.0)
ylim = ax.get_ylim()
ax.vlines(avg['th100'][th_cut1], ylim[0], ylim[1], colors='k', label=r"$\theta$ = 1")
ax.vlines(avg['th100'][bg_cut1], ylim[0], ylim[1], colors='k', label=r"$\beta\gamma$ = 1")
ax.vlines(avg['th100'][sigma_cut1], ylim[0], ylim[1], colors='xkcd:green', label=r"$\sigma$ = 1")
ax.vlines(avg['th100'][fe_cut0], ylim[0], ylim[1], colors='xkcd:pink', label=r"FE = 0")
ax.vlines(avg['th100'][be_nob0_cut], ylim[0], ylim[1], colors='xkcd:red', label=r"Be = 0.02")
#ax.vlines(avg['th100'][mu_cut2], ylim[0], ylim[1], colors='xkcd:blue', label=r"$\mu$ = 2")
if __name__ == "__main__":
run_name = sys.argv[1]
dumpfile = os.path.join("/scratch/03002/bprather/pharm_dumps/M87SimulationLibrary/GRMHD",run_name,"dumps/dump_00001500.h5")
hdr,geom,dump = io.load_all(dumpfile)
plotfile = os.path.join("/work/03002/bprather/stampede2/movies",run_name,"eht_out.p")
avg = pickle.load(open(plotfile, "rb"))
# BZ luminosity; see eht_analysis
if hdr['r_out'] < 100:
iBZ = i_of(geom, 40) # most SANEs
rBZ = 40
rstring = "40"
else:
iBZ = i_of(geom, 100) # most MADs
rBZ = 100
rstring = "100"
# For converting differentials to theta
avg['X2'] = geom['X2'][iBZ,:,0]
Xgeom = np.zeros((4,1,geom['n2']))
Xgeom[1] = geom['X1'][iBZ,:,0]
Xgeom[2] = geom['X2'][iBZ,:,0]
to_dth_bz = dxdX_to_KS(Xgeom, Met.FMKS, geom)[2,2,0]
ND = avg['t'].shape[0]
  # Convert times to dump indices; assumes a dump cadence of 5 time units (reliable for now)
start = int(avg['avg_start'])//5
end = int(avg['avg_end'])//5
avg['th100'] = geom['th'][iBZ,:,0]
avg['hth100'] = geom['th'][iBZ,:hdr['n2']//2,0]
  # Write out the raw values for comparison/table import. To obtain values identical to George's, measure 2 zones further out
with open("average_"+run_name.replace("/","_").split("x")[0]+".dat", "w") as datf:
datf.write("# x2 theta dx2/dtheta gdet rho bsq Fem_t Ffl_t F_mass\n")
for i in range(hdr['n2']):
datf.write("{} {} {} {} {} {} {} {} {}\n".format(avg['X2'][i], avg['th100'][i], to_dth_bz[i], geom['gdet'][iBZ,i],
avg['rho_100_th'][i]/hdr['n3'], avg['bsq_100_th'][i]/hdr['n3'],
-avg['FE_EM_100_th'][i]/hdr['n3'], -avg['FE_Fl_100_th'][i]/hdr['n3'],
avg['FM_100_th'][i]/hdr['n3']))
# Add geometric factors to raw sum
for key in avg.keys():
if key[-7:] == "_100_th":
avg[key] *= geom['gdet'][iBZ,:]*hdr['dx3']
start = int(avg['avg_start'])//5
end = int(avg['avg_end'])//5
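  # Sanity check: the theta-integrated energy flux under each jet cut should
  # roughly reproduce the time-averaged luminosities from eht_analysis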
print("Compare sigma > 1: {} vs {}".format(np.mean(avg['Lj_sigma1'][start:end]),
hdr['dx2']*np.sum(avg['FE_100_th'][np.where(avg['bsq_100_th']/avg['rho_100_th'] > 1)])))
print("Compare FE > 0: {} vs {}".format(np.mean(avg['Lj_allp'][start:end]),
hdr['dx2']*np.sum(avg['FE_100_th'][np.where(avg['FE_100_th'] > 0)])))
print("Compare bg > 1: {} vs {}".format(np.mean(avg['Lj_bg1'][start:end]),
hdr['dx2']*np.sum(avg['FE_100_th'][np.where(avg['betagamma_100_th'] > 1)])))
# Convert for plotting in theta
for key in avg.keys():
if key[-7:] == "_100_th":
avg[key] *= to_dth_bz
# L_th
fig, axes = plt.subplots(2,2, figsize=(FIGX, FIGY))
# Plot Luminosity contribution (incl. KE) as a fn of theta at r=100
ax = axes[0,0]
ax.plot(avg['hth100'], avg['FE_100_th'][:hdr['n2']//2], color='C0', label=r"$\frac{d FE_{tot}}{d\theta}$")
ax.plot(avg['hth100'], avg['FE_EM_100_th'][:hdr['n2']//2], color='C1', label=r"$\frac{d FE_{EM}}{d\theta}$")
ax.plot(avg['hth100'], avg['FE_Fl_100_th'][:hdr['n2']//2], color='C3', label=r"$\frac{d FE_{Fl}}{d\theta}$")
prop = np.sum(avg['FE_100_th'][np.where(avg['FE_100_th'] > 0)])/np.max(avg['FE_100_th'])
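  # Rescale the cumulative sums so the accumulated-luminosity curves share the
  # log axes with the differential dFE/dtheta profiles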
Ltot_th_acc = [np.sum(avg['FE_100_th'][:n])/prop for n in range(hdr['n2']//2)]
LBZ_th_acc = [np.sum(avg['FE_EM_100_th'][:n])/prop for n in range(hdr['n2']//2)]
ax.plot(avg['hth100'], Ltot_th_acc, 'C4', label=r"Acc. $L_{tot}$")
ax.plot(avg['hth100'], LBZ_th_acc, 'C5', label=r"Acc. $L_{BZ}$")
overlay_th_contours(ax,avg)
# ax.axhline(0.0, color='k', linestyle=':')
ax.set_ylim([1e-2, None])
ax.set_yscale('log')
ax.legend(loc='upper right')
ax = axes[0,1]
ax.plot(avg['hth100'], avg['FL_100_th'][:hdr['n2']//2], color='C6', label=r"$\frac{d FL_{tot}}{d\theta}$")
ax.plot(avg['hth100'], avg['FL_EM_100_th'][:hdr['n2']//2], color='C7', label=r"$\frac{d FL_{EM}}{d\theta}$")
ax.plot(avg['hth100'], avg['FL_Fl_100_th'][:hdr['n2']//2], color='C8', label=r"$\frac{d FL_{Fl}}{d\theta}$")
ax.set_ylim([1e-2, None])
ax.set_yscale('log')
ax.legend(loc='upper right')
ax = axes[1,0]
ax.plot(avg['hth100'], avg['FM_100_th'][:hdr['n2']//2], color='k', label=r"$\frac{d FM_{tot}}{d\theta}$")
ax.set_ylim([1e-2, None])
ax.set_yscale('log')
ax.legend(loc='upper right')
ax = axes[1,1]
ax.plot(avg['hth100'], avg['rho_100_th'][:hdr['n2']//2], color='k', label=r"$\rho$")
ax.set_yscale('log')
ax.legend(loc='upper right')
plt.savefig(run_name.replace("/", "_") + '_L_th.png')
plt.close(fig)
fig, ax = plt.subplots(2,2,figsize=(FIGX, FIGY))
bplt.plot_thphi(ax[0,0], geom, np.log10(avg['FE_100_thphi']), iBZ, project=False, label = r"FE $\theta-\phi$ slice")
overlay_thphi_contours(ax[0,0], geom, avg, legend=True)
bplt.plot_thphi(ax[0,1], geom, np.log10(avg['FM_100_thphi']), iBZ, project=False, label = r"FM $\theta-\phi$ slice")
overlay_thphi_contours(ax[0,1], geom, avg)
bplt.plot_thphi(ax[1,0], geom, np.log10(avg['FL_100_thphi']), iBZ, project=False, label = r"FL $\theta-\phi$ slice")
overlay_thphi_contours(ax[1,0], geom, avg)
bplt.plot_thphi(ax[1,1], geom, np.log10(avg['rho_100_thphi']), iBZ, project=False, label = r"$\rho$ $\theta-\phi$ slice")
overlay_thphi_contours(ax[1,1], geom, avg)
plt.savefig(run_name.replace("/", "_") + '_L_100_thphi.png')
plt.close(fig)
fig, ax = plt.subplots(2,2,figsize=(FIGX, FIGY))
bplt.plot_xz(ax[0,0], geom, np.log10(avg['FE_rth']), label = "FE X-Z Slice")
overlay_rth_contours(ax[0,0], geom, avg, legend=True)
bplt.plot_xz(ax[0,1], geom, np.log10(avg['FM_rth']), label = "FM X-Z Slice")
overlay_rth_contours(ax[0,1], geom, avg)
bplt.plot_xz(ax[1,0], geom, np.log10(avg['FL_rth']), label = "FL X-Z Slice")
overlay_rth_contours(ax[1,0], geom, avg)
bplt.plot_xz(ax[1,1], geom, np.log10(avg['rho_rth']), label = "RHO X-Z Slice")
overlay_rth_contours(ax[1,1], geom, avg)
plt.savefig(run_name.replace("/", "_") + '_L_rth.png')
plt.close(fig)
| 9,874 | 42.888889 | 148 |
py
|