repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---
RAML | RAML-master/incremental/datasets/.ipynb_checkpoints/cityscapes_novel-checkpoint.py | import json
import os
from collections import namedtuple
from matplotlib import set_loglevel
import torch
import torch.utils.data as data
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms
class Cityscapes_Novel(data.Dataset):
"""Cityscapes <http://www.cityscapes-dataset.com/> Dataset.
**Parameters:**
- **root** (string): Root directory of dataset where directory 'leftImg8bit' and 'gtFine' or 'gtCoarse' are located.
- **split** (string, optional): The image split to use, 'train', 'test' or 'val' if mode="gtFine" otherwise 'train', 'train_extra' or 'val'
- **mode** (string, optional): The quality mode to use, 'gtFine' or 'gtCoarse' or 'color'. Can also be a list to output a tuple with all specified target types.
    - **transform** (callable, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g., ``transforms.RandomCrop``
- **target_transform** (callable, optional): A function/transform that takes in the target and transforms it.
"""
# Based on https://github.com/mcordts/cityscapesScripts
CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
'has_instances', 'ignore_in_eval', 'color'])
classes = [
CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
]
train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
train_id_to_color.append([0, 0, 0])
train_id_to_color = np.array(train_id_to_color)
id_to_train_id = np.array([c.train_id for c in classes])
unknown_target = None
# unknown_target = [1, 3, 4, 5, 6, 7, 8, 9, 12, 14, 15, 16, 18]
unknown_target = [13,14,15]
# unknown_target = [i for i in range(19)]
# unknown_target.pop(13)
print('unknown_target is : ', unknown_target)
# unknown_target = [18]
#train_id_to_color = [(0, 0, 0), (128, 64, 128), (70, 70, 70), (153, 153, 153), (107, 142, 35),
# (70, 130, 180), (220, 20, 60), (0, 0, 142)]
#train_id_to_color = np.array(train_id_to_color)
#id_to_train_id = np.array([c.category_id for c in classes], dtype='uint8') - 1
def __init__(self, novel_path, novel_no, novel_name='novel.txt', transform=None):
self.root=os.path.join(novel_path,str(novel_no))
self.root=os.path.join(self.root,novel_name)
self.transform=transform
        self.images = []
        self.targets = []
        # each line of novel.txt holds "<image_path>\t<label_path>"
        with open(self.root, 'r') as f:
            for line in f:
                image_path, target_path = line.strip('\n').split('\t')
                self.images.append(image_path)
                self.targets.append(target_path)
# self.targets = self.images
# print(self.images)
# print(self.images[10])
# print(self.images[102])
# print(self.images[107])
# print(self.images[197])
# print(self.images[200])
# print(self.images[207])
# print(self.images[474])
# print(self.images[486])
@classmethod
def encode_target(cls, target):
target = cls.id_to_train_id[np.array(target)]
return target
@classmethod
def decode_target(cls, target):
target[target == 255] = 19
#target = target.astype('uint8') + 1
return cls.train_id_to_color[target]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
            tuple: (image, target), where target is the semantic label map with raw ids mapped to train ids by encode_target.
"""
image = Image.open(self.images[index]).convert('RGB')
# image = Image.open(self.images[index])
target = Image.open(self.targets[index])
if self.transform:
image, target = self.transform(image, target)
target = self.encode_target(target)
# unloader = transforms.ToPILImage()
#
# plt.figure()
# plt.imshow(unloader(image.cpu().clone()))
# plt.show()
#
# plt.figure()
# plt.imshow(target)
# plt.show()
#
# plt.figure()
# plt.imshow(target_true)
# plt.show()
#
# instance, counts = np.unique(target, False, False, True)
# print('target', instance, counts)
# instance, counts = np.unique(target_true, False, False, True)
# print('true', instance, counts)
# return image
return image, target
def __len__(self):
return len(self.images)
def _load_json(self, path):
with open(path, 'r') as file:
data = json.load(file)
return data
def _get_target_suffix(self, mode, target_type):
if target_type == 'instance':
return '{}_instanceIds.png'.format(mode)
elif target_type == 'semantic':
return '{}_labelIds.png'.format(mode)
elif target_type == 'color':
return '{}_color.png'.format(mode)
elif target_type == 'polygon':
return '{}_polygons.json'.format(mode)
elif target_type == 'depth':
return '{}_disparity.png'.format(mode) | 8,742 | 48.39548 | 168 | py |
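A minimal usage sketch for the class above (editorial, not part of the repository): the label-id-to-train-id mapping can be exercised directly through the classmethods, while instantiating the dataset requires a `novel_path/<novel_no>/novel.txt` file whose lines are tab-separated image/label path pairs. The `datasets.cityscapes_novel` import path and the paths in the commented lines are assumptions.
import numpy as np
from datasets.cityscapes_novel import Cityscapes_Novel  # assumed import path; this checkpoint mirrors that module

# encode_target maps raw Cityscapes label ids to train ids: road(7)->0, car(26)->13, bicycle(33)->18
print(Cityscapes_Novel.encode_target(np.array([7, 26, 33])))        # [ 0 13 18]
print(Cityscapes_Novel.decode_target(np.array([0, 13, 18])))        # [[128 64 128] [0 0 142] [119 11 32]] RGB colors

# building the dataset itself needs the novel.txt file on disk, e.g. (placeholder paths):
# dataset = Cityscapes_Novel(novel_path='novels', novel_no=1, transform=my_paired_transform)
# image, target = dataset[0]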
RAML | RAML-master/incremental/network/_deeplab.py | import torch
from torch import nn
from torch.nn import functional as F
from .utils import _SimpleSegmentationModel, _SimpleSegmentationModel_embedding, _SimpleSegmentationModel_embedding_self_distillation,_SimpleSegmentationModel_Metric
__all__ = ["DeepLabV3"]
class DeepLabV3(_SimpleSegmentationModel):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
class DeepLabV3_metric(_SimpleSegmentationModel_Metric):
pass
class DeepLabV3_embedding(_SimpleSegmentationModel_embedding):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
class DeepLabV3_embedding_self_distillation(_SimpleSegmentationModel_embedding_self_distillation):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
# class DeepLabHeadV3Plus(nn.Module):
# def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
# super(DeepLabHeadV3Plus, self).__init__()
# self.project = nn.Sequential(
# nn.Conv2d(low_level_channels, 48, 1, bias=False),
# nn.BatchNorm2d(48),
# nn.ReLU(inplace=True),
# )
#
# self.aspp = ASPP(in_channels, aspp_dilate)
#
# self.classifier = nn.Sequential(
# nn.Conv2d(304, 256, 3, padding=1, bias=False),
# nn.BatchNorm2d(256),
# nn.ReLU(inplace=True),
# nn.Conv2d(256, num_classes, 1)
# )
# self._init_weight()
#
# def forward(self, feature):
# low_level_feature = self.project(feature['low_level'])
# output_feature = self.aspp(feature['out'])
# output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
# align_corners=False)
# return self.classifier(torch.cat([low_level_feature, output_feature], dim=1))
#
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
# class DeepLabHead(nn.Module):
# def __init__(self, in_channels, num_classes, aspp_dilate=[12, 24, 36]):
# super(DeepLabHead, self).__init__()
#
# self.classifier = nn.Sequential(
# ASPP(in_channels, aspp_dilate),
# nn.Conv2d(256, 256, 3, padding=1, bias=False),
# nn.BatchNorm2d(256),
# nn.ReLU(inplace=True),
# nn.Conv2d(256, num_classes, 1)
# )
# self._init_weight()
#
# def forward(self, feature):
# return self.classifier( feature['out'] )
#
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
#
# class AtrousSeparableConvolution(nn.Module):
# """ Atrous Separable Convolution
# """
# def __init__(self, in_channels, out_channels, kernel_size,
# stride=1, padding=0, dilation=1, bias=True):
# super(AtrousSeparableConvolution, self).__init__()
# self.body = nn.Sequential(
# # Separable Conv
# nn.Conv2d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=in_channels ),
# # PointWise Conv
# nn.Conv2d( in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias),
# )
#
# self._init_weight()
#
# def forward(self, x):
# return self.body(x)
#
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
#
# class ASPPConv(nn.Sequential):
# def __init__(self, in_channels, out_channels, dilation):
# modules = [
# nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True)
# ]
# super(ASPPConv, self).__init__(*modules)
#
# class ASPPPooling(nn.Sequential):
# def __init__(self, in_channels, out_channels):
# super(ASPPPooling, self).__init__(
# nn.AdaptiveAvgPool2d(1),
# nn.Conv2d(in_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True))
#
# def forward(self, x):
# size = x.shape[-2:]
# x = super(ASPPPooling, self).forward(x)
# return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
#
# class ASPP(nn.Module):
# def __init__(self, in_channels, atrous_rates):
# super(ASPP, self).__init__()
# out_channels = 256
# modules = []
# modules.append(nn.Sequential(
# nn.Conv2d(in_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True)))
#
# rate1, rate2, rate3 = tuple(atrous_rates)
# modules.append(ASPPConv(in_channels, out_channels, rate1))
# modules.append(ASPPConv(in_channels, out_channels, rate2))
# modules.append(ASPPConv(in_channels, out_channels, rate3))
# modules.append(ASPPPooling(in_channels, out_channels))
#
# self.convs = nn.ModuleList(modules)
#
# self.project = nn.Sequential(
# nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True),
# nn.Dropout(0.1),)
#
# def forward(self, x):
# res = []
# for conv in self.convs:
# res.append(conv(x))
# res = torch.cat(res, dim=1)
# return self.project(res)
#
#
#
# def convert_to_separable_conv(module):
# new_module = module
# if isinstance(module, nn.Conv2d) and module.kernel_size[0]>1:
# new_module = AtrousSeparableConvolution(module.in_channels,
# module.out_channels,
# module.kernel_size,
# module.stride,
# module.padding,
# module.dilation,
# module.bias)
# for name, child in module.named_children():
# new_module.add_module(name, convert_to_separable_conv(child))
# return new_module | 8,740 | 39.281106 | 165 | py |
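To make the backbone/classifier contract in the docstrings above concrete, here is a minimal sketch (editorial, not part of the repository) that pairs DeepLabV3 with a toy backbone returning the expected dict and the DeepLabHead defined in network.utils; it assumes the `network` package under RAML-master/incremental is importable.
import torch
from torch import nn
from network._deeplab import DeepLabV3
from network.utils import DeepLabHead

class ToyBackbone(nn.Module):
    """Stand-in backbone that returns the {'out': features} dict the segmentation wrapper expects."""
    def __init__(self):
        super().__init__()
        self.proj = nn.Conv2d(3, 2048, kernel_size=16, stride=16)  # crude 16x downsampling

    def forward(self, x):
        return {'out': self.proj(x)}

model = DeepLabV3(ToyBackbone(), DeepLabHead(in_channels=2048, num_classes=19))
logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 19, 224, 224]) -- upsampled back to the input resolution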
RAML | RAML-master/incremental/network/modeling.py | from .utils import IntermediateLayerGetter, DeepLabHeadV3Plus, DeepLabHead, DeepLabHeadV3Plus_Metric
from ._deeplab import DeepLabV3, DeepLabV3_embedding, DeepLabV3_embedding_self_distillation, DeepLabV3_metric
from .backbone import resnet
from .backbone import mobilenetv2
def _segm_resnet(name, backbone_name, num_classes, output_stride, pretrained_backbone, metric_dim=None, finetune=False):
if output_stride==8:
replace_stride_with_dilation=[False, True, True]
aspp_dilate = [12, 24, 36]
else:
replace_stride_with_dilation=[False, False, True]
aspp_dilate = [6, 12, 18]
backbone = resnet.__dict__[backbone_name](
pretrained=pretrained_backbone,
replace_stride_with_dilation=replace_stride_with_dilation)
inplanes = 2048
low_level_planes = 256
if name=='deeplabv3plus':
return_layers = {'layer4': 'out', 'layer1': 'low_level'}
classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
elif name=='deeplabv3':
return_layers = {'layer4': 'out'}
classifier = DeepLabHead(inplanes , num_classes, aspp_dilate)
elif name=='deeplabv3plus_embedding':
return_layers = {'layer4': 'out', 'layer1': 'low_level'}
classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
elif name=='deeplabv3plus_embedding_self_distillation':
return_layers = {'layer4': 'out', 'layer1': 'low_level'}
elif name=='deeplabv3plus_metirc_resnet101':
return_layers= {'layer4': 'out', 'layer1': 'low_level'}
classifier = DeepLabHeadV3Plus_Metric(inplanes, low_level_planes, num_classes, aspp_dilate,finetune)
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
if name=='deeplabv3plus_embedding':
model = DeepLabV3_embedding(backbone, classifier)
elif name== 'deeplabv3plus_embedding_self_distillation':
model = DeepLabV3_embedding_self_distillation(backbone)
elif name== 'deeplabv3plus_metirc_resnet101':
model = DeepLabV3_metric(backbone,classifier,finetune)
else:
model = DeepLabV3(backbone, classifier)
return model
def _segm_mobilenet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
if output_stride==8:
aspp_dilate = [12, 24, 36]
else:
aspp_dilate = [6, 12, 18]
backbone = mobilenetv2.mobilenet_v2(pretrained=pretrained_backbone, output_stride=output_stride)
# rename layers
backbone.low_level_features = backbone.features[0:4]
backbone.high_level_features = backbone.features[4:-1]
backbone.features = None
backbone.classifier = None
inplanes = 320
low_level_planes = 24
if name=='deeplabv3plus':
return_layers = {'high_level_features': 'out', 'low_level_features': 'low_level'}
classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
elif name=='deeplabv3':
return_layers = {'high_level_features': 'out'}
classifier = DeepLabHead(inplanes , num_classes, aspp_dilate)
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
model = DeepLabV3(backbone, classifier)
return model
def _load_model(arch_type, backbone, num_classes, output_stride, pretrained_backbone, metric_dim=None, finetune=False):
if backbone=='mobilenetv2':
model = _segm_mobilenet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
elif backbone.startswith('resnet'):
model = _segm_resnet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone, metric_dim=metric_dim, finetune=finetune)
else:
raise NotImplementedError
return model
# Deeplab v3
def deeplabv3_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-50 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-101 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True, **kwargs):
"""Constructs a DeepLabV3 model with a MobileNetv2 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
# Deeplab v3+
def deeplabv3plus_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-50 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3+ model with a ResNet-101 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_embedding_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3+ model with a ResNet-101 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus_embedding', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_embedding_self_distillation_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3+ model with a ResNet-101 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus_embedding_self_distillation', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3+ model with a MobileNetv2 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_metirc_resnet101(num_classes=21, metric_dim=64, output_stride=8, pretrained_backbone=True, finetune=False):
return _load_model('deeplabv3plus_metirc_resnet101', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone, metric_dim=metric_dim, finetune=finetune) | 8,134 | 44.194444 | 194 | py |
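A short usage sketch for the factory functions above (editorial, not part of the repository); it assumes you run from RAML-master/incremental so that `network` is importable, and it skips the pretrained-backbone download.
import torch
from network.modeling import deeplabv3plus_resnet50

model = deeplabv3plus_resnet50(num_classes=21, output_stride=16, pretrained_backbone=False)
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 513, 513))
print(out.shape)  # torch.Size([1, 21, 513, 513]) -- logits at input resolution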
RAML | RAML-master/incremental/network/utils.py | from re import M
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from collections import OrderedDict
import json
class DeepLabHeadV3Plus_Metric(nn.Module):
def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36], finetune=False):
super(DeepLabHeadV3Plus_Metric, self).__init__()
self.project = nn.Sequential(
nn.Conv2d(low_level_channels, 48, 1, bias=False),
nn.BatchNorm2d(48),
nn.ReLU(inplace=True),
)
self.aspp = ASPP(in_channels, aspp_dilate)
self.num_meta_channel = 4
#self.num_meta_channel = 2
#self.num_meta_channel = 6
# self.conv1=nn.Conv2d(304, 256, 1, padding=0, stride=1)
self.conv1=nn.Conv2d(304, 256, 3, padding=1, bias=False)
self.bn=nn.BatchNorm2d(256)
self.relu=nn.ReLU(inplace=True)
#self.conv2=nn.Sequential(nn.Conv2d(256, 16, 1), nn.BatchNorm2d(16), nn.Sigmoid())
self.conv2=nn.Conv2d(256, num_classes, 1)
# ablation study : should be num_classes+self.num_meta_channel,equal to 20 in 16+3 mode
self.conv3=nn.Conv2d(num_classes, 20, 1)
self.finetune = finetune
if (self.finetune):
print("only train conv3 in classifier")
self._init_weight()
def forward(self, feature):
if (self.finetune):
with torch.no_grad():
low_level_feature = self.project(feature['low_level'])
output_feature = self.aspp(feature['out'])
output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
align_corners=False)
x = torch.cat([low_level_feature, output_feature], dim=1)
x = self.conv1(x)
x = self.bn(x)
feature = self.relu(x)
output1 = self.conv2(feature)
else:
low_level_feature = self.project(feature['low_level'])
output_feature = self.aspp(feature['out'])
output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
align_corners=False)
x = torch.cat([low_level_feature, output_feature], dim=1)
x = self.conv1(x)
x = self.bn(x)
feature = self.relu(x)
output1 = self.conv2(feature)
output2 = self.conv3(output1)
return output1, torch.sigmoid(output2), feature
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# class DropChannel(nn.Module):
# def __init__(self, n):
# super().__init__()
# self.n = n
# def forward(self, x):
# # x: (B, C, H, W)
# B, C, _, _ = x.shape
# m = torch.ones(B, C, 1, 1).float().to(x.device)
# if self.training:
# for i in np.random.choice(range(C), self.n, replace=False):
# m[:, i] = 0
# x = x * m
# return x, m
# class DeepLabHeadV3Plus_Metric(nn.Module):
# def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
# super(DeepLabHeadV3Plus_Metric, self).__init__()
# self.project = nn.Sequential(
# nn.Conv2d(low_level_channels, 48, 1, bias=False),
# nn.BatchNorm2d(48),
# nn.ReLU(inplace=True),
# )
# self.aspp = ASPP(in_channels, aspp_dilate)
# # self.conv1=nn.Conv2d(304, 256, 1, padding=0, stride=1)
# self.conv1=nn.Conv2d(304, 256, 3, padding=1, bias=False)
# self.bn=nn.BatchNorm2d(256)
# self.relu=nn.ReLU(inplace=True)
# self.conv2=nn.Conv2d(256, 16, 1)
# self.conv3=nn.Conv2d(16, 16, 1)
# self.drop3=DropChannel(4)
# self.conv4=nn.Conv2d(16+256, 4, 1)
# self._init_weight()
# def forward(self, feature):
# low_level_feature = self.project(feature['low_level'])
# output_feature = self.aspp(feature['out'])
# output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
# align_corners=False)
# x = torch.cat([low_level_feature, output_feature], dim=1)
# x = self.conv1(x)
# x = self.bn(x)
# feature = self.relu(x)
# output1 = self.conv2(feature)
# output2 = torch.sigmoid(self.conv3(output1))
# output2, mask = self.drop3(output2)
# output2 = torch.cat([output2, torch.sigmoid(self.conv4(torch.cat([feature, output2], dim=1)))], dim=1)
# return output1, output2, feature, mask
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
class DeepLabHeadV3Plus(nn.Module):
def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
super(DeepLabHeadV3Plus, self).__init__()
self.project = nn.Sequential(
nn.Conv2d(low_level_channels, 48, 1, bias=False),
nn.BatchNorm2d(48),
nn.ReLU(inplace=True),
)
self.aspp = ASPP(in_channels, aspp_dilate)
self.classifier = nn.Sequential(
nn.Conv2d(304, 256, 3, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, num_classes, 1)
)
self._init_weight()
def forward(self, feature):
low_level_feature = self.project(feature['low_level'])
output_feature = self.aspp(feature['out'])
output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
align_corners=False)
return self.classifier(torch.cat([low_level_feature, output_feature], dim=1))
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class _SimpleSegmentationModel(nn.Module):
def __init__(self, backbone, classifier):
super(_SimpleSegmentationModel, self).__init__()
self.backbone = backbone
self.classifier = classifier
def forward(self, x):
input_shape = x.shape[-2:]
features = self.backbone(x)
x = self.classifier(features)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
return x
class _SimpleSegmentationModel_Metric(nn.Module):
def __init__(self, backbone, classifier, finetune=False):
super(_SimpleSegmentationModel_Metric, self).__init__()
self.backbone = backbone
self.classifier = classifier
self.finetune = finetune
if (finetune):
print('freeze resnet backbone')
def forward(self, x):
input_shape = x.shape[-2:]
if (self.finetune):
with torch.no_grad():
features = self.backbone(x)
else:
features = self.backbone(x)
outputs1, outputs2, features = self.classifier(features)
#print(outputs1.shape, outputs2.shape)
outputs1 = F.interpolate(outputs1, size=input_shape, mode='bilinear', align_corners=False) # (B, 16, H, W)
outputs2 = F.interpolate(outputs2, size=input_shape, mode='bilinear', align_corners=False) # (B, 20, H, W)
outputs3 = (outputs2.unsqueeze(dim=1) * x.unsqueeze(dim=2)).sum(dim=2) # (B, 3, H, W)
'''
need to consider
'''
#features = F.interpolate(features,size=input_shape, mode='bilinear', align_corners=False)
return outputs1, outputs2, features, outputs3
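# Quick shape check for the weighted sum above (editorial sketch, not used by the model):
# outputs2 is (B, 20, H, W) and x is the (B, 3, H, W) input; the unsqueezes broadcast them to
# (B, 3, 20, H, W), and summing over dim=2 collapses the 20 mask channels back to an RGB-shaped map.
if __name__ == "__main__":
    _B, _H, _W = 2, 4, 4
    _masks = torch.rand(_B, 20, _H, _W)
    _img = torch.rand(_B, 3, _H, _W)
    _recon = (_masks.unsqueeze(dim=1) * _img.unsqueeze(dim=2)).sum(dim=2)
    assert _recon.shape == (_B, 3, _H, _W)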
class _SimpleSegmentationModel_embedding(nn.Module):
def __init__(self, backbone, classifier):
super(_SimpleSegmentationModel_embedding, self).__init__()
self.backbone = backbone
self.classifier = classifier
self.centers = torch.zeros(17, 17)
# idx = 0
# for i in range(19):
# if i <= 12 or i >=16:
# self.centers[idx] = torch.tensor(np.mean(np.array(prototype[idx]), axis=0))
# idx += 1
magnitude = 3
for i in range(17):
self.centers[i][i] = magnitude
# cnt = 0
# for i in range(17):
# if i <= 12:
# self.centers[cnt][cnt] = magnitude
# cnt += 1
# elif i > 13:
# self.centers[cnt+1][cnt] = magnitude
# cnt += 1
# self.centers[13] = torch.ones(1,16) * 3
# print(self.centers)
def forward(self, x):
input_shape = x.shape[-2:]
features = self.backbone(x)
x = self.classifier(features)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
output_size = x.size()
# print(output)
# print(np.unique(output.cpu().numpy()[0][0]))
features = x.permute(0, 2, 3, 1).contiguous() # batch * h * w * num_class
features_out = features
shape = features.size()
features = features.view(shape[0], shape[1] * shape[2], shape[3]) # batch * hw * num_class
num_classes = output_size[1]
features_shape = features.size()
features = features.unsqueeze(2).expand(features_shape[0], features_shape[1], num_classes,
features_shape[2]) # batch * hw * num_class * num_class
# print(features.size())
# print(self.centers.size())
self.centers = torch.zeros(shape[3], shape[3])
m = 3
for i in range(shape[3]):
self.centers[i][i] = m
# print(self.centers.shape)
dists = features - self.centers.cuda() # batch * hw * num_classes * c
# print(dists.size())
dist2mean = -torch.sum(dists ** 2, 3) # batch * hw * num_classes
# print(dist2mean.size())
# m = nn.Softmax(dim=2)
# prob = m(dist2mean) # batch * hw * num_classes
# print(prob)
x = dist2mean.permute(0, 2, 1).contiguous().view(output_size[0], num_classes, output_size[2],
output_size[3])
return x, self.centers.cuda(), features_out
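# Worked example of the distance-to-center logit computed in forward() above (editorial sketch):
# with magnitude m = 3 the centers are 3 * I, and for a pixel feature f the logit for class k is
# -||f - 3 * e_k||^2, so the class whose one-hot center is closest gets the largest logit.
if __name__ == "__main__":
    _f = torch.tensor([2.5, 0.1, -0.3])           # one pixel's C-dimensional output (C = 3 here)
    _centers = 3 * torch.eye(3)                   # the same one-hot centers built in forward()
    _logits = -((_f.unsqueeze(0) - _centers) ** 2).sum(dim=1)
    assert _logits.argmax().item() == 0           # the pixel is assigned to class 0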
class _SimpleSegmentationModel_embedding_self_distillation(nn.Module):
def __init__(self, backbone):
super(_SimpleSegmentationModel_embedding_self_distillation, self).__init__()
self.backbone = backbone
self.classifier_list = ['classifier']
self.cls_novel = 1
for i in range(self.cls_novel):
self.classifier_list.append('classifier_' + str(i+1))
inplanes = 2048
low_level_planes = 256
aspp_dilate = [6, 12, 18]
num_classes = 16
self.classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
for i in range(self.cls_novel):
self.__setattr__(self.classifier_list[i+1], DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes + i + 1, aspp_dilate))
self.centers = torch.zeros(17, 17)
def forward(self, x):
# for m in self.__getattr__(self.classifier_list[-1]).modules():
# if isinstance(m, nn.BatchNorm2d):
# m.train()
input_shape = x.shape[-2:]
features = self.backbone(x)
logits = []
centers = []
features_out = []
logits_0, centers_0, features_out_0 = self.forward_single(self.classifier, features, input_shape)
logits.append(logits_0)
centers.append(centers_0)
features_out.append(features_out_0)
for i in range(self.cls_novel):
classifier_temp = self.__getattr__(self.classifier_list[i+1])
logits_tmp, centers_tmp, features_out_tmp = self.forward_single(classifier_temp, features, input_shape)
logits.append(logits_tmp)
centers.append(centers_tmp)
features_out.append(features_out_tmp)
return logits, centers, features_out
def forward_single(self, classifier, features, input_shape):
x = classifier(features)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
output_size = x.size()
# print(output)
# print(np.unique(output.cpu().numpy()[0][0]))
features = x.permute(0, 2, 3, 1).contiguous() # batch * h * w * num_class
features_out = features
shape = features.size()
features = features.view(shape[0], shape[1] * shape[2], shape[3]) # batch * hw * num_class
num_classes = output_size[1]
features_shape = features.size()
features = features.unsqueeze(2).expand(features_shape[0], features_shape[1], num_classes,
features_shape[2]) # batch * hw * num_class * num_class
# print(features.size())
# print(self.centers.size())
self.centers = torch.zeros(shape[3], shape[3])
m = 3
for i in range(shape[3]):
self.centers[i][i] = m
# print(self.centers)
dists = features - self.centers.cuda() # batch * hw * num_classes * c
# print(dists.size())
dist2mean = -torch.sum(dists ** 2, 3) # batch * hw * num_classes
# print(dist2mean.size())
# m = nn.Softmax(dim=2)
# prob = m(dist2mean) # batch * hw * num_classes
# print(prob)
x = dist2mean.permute(0, 2, 1).contiguous().view(output_size[0], num_classes, output_size[2],
output_size[3])
return x, self.centers.cuda(), features_out
class IntermediateLayerGetter(nn.ModuleDict):
"""
Module wrapper that returns intermediate layers from a model
It has a strong assumption that the modules have been registered
into the model in the same order as they are used.
This means that one should **not** reuse the same nn.Module
twice in the forward if you want this to work.
Additionally, it is only able to query submodules that are directly
assigned to the model. So if `model` is passed, `model.feature1` can
be returned, but not `model.feature1.layer2`.
Arguments:
model (nn.Module): model on which we will extract the features
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
Examples::
>>> m = torchvision.models.resnet18(pretrained=True)
        >>> # extract layer1 and layer3, giving as names `feat1` and `feat2`
>>> new_m = torchvision.models._utils.IntermediateLayerGetter(m,
>>> {'layer1': 'feat1', 'layer3': 'feat2'})
>>> out = new_m(torch.rand(1, 3, 224, 224))
>>> print([(k, v.shape) for k, v in out.items()])
>>> [('feat1', torch.Size([1, 64, 56, 56])),
>>> ('feat2', torch.Size([1, 256, 14, 14]))]
"""
def __init__(self, model, return_layers):
if not set(return_layers).issubset([name for name, _ in model.named_children()]):
raise ValueError("return_layers are not present in model")
orig_return_layers = return_layers
return_layers = {k: v for k, v in return_layers.items()}
layers = OrderedDict()
for name, module in model.named_children():
layers[name] = module
if name in return_layers:
del return_layers[name]
if not return_layers:
break
super(IntermediateLayerGetter, self).__init__(layers)
self.return_layers = orig_return_layers
def forward(self, x):
out = OrderedDict()
for name, module in self.named_children():
x = module(x)
if name in self.return_layers:
out_name = self.return_layers[name]
out[out_name] = x
return out
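# Runnable variant of the docstring example above (editorial sketch, assumes torchvision is installed):
# wrap a torchvision ResNet-18 and pull out two intermediate feature maps, the same way
# _segm_resnet in modeling.py requests {'layer4': 'out', 'layer1': 'low_level'} from its backbone.
if __name__ == "__main__":
    import torchvision
    _m = torchvision.models.resnet18()
    _getter = IntermediateLayerGetter(_m, return_layers={'layer1': 'low_level', 'layer4': 'out'})
    _feats = _getter(torch.rand(1, 3, 224, 224))
    print([(k, v.shape) for k, v in _feats.items()])
    # [('low_level', torch.Size([1, 64, 56, 56])), ('out', torch.Size([1, 512, 7, 7]))]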
class DeepLabHead(nn.Module):
def __init__(self, in_channels, num_classes, aspp_dilate=[12, 24, 36]):
super(DeepLabHead, self).__init__()
self.classifier = nn.Sequential(
ASPP(in_channels, aspp_dilate),
nn.Conv2d(256, 256, 3, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, num_classes, 1)
)
self._init_weight()
def forward(self, feature):
return self.classifier(feature['out'])
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class AtrousSeparableConvolution(nn.Module):
""" Atrous Separable Convolution
"""
def __init__(self, in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1, bias=True):
super(AtrousSeparableConvolution, self).__init__()
self.body = nn.Sequential(
# Separable Conv
nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, bias=bias, groups=in_channels),
# PointWise Conv
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias),
)
self._init_weight()
def forward(self, x):
return self.body(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class ASPPConv(nn.Sequential):
def __init__(self, in_channels, out_channels, dilation):
modules = [
nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
]
super(ASPPConv, self).__init__(*modules)
class ASPPPooling(nn.Sequential):
def __init__(self, in_channels, out_channels):
super(ASPPPooling, self).__init__(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True))
def forward(self, x):
size = x.shape[-2:]
x = super(ASPPPooling, self).forward(x)
return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
def __init__(self, in_channels, atrous_rates):
super(ASPP, self).__init__()
out_channels = 256
modules = []
modules.append(nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)))
rate1, rate2, rate3 = tuple(atrous_rates)
modules.append(ASPPConv(in_channels, out_channels, rate1))
modules.append(ASPPConv(in_channels, out_channels, rate2))
modules.append(ASPPConv(in_channels, out_channels, rate3))
modules.append(ASPPPooling(in_channels, out_channels))
self.convs = nn.ModuleList(modules)
self.project = nn.Sequential(
nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Dropout(0.1), )
def forward(self, x):
res = []
for conv in self.convs:
res.append(conv(x))
res = torch.cat(res, dim=1)
return self.project(res)
def convert_to_separable_conv(module):
new_module = module
if isinstance(module, nn.Conv2d) and module.kernel_size[0] > 1:
new_module = AtrousSeparableConvolution(module.in_channels,
module.out_channels,
module.kernel_size,
module.stride,
module.padding,
module.dilation,
module.bias)
for name, child in module.named_children():
new_module.add_module(name, convert_to_separable_conv(child))
return new_module | 21,245 | 39.701149 | 136 | py |
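As a hedged illustration of the last helper in utils.py (editorial, not part of the repository), convert_to_separable_conv walks a module tree and swaps every k>1 convolution for the AtrousSeparableConvolution defined above. The sketch assumes `network` is importable and sticks to bias-free 3x3 convolutions, matching how the repository's own heads are built.
import torch
from torch import nn
from network.utils import ASPP, convert_to_separable_conv

head = nn.Sequential(ASPP(2048, [12, 24, 36]),
                     nn.Conv2d(256, 19, 3, padding=1, bias=False))
head = convert_to_separable_conv(head)   # 3x3 convs become depthwise + pointwise pairs
out = head(torch.randn(1, 2048, 33, 33))
print(out.shape)                          # torch.Size([1, 19, 33, 33])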
RAML | RAML-master/incremental/network/__init__.py | from .modeling import *
from .utils import convert_to_separable_conv | 68 | 33.5 | 44 | py |
RAML | RAML-master/incremental/network/backbone/resnet.py | import torch
import torch.nn as nn
#from torchvision.models.utils import load_state_dict_from_url
from torch.hub import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
| 13,621 | 38.598837 | 107 | py |
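A small sketch (editorial, not part of the repository) of how `replace_stride_with_dilation` is consumed by _segm_resnet in modeling.py: dilating the last two stages keeps the layer4 feature map at stride 8 instead of 32. It assumes `network` is importable and avoids the pretrained download.
import torch
from network.backbone.resnet import resnet50

m = resnet50(pretrained=False, replace_stride_with_dilation=[False, True, True])
x = torch.randn(1, 3, 224, 224)
f = m.maxpool(m.relu(m.bn1(m.conv1(x))))
for layer in (m.layer1, m.layer2, m.layer3, m.layer4):
    f = layer(f)
print(f.shape)  # torch.Size([1, 2048, 28, 28]) -- 224 / 8 instead of 224 / 32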
RAML | RAML-master/incremental/network/backbone/mobilenetv2.py | from torch import nn
#from torchvision.models.utils import load_state_dict_from_url
from torch.hub import load_state_dict_from_url
import torch.nn.functional as F
__all__ = ['MobileNetV2', 'mobilenet_v2']
model_urls = {
'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
}
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
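# Worked examples of the rounding rule above (editorial sketch): values are rounded to the nearest
# multiple of `divisor`, and bumped up one extra step whenever plain rounding would shrink the
# channel count by more than 10%.
if __name__ == "__main__":
    assert _make_divisible(32 * 1.0, 8) == 32   # already a multiple of 8
    assert _make_divisible(37, 8) == 40         # nearest multiple of 8
    assert _make_divisible(11, 8) == 16         # rounding down to 8 would lose >10%, so round up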
class ConvBNReLU(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, dilation=1, groups=1):
#padding = (kernel_size - 1) // 2
super(ConvBNReLU, self).__init__(
nn.Conv2d(in_planes, out_planes, kernel_size, stride, 0, dilation=dilation, groups=groups, bias=False),
nn.BatchNorm2d(out_planes),
nn.ReLU6(inplace=True)
)
def fixed_padding(kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
return (pad_beg, pad_end, pad_beg, pad_end)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, dilation, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
layers.extend([
# dw
ConvBNReLU(hidden_dim, hidden_dim, stride=stride, dilation=dilation, groups=hidden_dim),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
])
self.conv = nn.Sequential(*layers)
self.input_padding = fixed_padding( 3, dilation )
def forward(self, x):
x_pad = F.pad(x, self.input_padding)
if self.use_res_connect:
return x + self.conv(x_pad)
else:
return self.conv(x_pad)
class MobileNetV2(nn.Module):
def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
self.output_stride = output_stride
current_stride = 1
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [ConvBNReLU(3, input_channel, stride=2)]
current_stride *= 2
dilation=1
previous_dilation = 1
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
previous_dilation = dilation
if current_stride == output_stride:
stride = 1
dilation *= s
else:
stride = s
current_stride *= s
output_channel = int(c * width_mult)
for i in range(n):
if i==0:
features.append(block(input_channel, output_channel, stride, previous_dilation, expand_ratio=t))
else:
features.append(block(input_channel, output_channel, 1, dilation, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
# make it nn.Sequential
self.features = nn.Sequential(*features)
# building classifier
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(self.last_channel, num_classes),
)
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def forward(self, x):
x = self.features(x)
x = x.mean([2, 3])
x = self.classifier(x)
return x
def mobilenet_v2(pretrained=False, progress=True, **kwargs):
"""
Constructs a MobileNetV2 architecture from
`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = MobileNetV2(**kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
progress=progress)
model.load_state_dict(state_dict)
return model
| 6,970 | 35.883598 | 123 | py |
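A hedged sketch (editorial, not part of the repository) of how _segm_mobilenet in modeling.py consumes this backbone: it slices `features[0:4]` as the low-level path (24 channels at stride 4) and `features[4:-1]` as the high-level path (320 channels, held at the requested output stride by dilation). It assumes `network` is importable and uses no pretrained weights.
import torch
from network.backbone.mobilenetv2 import mobilenet_v2

m = mobilenet_v2(pretrained=False, output_stride=8)
low_level = m.features[0:4]       # stem + first inverted-residual stages
high_level = m.features[4:-1]     # remaining stages, minus the final 1x1 ConvBNReLU

x = torch.randn(1, 3, 224, 224)
low = low_level(x)
high = high_level(low)
print(low.shape, high.shape)      # torch.Size([1, 24, 56, 56]) torch.Size([1, 320, 28, 28])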
RAML | RAML-master/incremental/network/backbone/__init__.py | from . import resnet
from . import mobilenetv2
| 47 | 15 | 25 | py |
RAML | RAML-master/incremental/network/.ipynb_checkpoints/utils-checkpoint.py | from re import M
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from collections import OrderedDict
import json
class DeepLabHeadV3Plus_Metric(nn.Module):
def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36], finetune=False):
super(DeepLabHeadV3Plus_Metric, self).__init__()
self.project = nn.Sequential(
nn.Conv2d(low_level_channels, 48, 1, bias=False),
nn.BatchNorm2d(48),
nn.ReLU(inplace=True),
)
self.aspp = ASPP(in_channels, aspp_dilate)
self.num_meta_channel = 4
#self.num_meta_channel = 2
#self.num_meta_channel = 6
# self.conv1=nn.Conv2d(304, 256, 1, padding=0, stride=1)
self.conv1=nn.Conv2d(304, 256, 3, padding=1, bias=False)
self.bn=nn.BatchNorm2d(256)
self.relu=nn.ReLU(inplace=True)
#self.conv2=nn.Sequential(nn.Conv2d(256, 16, 1), nn.BatchNorm2d(16), nn.Sigmoid())
self.conv2=nn.Conv2d(256, num_classes, 1)
        # ablation study: output channels should be num_classes + self.num_meta_channel (equal to 20 in the 16+3 setting)
self.conv3=nn.Conv2d(num_classes, 20, 1)
self.finetune = finetune
if (self.finetune):
print("only train conv3 in classifier")
self._init_weight()
def forward(self, feature):
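        # `feature` is the dict returned by IntermediateLayerGetter, with keys 'low_level'
        # and 'out'. When `finetune` is set, everything up to conv2 runs under torch.no_grad()
        # so that only conv3 (the 20-channel head) is trained, matching the
        # "only train conv3 in classifier" message printed above.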
if (self.finetune):
with torch.no_grad():
low_level_feature = self.project(feature['low_level'])
output_feature = self.aspp(feature['out'])
output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
align_corners=False)
x = torch.cat([low_level_feature, output_feature], dim=1)
x = self.conv1(x)
x = self.bn(x)
feature = self.relu(x)
output1 = self.conv2(feature)
else:
low_level_feature = self.project(feature['low_level'])
output_feature = self.aspp(feature['out'])
output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
align_corners=False)
x = torch.cat([low_level_feature, output_feature], dim=1)
x = self.conv1(x)
x = self.bn(x)
feature = self.relu(x)
output1 = self.conv2(feature)
output2 = self.conv3(output1)
return output1, torch.sigmoid(output2), feature
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# class DropChannel(nn.Module):
# def __init__(self, n):
# super().__init__()
# self.n = n
# def forward(self, x):
# # x: (B, C, H, W)
# B, C, _, _ = x.shape
# m = torch.ones(B, C, 1, 1).float().to(x.device)
# if self.training:
# for i in np.random.choice(range(C), self.n, replace=False):
# m[:, i] = 0
# x = x * m
# return x, m
# class DeepLabHeadV3Plus_Metric(nn.Module):
# def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
# super(DeepLabHeadV3Plus_Metric, self).__init__()
# self.project = nn.Sequential(
# nn.Conv2d(low_level_channels, 48, 1, bias=False),
# nn.BatchNorm2d(48),
# nn.ReLU(inplace=True),
# )
# self.aspp = ASPP(in_channels, aspp_dilate)
# # self.conv1=nn.Conv2d(304, 256, 1, padding=0, stride=1)
# self.conv1=nn.Conv2d(304, 256, 3, padding=1, bias=False)
# self.bn=nn.BatchNorm2d(256)
# self.relu=nn.ReLU(inplace=True)
# self.conv2=nn.Conv2d(256, 16, 1)
# self.conv3=nn.Conv2d(16, 16, 1)
# self.drop3=DropChannel(4)
# self.conv4=nn.Conv2d(16+256, 4, 1)
# self._init_weight()
# def forward(self, feature):
# low_level_feature = self.project(feature['low_level'])
# output_feature = self.aspp(feature['out'])
# output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
# align_corners=False)
# x = torch.cat([low_level_feature, output_feature], dim=1)
# x = self.conv1(x)
# x = self.bn(x)
# feature = self.relu(x)
# output1 = self.conv2(feature)
# output2 = torch.sigmoid(self.conv3(output1))
# output2, mask = self.drop3(output2)
# output2 = torch.cat([output2, torch.sigmoid(self.conv4(torch.cat([feature, output2], dim=1)))], dim=1)
# return output1, output2, feature, mask
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
class DeepLabHeadV3Plus(nn.Module):
def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
super(DeepLabHeadV3Plus, self).__init__()
self.project = nn.Sequential(
nn.Conv2d(low_level_channels, 48, 1, bias=False),
nn.BatchNorm2d(48),
nn.ReLU(inplace=True),
)
self.aspp = ASPP(in_channels, aspp_dilate)
self.classifier = nn.Sequential(
nn.Conv2d(304, 256, 3, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, num_classes, 1)
)
self._init_weight()
def forward(self, feature):
low_level_feature = self.project(feature['low_level'])
output_feature = self.aspp(feature['out'])
output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
align_corners=False)
return self.classifier(torch.cat([low_level_feature, output_feature], dim=1))
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class _SimpleSegmentationModel(nn.Module):
def __init__(self, backbone, classifier):
super(_SimpleSegmentationModel, self).__init__()
self.backbone = backbone
self.classifier = classifier
def forward(self, x):
input_shape = x.shape[-2:]
features = self.backbone(x)
x = self.classifier(features)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
return x
class _SimpleSegmentationModel_Metric(nn.Module):
def __init__(self, backbone, classifier, finetune=False):
super(_SimpleSegmentationModel_Metric, self).__init__()
self.backbone = backbone
self.classifier = classifier
self.finetune = finetune
if (finetune):
print('freeze resnet backbone')
def forward(self, x):
input_shape = x.shape[-2:]
if (self.finetune):
with torch.no_grad():
features = self.backbone(x)
else:
features = self.backbone(x)
outputs1, outputs2, features = self.classifier(features)
#print(outputs1.shape, outputs2.shape)
outputs1 = F.interpolate(outputs1, size=input_shape, mode='bilinear', align_corners=False) # (B, 16, H, W)
outputs2 = F.interpolate(outputs2, size=input_shape, mode='bilinear', align_corners=False) # (B, 20, H, W)
outputs3 = (outputs2.unsqueeze(dim=1) * x.unsqueeze(dim=2)).sum(dim=2) # (B, 3, H, W)
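        # outputs3 re-synthesises the input image from the 20 soft masks: each sigmoid channel
        # is weighted by the RGB values of x and the channels are summed. The training script
        # compares it to the input with an L2 reconstruction loss.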
        # TODO: decide whether `features` should also be upsampled to the input size (see the commented-out interpolate below).
#features = F.interpolate(features,size=input_shape, mode='bilinear', align_corners=False)
return outputs1, outputs2, features, outputs3
class _SimpleSegmentationModel_embedding(nn.Module):
def __init__(self, backbone, classifier):
super(_SimpleSegmentationModel_embedding, self).__init__()
self.backbone = backbone
self.classifier = classifier
self.centers = torch.zeros(17, 17)
# idx = 0
# for i in range(19):
# if i <= 12 or i >=16:
# self.centers[idx] = torch.tensor(np.mean(np.array(prototype[idx]), axis=0))
# idx += 1
magnitude = 3
for i in range(17):
self.centers[i][i] = magnitude
# cnt = 0
# for i in range(17):
# if i <= 12:
# self.centers[cnt][cnt] = magnitude
# cnt += 1
# elif i > 13:
# self.centers[cnt+1][cnt] = magnitude
# cnt += 1
# self.centers[13] = torch.ones(1,16) * 3
# print(self.centers)
def forward(self, x):
input_shape = x.shape[-2:]
features = self.backbone(x)
x = self.classifier(features)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
output_size = x.size()
# print(output)
# print(np.unique(output.cpu().numpy()[0][0]))
features = x.permute(0, 2, 3, 1).contiguous() # batch * h * w * num_class
features_out = features
shape = features.size()
features = features.view(shape[0], shape[1] * shape[2], shape[3]) # batch * hw * num_class
num_classes = output_size[1]
features_shape = features.size()
features = features.unsqueeze(2).expand(features_shape[0], features_shape[1], num_classes,
features_shape[2]) # batch * hw * num_class * num_class
# print(features.size())
# print(self.centers.size())
self.centers = torch.zeros(shape[3], shape[3])
m = 3
for i in range(shape[3]):
self.centers[i][i] = m
# print(self.centers.shape)
dists = features - self.centers.cuda() # batch * hw * num_classes * c
# print(dists.size())
dist2mean = -torch.sum(dists ** 2, 3) # batch * hw * num_classes
# print(dist2mean.size())
# m = nn.Softmax(dim=2)
# prob = m(dist2mean) # batch * hw * num_classes
# print(prob)
x = dist2mean.permute(0, 2, 1).contiguous().view(output_size[0], num_classes, output_size[2],
output_size[3])
return x, self.centers.cuda(), features_out
class _SimpleSegmentationModel_embedding_self_distillation(nn.Module):
def __init__(self, backbone):
super(_SimpleSegmentationModel_embedding_self_distillation, self).__init__()
self.backbone = backbone
self.classifier_list = ['classifier']
self.cls_novel = 1
for i in range(self.cls_novel):
self.classifier_list.append('classifier_' + str(i+1))
inplanes = 2048
low_level_planes = 256
aspp_dilate = [6, 12, 18]
num_classes = 16
self.classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
for i in range(self.cls_novel):
self.__setattr__(self.classifier_list[i+1], DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes + i + 1, aspp_dilate))
self.centers = torch.zeros(17, 17)
def forward(self, x):
# for m in self.__getattr__(self.classifier_list[-1]).modules():
# if isinstance(m, nn.BatchNorm2d):
# m.train()
input_shape = x.shape[-2:]
features = self.backbone(x)
logits = []
centers = []
features_out = []
logits_0, centers_0, features_out_0 = self.forward_single(self.classifier, features, input_shape)
logits.append(logits_0)
centers.append(centers_0)
features_out.append(features_out_0)
for i in range(self.cls_novel):
classifier_temp = self.__getattr__(self.classifier_list[i+1])
logits_tmp, centers_tmp, features_out_tmp = self.forward_single(classifier_temp, features, input_shape)
logits.append(logits_tmp)
centers.append(centers_tmp)
features_out.append(features_out_tmp)
return logits, centers, features_out
def forward_single(self, classifier, features, input_shape):
x = classifier(features)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
output_size = x.size()
# print(output)
# print(np.unique(output.cpu().numpy()[0][0]))
features = x.permute(0, 2, 3, 1).contiguous() # batch * h * w * num_class
features_out = features
shape = features.size()
features = features.view(shape[0], shape[1] * shape[2], shape[3]) # batch * hw * num_class
num_classes = output_size[1]
features_shape = features.size()
features = features.unsqueeze(2).expand(features_shape[0], features_shape[1], num_classes,
features_shape[2]) # batch * hw * num_class * num_class
# print(features.size())
# print(self.centers.size())
self.centers = torch.zeros(shape[3], shape[3])
m = 3
for i in range(shape[3]):
self.centers[i][i] = m
# print(self.centers)
dists = features - self.centers.cuda() # batch * hw * num_classes * c
# print(dists.size())
dist2mean = -torch.sum(dists ** 2, 3) # batch * hw * num_classes
# print(dist2mean.size())
# m = nn.Softmax(dim=2)
# prob = m(dist2mean) # batch * hw * num_classes
# print(prob)
x = dist2mean.permute(0, 2, 1).contiguous().view(output_size[0], num_classes, output_size[2],
output_size[3])
return x, self.centers.cuda(), features_out
class IntermediateLayerGetter(nn.ModuleDict):
"""
Module wrapper that returns intermediate layers from a model
It has a strong assumption that the modules have been registered
into the model in the same order as they are used.
This means that one should **not** reuse the same nn.Module
twice in the forward if you want this to work.
Additionally, it is only able to query submodules that are directly
assigned to the model. So if `model` is passed, `model.feature1` can
be returned, but not `model.feature1.layer2`.
Arguments:
model (nn.Module): model on which we will extract the features
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
Examples::
>>> m = torchvision.models.resnet18(pretrained=True)
        >>> # extract layer1 and layer3, giving them the names `feat1` and `feat2`
>>> new_m = torchvision.models._utils.IntermediateLayerGetter(m,
>>> {'layer1': 'feat1', 'layer3': 'feat2'})
>>> out = new_m(torch.rand(1, 3, 224, 224))
>>> print([(k, v.shape) for k, v in out.items()])
>>> [('feat1', torch.Size([1, 64, 56, 56])),
>>> ('feat2', torch.Size([1, 256, 14, 14]))]
"""
def __init__(self, model, return_layers):
if not set(return_layers).issubset([name for name, _ in model.named_children()]):
raise ValueError("return_layers are not present in model")
orig_return_layers = return_layers
return_layers = {k: v for k, v in return_layers.items()}
layers = OrderedDict()
for name, module in model.named_children():
layers[name] = module
if name in return_layers:
del return_layers[name]
if not return_layers:
break
super(IntermediateLayerGetter, self).__init__(layers)
self.return_layers = orig_return_layers
def forward(self, x):
out = OrderedDict()
for name, module in self.named_children():
x = module(x)
if name in self.return_layers:
out_name = self.return_layers[name]
out[out_name] = x
return out
class DeepLabHead(nn.Module):
def __init__(self, in_channels, num_classes, aspp_dilate=[12, 24, 36]):
super(DeepLabHead, self).__init__()
self.classifier = nn.Sequential(
ASPP(in_channels, aspp_dilate),
nn.Conv2d(256, 256, 3, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, num_classes, 1)
)
self._init_weight()
def forward(self, feature):
return self.classifier(feature['out'])
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class AtrousSeparableConvolution(nn.Module):
""" Atrous Separable Convolution
"""
def __init__(self, in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1, bias=True):
super(AtrousSeparableConvolution, self).__init__()
self.body = nn.Sequential(
# Separable Conv
nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, bias=bias, groups=in_channels),
# PointWise Conv
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias),
)
self._init_weight()
def forward(self, x):
return self.body(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class ASPPConv(nn.Sequential):
def __init__(self, in_channels, out_channels, dilation):
modules = [
nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
]
super(ASPPConv, self).__init__(*modules)
class ASPPPooling(nn.Sequential):
def __init__(self, in_channels, out_channels):
super(ASPPPooling, self).__init__(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True))
def forward(self, x):
size = x.shape[-2:]
x = super(ASPPPooling, self).forward(x)
return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
def __init__(self, in_channels, atrous_rates):
super(ASPP, self).__init__()
out_channels = 256
modules = []
modules.append(nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)))
rate1, rate2, rate3 = tuple(atrous_rates)
modules.append(ASPPConv(in_channels, out_channels, rate1))
modules.append(ASPPConv(in_channels, out_channels, rate2))
modules.append(ASPPConv(in_channels, out_channels, rate3))
modules.append(ASPPPooling(in_channels, out_channels))
self.convs = nn.ModuleList(modules)
self.project = nn.Sequential(
nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Dropout(0.1), )
def forward(self, x):
res = []
for conv in self.convs:
res.append(conv(x))
res = torch.cat(res, dim=1)
return self.project(res)
def convert_to_separable_conv(module):
new_module = module
if isinstance(module, nn.Conv2d) and module.kernel_size[0] > 1:
new_module = AtrousSeparableConvolution(module.in_channels,
module.out_channels,
module.kernel_size,
module.stride,
module.padding,
module.dilation,
module.bias)
for name, child in module.named_children():
new_module.add_module(name, convert_to_separable_conv(child))
return new_module | 21,245 | 39.701149 | 136 | py |
RAML | RAML-master/incremental/network/.ipynb_checkpoints/_deeplab-checkpoint.py | import torch
from torch import nn
from torch.nn import functional as F
from .utils import _SimpleSegmentationModel, _SimpleSegmentationModel_embedding, _SimpleSegmentationModel_embedding_self_distillation,_SimpleSegmentationModel_Metric
__all__ = ["DeepLabV3"]
class DeepLabV3(_SimpleSegmentationModel):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
class DeepLabV3_metric(_SimpleSegmentationModel_Metric):
pass
class DeepLabV3_embedding(_SimpleSegmentationModel_embedding):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
class DeepLabV3_embedding_self_distillation(_SimpleSegmentationModel_embedding_self_distillation):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
# class DeepLabHeadV3Plus(nn.Module):
# def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
# super(DeepLabHeadV3Plus, self).__init__()
# self.project = nn.Sequential(
# nn.Conv2d(low_level_channels, 48, 1, bias=False),
# nn.BatchNorm2d(48),
# nn.ReLU(inplace=True),
# )
#
# self.aspp = ASPP(in_channels, aspp_dilate)
#
# self.classifier = nn.Sequential(
# nn.Conv2d(304, 256, 3, padding=1, bias=False),
# nn.BatchNorm2d(256),
# nn.ReLU(inplace=True),
# nn.Conv2d(256, num_classes, 1)
# )
# self._init_weight()
#
# def forward(self, feature):
# low_level_feature = self.project(feature['low_level'])
# output_feature = self.aspp(feature['out'])
# output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
# align_corners=False)
# return self.classifier(torch.cat([low_level_feature, output_feature], dim=1))
#
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
# class DeepLabHead(nn.Module):
# def __init__(self, in_channels, num_classes, aspp_dilate=[12, 24, 36]):
# super(DeepLabHead, self).__init__()
#
# self.classifier = nn.Sequential(
# ASPP(in_channels, aspp_dilate),
# nn.Conv2d(256, 256, 3, padding=1, bias=False),
# nn.BatchNorm2d(256),
# nn.ReLU(inplace=True),
# nn.Conv2d(256, num_classes, 1)
# )
# self._init_weight()
#
# def forward(self, feature):
# return self.classifier( feature['out'] )
#
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
#
# class AtrousSeparableConvolution(nn.Module):
# """ Atrous Separable Convolution
# """
# def __init__(self, in_channels, out_channels, kernel_size,
# stride=1, padding=0, dilation=1, bias=True):
# super(AtrousSeparableConvolution, self).__init__()
# self.body = nn.Sequential(
# # Separable Conv
# nn.Conv2d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=in_channels ),
# # PointWise Conv
# nn.Conv2d( in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias),
# )
#
# self._init_weight()
#
# def forward(self, x):
# return self.body(x)
#
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
#
# class ASPPConv(nn.Sequential):
# def __init__(self, in_channels, out_channels, dilation):
# modules = [
# nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True)
# ]
# super(ASPPConv, self).__init__(*modules)
#
# class ASPPPooling(nn.Sequential):
# def __init__(self, in_channels, out_channels):
# super(ASPPPooling, self).__init__(
# nn.AdaptiveAvgPool2d(1),
# nn.Conv2d(in_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True))
#
# def forward(self, x):
# size = x.shape[-2:]
# x = super(ASPPPooling, self).forward(x)
# return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
#
# class ASPP(nn.Module):
# def __init__(self, in_channels, atrous_rates):
# super(ASPP, self).__init__()
# out_channels = 256
# modules = []
# modules.append(nn.Sequential(
# nn.Conv2d(in_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True)))
#
# rate1, rate2, rate3 = tuple(atrous_rates)
# modules.append(ASPPConv(in_channels, out_channels, rate1))
# modules.append(ASPPConv(in_channels, out_channels, rate2))
# modules.append(ASPPConv(in_channels, out_channels, rate3))
# modules.append(ASPPPooling(in_channels, out_channels))
#
# self.convs = nn.ModuleList(modules)
#
# self.project = nn.Sequential(
# nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True),
# nn.Dropout(0.1),)
#
# def forward(self, x):
# res = []
# for conv in self.convs:
# res.append(conv(x))
# res = torch.cat(res, dim=1)
# return self.project(res)
#
#
#
# def convert_to_separable_conv(module):
# new_module = module
# if isinstance(module, nn.Conv2d) and module.kernel_size[0]>1:
# new_module = AtrousSeparableConvolution(module.in_channels,
# module.out_channels,
# module.kernel_size,
# module.stride,
# module.padding,
# module.dilation,
# module.bias)
# for name, child in module.named_children():
# new_module.add_module(name, convert_to_separable_conv(child))
# return new_module | 8,740 | 39.281106 | 165 | py |
RAML | RAML-master/incremental/network/.ipynb_checkpoints/modeling-checkpoint.py | from PIL.Image import NONE
from .utils import IntermediateLayerGetter, DeepLabHeadV3Plus, DeepLabHead, DeepLabHeadV3Plus_Metric
from ._deeplab import DeepLabV3, DeepLabV3_embedding, DeepLabV3_embedding_self_distillation, DeepLabV3_metric
from .backbone import resnet
from .backbone import mobilenetv2
def _segm_resnet(name, backbone_name, num_classes, output_stride, pretrained_backbone, metric_dim=None, finetune=False):
if output_stride==8:
replace_stride_with_dilation=[False, True, True]
aspp_dilate = [12, 24, 36]
else:
replace_stride_with_dilation=[False, False, True]
aspp_dilate = [6, 12, 18]
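    # output_stride 8 keeps higher-resolution features by replacing the stride of the last
    # two ResNet stages with dilation (and uses larger ASPP rates); output_stride 16 only
    # dilates the final stage.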
backbone = resnet.__dict__[backbone_name](
pretrained=pretrained_backbone,
replace_stride_with_dilation=replace_stride_with_dilation)
inplanes = 2048
low_level_planes = 256
if name=='deeplabv3plus':
return_layers = {'layer4': 'out', 'layer1': 'low_level'}
classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
elif name=='deeplabv3':
return_layers = {'layer4': 'out'}
classifier = DeepLabHead(inplanes , num_classes, aspp_dilate)
elif name=='deeplabv3plus_embedding':
return_layers = {'layer4': 'out', 'layer1': 'low_level'}
classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
elif name=='deeplabv3plus_embedding_self_distillation':
return_layers = {'layer4': 'out', 'layer1': 'low_level'}
elif name=='deeplabv3plus_metirc_resnet101':
return_layers= {'layer4': 'out', 'layer1': 'low_level'}
classifier = DeepLabHeadV3Plus_Metric(inplanes, low_level_planes, num_classes, aspp_dilate,finetune)
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
if name=='deeplabv3plus_embedding':
model = DeepLabV3_embedding(backbone, classifier)
elif name== 'deeplabv3plus_embedding_self_distillation':
model = DeepLabV3_embedding_self_distillation(backbone)
elif name== 'deeplabv3plus_metirc_resnet101':
model = DeepLabV3_metric(backbone,classifier,finetune)
else:
model = DeepLabV3(backbone, classifier)
return model
def _segm_mobilenet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
if output_stride==8:
aspp_dilate = [12, 24, 36]
else:
aspp_dilate = [6, 12, 18]
backbone = mobilenetv2.mobilenet_v2(pretrained=pretrained_backbone, output_stride=output_stride)
# rename layers
backbone.low_level_features = backbone.features[0:4]
backbone.high_level_features = backbone.features[4:-1]
backbone.features = None
backbone.classifier = None
inplanes = 320
low_level_planes = 24
if name=='deeplabv3plus':
return_layers = {'high_level_features': 'out', 'low_level_features': 'low_level'}
classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
elif name=='deeplabv3':
return_layers = {'high_level_features': 'out'}
classifier = DeepLabHead(inplanes , num_classes, aspp_dilate)
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
model = DeepLabV3(backbone, classifier)
return model
def _load_model(arch_type, backbone, num_classes, output_stride, pretrained_backbone, metric_dim=None, finetune=False):
if backbone=='mobilenetv2':
model = _segm_mobilenet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
elif backbone.startswith('resnet'):
model = _segm_resnet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone, metric_dim=metric_dim, finetune=finetune)
else:
raise NotImplementedError
return model
# Deeplab v3
def deeplabv3_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-50 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-101 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True, **kwargs):
"""Constructs a DeepLabV3 model with a MobileNetv2 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
# Deeplab v3+
def deeplabv3plus_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-50 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3+ model with a ResNet-101 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_embedding_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3+ model with a ResNet-101 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus_embedding', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_embedding_self_distillation_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3+ model with a ResNet-101 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus_embedding_self_distillation', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3+ model with a MobileNetv2 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_metirc_resnet101(num_classes=21, metric_dim=64, output_stride=8, pretrained_backbone=True, finetune=False):
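    """Constructs a DeepLabV3+ model with a ResNet-101 backbone and the metric head
    (DeepLabHeadV3Plus_Metric). Mirrors the constructors above; `finetune` runs the backbone
    and most of the classifier under no_grad so that only the final conv3 head is trained.
    """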
return _load_model('deeplabv3plus_metirc_resnet101', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone, metric_dim=metric_dim, finetune=finetune) | 8,134 | 44.194444 | 194 | py |
RAML | RAML-master/incremental/.ipynb_checkpoints/main-checkpoint.py | from tqdm import tqdm
import network
import utils
import os
import random
import argparse
import numpy as np
import torch.nn.functional as F
from torch.utils import data
from datasets import VOCSegmentation, Cityscapes, cityscapes
from utils import ext_transforms as et
from metrics import StreamSegMetrics
import torch
import torch.nn as nn
from utils.visualizer import Visualizer
from PIL import Image
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sklearn.metrics as Metrics
from torch import Tensor
from typing import Tuple
def get_argparser():
parser = argparse.ArgumentParser()
    # Dataset Options
parser.add_argument("--data_root", type=str, default='../data/cityscapes',
help="path to Dataset")
parser.add_argument("--dataset", type=str, default='cityscapes',
choices=['voc', 'cityscapes'], help='Name of dataset')
parser.add_argument("--num_classes", type=int, default=256,
help="num classes (default: None)")
parser.add_argument("--metric_dim", type=int, default=None,
help="num classes (default: None)")
# Deeplab Options
parser.add_argument("--model", type=str, default='deeplabv3plus_metirc_resnet101',
choices=['deeplabv3_resnet50', 'deeplabv3plus_resnet50',
'deeplabv3_resnet101', 'deeplabv3plus_resnet101',
'deeplabv3_mobilenet', 'deeplabv3plus_mobilenet',
'deeplabv3plus_metirc_resnet101'], help='model name')
parser.add_argument("--separable_conv", action='store_true', default=False,
help="apply separable conv to decoder and aspp")
parser.add_argument("--output_stride", type=int, default=16, choices=[8, 16])
# Train Options
parser.add_argument("--finetune", action='store_true', default=False)
parser.add_argument("--test_only", action='store_true', default=False)
parser.add_argument("--save_val_results", action='store_true', default=False,
help="save segmentation results to \"./results\"")
parser.add_argument("--total_itrs", type=int, default=30000,
help="epoch number (default: 30k)")
parser.add_argument("--lr", type=float, default=0.1,
help="learning rate (default: 0.01)")
parser.add_argument("--lr_policy", type=str, default='poly', choices=['poly', 'step'],
help="learning rate scheduler policy")
parser.add_argument("--step_size", type=int, default=1000)
parser.add_argument("--crop_val", action='store_true', default=False,
help='crop validation (default: True)')
parser.add_argument("--batch_size", type=int, default=6,
help='batch size (default: 16)')
parser.add_argument("--val_batch_size", type=int, default=4,
help='batch size for validation (default: 4)')
parser.add_argument("--crop_size", type=int, default=768)
parser.add_argument("--ckpt", default=None, type=str,
help="restore from checkpoint")
parser.add_argument("--continue_training", action='store_true', default=False)
parser.add_argument("--loss_type", type=str, default='cross_entropy',
choices=['cross_entropy', 'focal_loss'], help="loss type (default: False)")
parser.add_argument("--gpu_id", type=str, default='0,1',
help="GPU ID")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help='weight decay (default: 1e-4)')
parser.add_argument("--random_seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--print_interval", type=int, default=10,
help="print interval of loss (default: 10)")
parser.add_argument("--val_interval", type=int, default=100,
help="epoch interval for eval (default: 100)")
parser.add_argument("--download", action='store_true', default=False,
help="download datasets")
parser.add_argument("--name", type=str, default='',help="download datasets")
parser.add_argument("--output_dir", type=str, default='output', help="output path")
# PASCAL VOC Options
parser.add_argument("--year", type=str, default='2012',
choices=['2012_aug', '2012', '2011', '2009', '2008', '2007'], help='year of VOC')
# Visdom options
parser.add_argument("--enable_vis", action='store_true', default=False,
help="use visdom for visualization")
parser.add_argument("--vis_port", type=str, default='13570',
help='port for visdom')
parser.add_argument("--vis_env", type=str, default='main',
help='env for visdom')
parser.add_argument("--vis_num_samples", type=int, default=8,
help='number of samples for visualization (default: 8)')
return parser
class BinaryDiceLoss(nn.Module):
def __init__(self, smooth=1, p=2, reduction='mean'):
super(BinaryDiceLoss, self).__init__()
self.smooth = smooth
self.p = p
self.reduction = reduction
def forward(self, predict, target):
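        # Soft Dice over flattened maps: num = sum(p*t) + smooth, den = sum(p^p + t^p) + smooth,
        # loss = 1 - num/den (note this variant omits the usual factor of 2 in the numerator).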
assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
predict = predict.contiguous().view(predict.shape[0], -1)
target = target.contiguous().view(target.shape[0], -1)
num = torch.sum(torch.mul(predict, target), dim=1) + self.smooth
den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1) + self.smooth
loss = 1 - num / den
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
elif self.reduction == 'none':
return loss
else:
raise Exception('Unexpected reduction {}'.format(self.reduction))
class MyDiceLoss(nn.Module):
def __init__(self, ignore_index=255):
super().__init__()
self.dice_criterion = BinaryDiceLoss()
self.ignore_index = ignore_index
def forward(self, logit, label_lst, class_lst):
loss = 0.0
for b in range(logit.shape[0]):
logit_b = logit[b][torch.where(class_lst[b] != self.ignore_index)]
label_lst_b = label_lst[b][torch.where(class_lst[b] != self.ignore_index)]
if logit_b.shape[0]:
loss += self.dice_criterion(logit_b, label_lst_b)
return loss / logit.shape[0]
class CDiceLoss(nn.Module):
def __init__(self, known_class=16, ignore_index=255):
super().__init__()
self.dice_criterion = BinaryDiceLoss()
self.bce_criterion = nn.BCELoss()
self.ignore_index = ignore_index
self.class_num=known_class
print('finetune with '+str(known_class)+" classes")
def forward(self, logit, label_lst, class_lst):
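        # Three terms, all computed on the (B, C, H, W) sigmoid maps in `logit`:
        #   loss1 - Dice + BCE between the first `class_num` channels and their GT masks;
        #   loss2 - penalises extra (unknown) channels whose mean activation is near zero,
        #           so the spare channels are encouraged to fire somewhere;
        #   loss3 - pairwise overlap (1 - Dice distance) between every pair of channels,
        #           pushing different channels to cover disjoint regions.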
loss1 = torch.FloatTensor([0.0]).to(logit.device)
for i in range(self.class_num):
loss1 += (self.dice_criterion(logit[:, i], label_lst[:, i]) + self.bce_criterion(logit[:, i], label_lst[:, i].float()))
loss1 /= self.class_num
loss2 = 0.0
for i in range(self.class_num, logit.shape[1]):
loss2 += -torch.log((torch.mean(logit[:, i]) * 50).clamp(0, 1))
loss2 /= (logit.shape[1] - self.class_num)
loss3 = 0.0
num3 = 0
for i in range(logit.shape[1]):
for j in range(logit.shape[1]):
if i == j: continue
dice_loss = self.dice_criterion(logit[:, i], logit[:, j])
loss3 += (1.0 - dice_loss)
num3 += 1
loss3 = loss3 / num3
loss = (loss1 + loss2 + loss3) * 0.1
return {
'loss': loss,
'loss1': loss1,
'loss2': loss2,
'loss3': loss3,
}
def get_dataset(opts):
""" Dataset And Augmentation
"""
if opts.dataset == 'voc':
train_transform = et.ExtCompose([
#et.ExtResize(size=opts.crop_size),
et.ExtRandomScale((0.5, 2.0)),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='train', download=opts.download, transform=train_transform)
val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='val', download=False, transform=val_transform)
if opts.dataset == 'cityscapes':
train_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
et.ExtColorJitter( brightness=0.5, contrast=0.5, saturation=0.5 ),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
val_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = Cityscapes(root=opts.data_root,
split='train', transform=train_transform)
val_dst = Cityscapes(root=opts.data_root,
split='val', transform=val_transform)
return train_dst, val_dst
def save_ckpt(batch_idx, model, optimizer, scheduler, path):
""" save current model
"""
torch.save({
"batch_idx": batch_idx,
"model_state": model.module.state_dict(),
"optimizer_state": optimizer.state_dict(),
"scheduler_state": scheduler.state_dict(),
}, path)
print("Model saved as %s" % path)
def visualize(image, label, logit, label_lst, class_lst, save_path=None, denorm=None):
# logit: (256, H, W)
if not isinstance(image, np.ndarray):
image = image.detach().cpu().numpy()
label = label.detach().cpu().numpy()
logit = logit.detach().cpu().numpy()
label_lst = label_lst.detach().cpu().numpy()
class_lst = class_lst.detach().cpu().numpy()
if denorm:
image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8)
_, axarr = plt.subplots(2, (1+logit.shape[0]), figsize=(5*(1+logit.shape[0]), 10))
axarr[0][0].imshow(image)
label[label == 255] = 0
axarr[1][0].imshow(label)
for i in range(logit.shape[0]):
if i < label_lst.shape[0]:
axarr[0][1+i].imshow(label_lst[i])
axarr[1][i+1].imshow((logit[i] >= 0.5).astype(np.uint8))
# _, axarr = plt.subplots(16, 32, figsize=(40, 20))
# for i in range(label.shape[0]):
# axarr[i//16][(i%16)*2].imshow(label[i])
# axarr[i//16][(i%16)*2].set_xticks([])
# axarr[i//16][(i%16)*2].set_yticks([])
# for i in range(logit.shape[0]):
# axarr[i//16][(i%16)*2+1].imshow((logit[i] >= 0.5).astype(np.uint8))
# axarr[i//16][(i%16)*2+1].set_xticks([])
# axarr[i//16][(i%16)*2+1].set_yticks([])
# label[label == 255] = 19
# C = logit.shape[0]
# logit = np.argmax(logit, axis=0)
# mask = np.zeros_like(logit)
# for c in range(C):
# t = class_lst[c]
# if t == 255: t = 19
# temp = (logit == c).astype(np.uint8)
# mask = np.ones_like(logit) * t * temp + mask * (1 - temp)
# _, axarr = plt.subplots(1, 3, figsize=(15, 5))
# axarr[0].imshow(image)
# axarr[1].imshow(label)
# axarr[2].imshow(mask)
if save_path:
plt.savefig(save_path)
else:
plt.show()
plt.close()
def val(opts, model, val_loader, device):
metrics = StreamSegMetrics(19)
val_save_dir = os.path.join(opts.output_dir, 'val')
os.makedirs(val_save_dir, exist_ok=True)
model.eval()
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
for batch_idx, (images, labels, _, _, _) in tqdm(enumerate(val_loader)):
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
outputs, _, _, _ = model(images)
outputs = torch.argmax(outputs, dim=1)[0].detach().cpu().numpy() # (H, W)
#print(labels.shape, outputs.shape)
metrics.update(labels[0].detach().cpu().numpy(), outputs)
score = metrics.get_results()
print(str(opts.num_classes)+' classes')
print(metrics.to_str(score))
def train_stage1(opts, model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print):
ce_criterion = utils.CrossEntropyLoss(ignore_index=255, size_average=True)
#l2_criterion = nn.MSELoss().to(device)
model.train()
epoch_records = {}
cur_itr = 0
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_save_dir = os.path.join(opts.output_dir, 'val')
os.makedirs(val_save_dir, exist_ok=True)
while True:
for batch_idx, (images, labels, labels_true, labels_lst, class_lst) in enumerate(train_loader):
images = images.to(device, dtype=torch.float32)
labels_lst = labels_lst.to(device, dtype=torch.long)
class_lst = class_lst.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
labels = labels.to(device, dtype=torch.long)
outputs, _, _, res_images = model(images)
#logits = torch.sigmoid(logits)
# loss = criterion(logits, labels_lst[:, :masks.shape[1]] * masks, class_lst)
#loss = criterion(logits, labels_lst, class_lst)
loss_seg = ce_criterion(outputs, labels, None)
#masks = ((labels.unsqueeze(dim=1)) != 255).float()
#loss_l2 = l2_criterion(res_images, images) * 0.01
#loss['loss'] += (loss_seg + loss_l2)
##loss['loss_l2'] = loss_l2
if ("seg" not in epoch_records): epoch_records["seg"]=[]
epoch_records["seg"].append(loss_seg.cpu().data.numpy())
#loss_ce = ce_criterion(outputs, labels, None)
#epoch_records['loss_ce'].append(loss_ce.item())
#loss = loss + loss_ce
optimizer.zero_grad()
loss_seg.backward()
optimizer.step()
if batch_idx % 10 == 0:
context = f"Iters {cur_itr}\t"
for key, value in epoch_records.items():
context += f"{key}: {np.mean(value):.4f}\t"
printer(context)
epoch_records = {}
if cur_itr % 1000 == 0:
val(opts, model, val_loader, device)
#for _, (images, labels, labels_true, labels_lst, class_lst) in enumerate(val_loader):
# if np.random.uniform(0, 1) < 0.9: continue
'''
for b in range(images.shape[0]):
visualize(images[b], labels_true[b], logits[b], labels_lst[b], class_lst[b], save_path=os.path.join(val_save_dir, f'{cur_itr}_{b}.png'), denorm=denorm)
# break
'''
model.train()
cur_itr += 1
if cur_itr >= opts.total_itrs:
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'final.pth'))
return epoch_records
scheduler.step()
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'{cur_itr}.pth'))
def train(opts, model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print):
ce_criterion = utils.CrossEntropyLoss(ignore_index=255, size_average=True)
l2_criterion = nn.MSELoss().to(device)
model.train()
epoch_records = {}
cur_itr = 0
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_save_dir = os.path.join(opts.output_dir, 'val')
os.makedirs(val_save_dir, exist_ok=True)
while True:
for batch_idx, (images, labels, labels_true, labels_lst, class_lst) in enumerate(train_loader):
images = images.to(device, dtype=torch.float32)
labels_lst = labels_lst.to(device, dtype=torch.long)
class_lst = class_lst.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
labels = labels.to(device, dtype=torch.long)
outputs, logits, _, res_images = model(images)
#logits = torch.sigmoid(logits)
# loss = criterion(logits, labels_lst[:, :masks.shape[1]] * masks, class_lst)
loss = criterion(logits, labels_lst, class_lst)
loss_seg = ce_criterion(outputs, labels, None)
masks = ((labels.unsqueeze(dim=1)) != 255).float()
loss_l2 = l2_criterion(res_images, images) * 0.01
loss['loss'] += loss_l2
loss['loss'] += loss_seg
loss['loss_seg'] = loss_seg
loss['loss_l2'] = loss_l2
for key, value in loss.items():
if key not in epoch_records:
epoch_records[key] = []
epoch_records[key].append(value.item())
#loss_ce = ce_criterion(outputs, labels, None)
#epoch_records['loss_ce'].append(loss_ce.item())
#loss = loss + loss_ce
optimizer.zero_grad()
loss['loss'].backward()
optimizer.step()
if batch_idx % 10 == 0:
context = f"Iters {cur_itr}\t"
for key, value in epoch_records.items():
context += f"{key}: {np.mean(value):.4f}\t"
printer(context)
epoch_records = {}
if cur_itr % 500 == 0:
val(opts, model, val_loader, device)
#for _, (images, labels, labels_true, labels_lst, class_lst) in enumerate(val_loader):
# if np.random.uniform(0, 1) < 0.9: continue
for b in range(images.shape[0]):
visualize(images[b], labels_true[b], logits[b], labels_lst[b], class_lst[b], save_path=os.path.join(val_save_dir, f'{cur_itr}_{b}.png'), denorm=denorm)
# break
model.train()
cur_itr += 1
if cur_itr >= opts.total_itrs:
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'final.pth'))
return epoch_records
scheduler.step()
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'{cur_itr}.pth'))
# if batch_idx % 10 == 0:
# val(opts, model, val_loader, device)
# model.train()
import torch
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel._functions import Scatter
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
r"""
Slices tensors into approximately equal chunks and
distributes them across given GPUs. Duplicates
references to objects that are not tensors.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
try:
return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
except Exception:
print('obj', obj.size())
print('dim', dim)
print('chunk_sizes', chunk_sizes)
quit()
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
return list(map(list, zip(*map(scatter_map, obj))))
if isinstance(obj, dict) and len(obj) > 0:
return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
return [obj for targets in target_gpus]
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
# fn is recursive). To avoid this reference cycle, we set the function to
# None, clearing the cell
try:
return scatter_map(inputs)
finally:
scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
"""Scatter with support for kwargs dictionary"""
inputs = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
kwargs = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
class BalancedDataParallel(DataParallel):
def __init__(self, gpu0_bsz, *args, **kwargs):
self.gpu0_bsz = gpu0_bsz
super().__init__(*args, **kwargs)
def forward(self, *inputs, **kwargs):
if not self.device_ids:
return self.module(*inputs, **kwargs)
if self.gpu0_bsz == 0:
device_ids = self.device_ids[1:]
else:
device_ids = self.device_ids
inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
if len(self.device_ids) == 1:
return self.module(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids)
if self.gpu0_bsz == 0:
replicas = replicas[1:]
outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
return self.gather(outputs, self.output_device)
def parallel_apply(self, replicas, device_ids, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, device_ids)
def scatter(self, inputs, kwargs, device_ids):
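        # The first GPU receives exactly `gpu0_bsz` samples; the remaining samples are split
        # evenly across the other GPUs (any remainder is handed out one by one). If gpu0_bsz
        # is not smaller than the even share, fall back to the default DataParallel scatter.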
bsz = inputs[0].size(self.dim)
num_dev = len(self.device_ids)
gpu0_bsz = self.gpu0_bsz
bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
if gpu0_bsz < bsz_unit:
chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
delta = bsz - sum(chunk_sizes)
for i in range(delta):
chunk_sizes[i + 1] += 1
if gpu0_bsz == 0:
chunk_sizes = chunk_sizes[1:]
else:
return super().scatter(inputs, kwargs, device_ids)
return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
def main():
print(torch.version.cuda)
opts = get_argparser().parse_args()
if opts.dataset.lower() == 'voc':
opts.num_classes = 21
elif opts.dataset.lower() == 'cityscapes':
opts.num_classes = 19
opts.num_classes = 256
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print("Device: %s" % device)
# Setup random seed
torch.manual_seed(opts.random_seed)
np.random.seed(opts.random_seed)
random.seed(opts.random_seed)
# Setup dataloader
if opts.dataset=='voc' and not opts.crop_val:
opts.val_batch_size = 1
train_dst, val_dst = get_dataset(opts)
train_loader = data.DataLoader(
train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=8)
val_loader = data.DataLoader(
val_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=8)
print("Dataset: %s, Train set: %d, Val set: %d" %
(opts.dataset, len(train_dst), len(val_dst)))
# Set up model
model_map = {
'deeplabv3_resnet50': network.deeplabv3_resnet50,
'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50,
'deeplabv3_resnet101': network.deeplabv3_resnet101,
'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101,
'deeplabv3_mobilenet': network.deeplabv3_mobilenet,
'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet,
'deeplabv3plus_metirc_resnet101': network.deeplabv3plus_metirc_resnet101
}
remain_class = 19 - len(train_dst.unknown_target)
print('class num : '+str(remain_class))
opts.num_classes=remain_class
model = model_map[opts.model](num_classes=remain_class, output_stride=opts.output_stride, metric_dim=opts.metric_dim, finetune=False)
if opts.separable_conv and 'plus' in opts.model:
network.convert_to_separable_conv(model.classifier)
utils.set_bn_momentum(model.backbone, momentum=0.01)
# # Set up metrics
# metrics = StreamSegMetrics(opts.num_classes)
# Set up optimizer
if (opts.finetune):
optimizer = torch.optim.SGD(params=[
{'params': model.backbone.parameters(), 'lr': 0.1*opts.lr},
{'params': model.classifier.parameters(), 'lr': opts.lr},
], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
else:
optimizer = torch.optim.SGD(params=[
{'params': model.backbone.parameters(), 'lr': 0.1*opts.lr},
{'params': model.classifier.parameters(), 'lr': opts.lr},
], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
if opts.lr_policy=='poly':
scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
elif opts.lr_policy=='step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)
#criterion = MyDiceLoss(ignore_index=255).to(device)
criterion = CDiceLoss(remain_class).to(device)
utils.mkdir(opts.output_dir)
# Restore
if opts.ckpt is not None and os.path.isfile(opts.ckpt):
# https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
model_state_dict = model.state_dict()
checkpoint_state_dict = checkpoint["model_state"]
for key in checkpoint_state_dict:
if model_state_dict[key].shape != checkpoint_state_dict[key].shape:
print(key)
continue
model_state_dict[key] = checkpoint_state_dict[key]
model.load_state_dict(model_state_dict)
#model.load_state_dict(checkpoint["model_state"])
#model = nn.DataParallel(model)
device_ids=list(map(int, opts.gpu_id.split(',')))
#torch.cuda.set_device(device_ids[0])
print(device_ids)
#model = nn.DataParallel(model, device_ids=list(map(int, opts.gpu_id.split(','))))
model = BalancedDataParallel(2, model, dim=0, device_ids=[0,1])
#model = BalancedDataParallel(2, model, dim=0, device_ids=list(map(int, opts.gpu_id.split(','))))
model.to(device)
if opts.continue_training:
optimizer.load_state_dict(checkpoint["optimizer_state"])
scheduler.load_state_dict(checkpoint["scheduler_state"])
print("Training state restored from %s" % opts.ckpt)
print("Model restored from %s" % opts.ckpt)
del checkpoint # free memory
else:
print("[!] Retrain")
#model = nn.DataParallel(model)
model = BalancedDataParallel(2, model, dim=0, device_ids=[0,1])
model.to(device)
if (opts.finetune):
train(opts, model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print)
else:
train_stage1(opts, model, train_loader, val_loader, None, optimizer, scheduler, device, printer=print)
if __name__ == '__main__':
main()
| 28,621 | 42.170437 | 171 | py |
RAML | RAML-master/incremental/.ipynb_checkpoints/main_metric-checkpoint.py | from tqdm import tqdm
import network
import utils
import os
import random
import argparse
import numpy as np
import torch.nn.functional as F
from torch.utils import data
from datasets import VOCSegmentation, Cityscapes, cityscapes, Cityscapes_Novel
from utils import ext_transforms as et
from metrics import StreamSegMetrics
import torch
import torch.nn as nn
from utils.visualizer import Visualizer
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
import sklearn.metrics as Metrics
from torch import Tensor
from typing import Tuple
from sklearn.metrics import f1_score
import cv2
def convert_label_to_similarity(normed_feature: Tensor, label: Tensor) -> Tuple[Tensor, Tensor]:
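    # Pairwise cosine similarities of the (assumed L2-normalised) features; the upper
    # triangle of the label-equality matrix selects positive pairs (same label) and its
    # complement selects negative pairs, as consumed by CircleLoss below.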
similarity_matrix = normed_feature @ normed_feature.transpose(1, 0)
label_matrix = label.unsqueeze(1) == label.unsqueeze(0)
positive_matrix = label_matrix.triu(diagonal=1)
negative_matrix = label_matrix.logical_not().triu(diagonal=1)
similarity_matrix = similarity_matrix.view(-1)
positive_matrix = positive_matrix.view(-1)
negative_matrix = negative_matrix.view(-1)
return similarity_matrix[positive_matrix], similarity_matrix[negative_matrix]
class CircleLoss(nn.Module):
def __init__(self, m: float, gamma: float) -> None:
super(CircleLoss, self).__init__()
self.m = m
self.gamma = gamma
self.soft_plus = nn.Softplus()
def forward(self, sp: Tensor, sn: Tensor) -> Tensor:
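        # Circle loss (Sun et al., 2020): ap/an are the non-negative adaptive weights,
        # delta_p = 1 - m and delta_n = m are the margins, and the loss is
        # softplus(logsumexp(gamma*an*(sn - delta_n)) + logsumexp(-gamma*ap*(sp - delta_p))).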
ap = torch.clamp_min(- sp.detach() + 1 + self.m, min=0.)
an = torch.clamp_min(sn.detach() + self.m, min=0.)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - ap * (sp - delta_p) * self.gamma
logit_n = an * (sn - delta_n) * self.gamma
loss = self.soft_plus(torch.logsumexp(logit_n, dim=0) + torch.logsumexp(logit_p, dim=0))
return loss
def get_argparser():
parser = argparse.ArgumentParser()
# Datset Options
parser.add_argument("--data_root", type=str, default='../data/cityscapes',
help="path to Dataset")
parser.add_argument("--dataset", type=str, default='cityscapes',
choices=['voc', 'cityscapes'], help='Name of dataset')
parser.add_argument("--num_classes", type=int, default=256,
help="num classes (default: None)")
parser.add_argument("--metric_dim", type=int, default=None,
help="num classes (default: None)")
# Deeplab Options
parser.add_argument("--model", type=str, default='deeplabv3plus_metirc_resnet101',
choices=['deeplabv3_resnet50', 'deeplabv3plus_resnet50',
'deeplabv3_resnet101', 'deeplabv3plus_resnet101',
'deeplabv3_mobilenet', 'deeplabv3plus_mobilenet',
'deeplabv3plus_metirc_resnet101'], help='model name')
parser.add_argument("--separable_conv", action='store_true', default=False,
help="apply separable conv to decoder and aspp")
parser.add_argument("--output_stride", type=int, default=16, choices=[8, 16])
# Train Options
parser.add_argument("--test_only", action='store_true', default=False)
parser.add_argument("--save_val_results", action='store_true', default=False,
help="save segmentation results to \"./results\"")
parser.add_argument("--total_itrs", type=int, default=10000,
help="epoch number (default: 30k)")
parser.add_argument("--lr", type=float, default=0.1,
help="learning rate (default: 0.01)")
parser.add_argument("--lr_policy", type=str, default='poly', choices=['poly', 'step'],
help="learning rate scheduler policy")
parser.add_argument("--step_size", type=int, default=10000)
parser.add_argument("--crop_val", action='store_true', default=False,
help='crop validation (default: True)')
parser.add_argument("--batch_size", type=int, default=4,
help='batch size (default: 16)')
parser.add_argument("--val_batch_size", type=int, default=1,
help='batch size for validation (default: 4)')
parser.add_argument("--crop_size", type=int, default=512)
parser.add_argument("--ckpt", default="output/final.pth", type=str,
help="restore from checkpoint")
parser.add_argument("--continue_training", action='store_true', default=False)
parser.add_argument("--loss_type", type=str, default='cross_entropy',
choices=['cross_entropy', 'focal_loss'], help="loss type (default: False)")
parser.add_argument("--gpu_id", type=str, default='0',
help="GPU ID")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help='weight decay (default: 1e-4)')
parser.add_argument("--random_seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--print_interval", type=int, default=10,
help="print interval of loss (default: 10)")
parser.add_argument("--val_interval", type=int, default=100,
help="epoch interval for eval (default: 100)")
parser.add_argument("--download", action='store_true', default=False,
help="download datasets")
parser.add_argument("--name", type=str, default='',help="download datasets")
parser.add_argument("--output_dir", type=str, default='output_metric', help="output path")
parser.add_argument("--novel_dir", type=str, default='./novel/', help="novel path")
parser.add_argument("--test_mode", type=str, default='16_3', choices=['16_1','16_3','12','14'],
help="test mode")
# PASCAL VOC Options
parser.add_argument("--year", type=str, default='2012',
choices=['2012_aug', '2012', '2011', '2009', '2008', '2007'], help='year of VOC')
# Visdom options
parser.add_argument("--enable_vis", action='store_true', default=False,
help="use visdom for visualization")
parser.add_argument("--vis_port", type=str, default='13570',
help='port for visdom')
parser.add_argument("--vis_env", type=str, default='main',
help='env for visdom')
parser.add_argument("--vis_num_samples", type=int, default=8,
help='number of samples for visualization (default: 8)')
return parser
def get_dataset(opts):
""" Dataset And Augmentation
"""
if opts.dataset == 'voc':
train_transform = et.ExtCompose([
#et.ExtResize(size=opts.crop_size),
et.ExtRandomScale((0.5, 2.0)),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='train', download=opts.download, transform=train_transform)
val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='val', download=False, transform=val_transform)
if opts.dataset == 'cityscapes':
train_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
et.ExtColorJitter( brightness=0.5, contrast=0.5, saturation=0.5 ),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
val_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = Cityscapes(root=opts.data_root,
split='train', transform=train_transform)
val_dst = Cityscapes(root=opts.data_root,
split='val', transform=val_transform)
return train_dst, val_dst
def save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, path):
""" save current model
"""
torch.save({
"batch_idx": batch_idx,
"model_state": model.module.state_dict(),
"metric_model": metric_model.state_dict(),
"optimizer_state": optimizer.state_dict(),
"scheduler_state": scheduler.state_dict(),
}, path)
print("Model saved as %s" % path)
def get_spilt_center(feature,target,metric_model,label,device):
_, H, W, C = feature.shape
    feature = feature.view(H, W, C)  # (H, W, C)
    target = target.view(H, W)  # (H, W)
#feature = feature[target==label] # (N, M)
now_sum = torch.zeros(C,).to(device)
mask = target == label
print(mask.shape)
now_center_embedding=[]
mask = mask.cpu().data.numpy()
mask = mask.astype(np.uint8)
num_object, connect = cv2.connectedComponents(mask)
#novel_sum=0
for k in range(num_object):
now_connect = (connect == k)[np.newaxis, ...].astype(np.uint8)
#now_mask = mask[now_connect]
now_mask = now_connect * mask
print(np.sum(now_mask))
if (np.sum(now_mask)<100): continue
print(now_mask.shape)
print(feature.shape)
now_feature=feature[now_mask==1]
print(now_feature.shape)
now_feature=now_feature.view(-1,C)
now_feature=torch.sum(now_feature,dim=0)/np.sum(now_mask)
#now_feature=torch.Tensor(now_feature).to(device)
now_embedding=metric_model.forward_feature(now_feature.unsqueeze(dim=0))[0].detach().cpu().numpy() # (128,)
now_center_embedding.append(now_embedding)
return now_center_embedding
def get_all_center(feature,target,metric_model,label):
_, H, W, C = feature.shape
feature = feature.view(-1,C) # (H*W, M)
target = target.flatten() # (H*W)
feature = feature[target==label] # (N, M)
feature = torch.sum(feature, dim=0)
novel_sum = torch.sum(target == label)
now_center = feature / novel_sum
now_center_embedding = metric_model.forward_feature(now_center.unsqueeze(dim=0))[0].detach().cpu().numpy() # (128,)
return now_center_embedding
def generate_novel(novel_path_name, unknown_list, model, metric_model, device):
model.eval()
metric_model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
center_embedding = {}
spilt_list=[]
with torch.no_grad():
for x in unknown_list: # [13, 14, 15]
print('generate novel: '+str(x))
center=[]
novel_dst = Cityscapes_Novel(novel_path=novel_path_name, novel_no=x, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum = 0
for (image, target) in novel_loader:
assert image.shape[0] == 1
image = image.to(device)
target = target.to(device,dtype=torch.long)
_,_,feature,_ = model(image)
target = F.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode='nearest')[:, 0]
feature = feature.permute(0, 2, 3, 1) # (1, H, W, M)
_, H, W, C = feature.shape
if (x in spilt_list):
now_center_embedding=get_spilt_center(feature,target,metric_model,x,device)
for now_center in now_center_embedding:
center.append(now_center)
else:
now_center_embedding=get_all_center(feature,target,metric_model,label=x)
center.append(now_center_embedding)
#center = center / novel_sum # (M,)
center=np.array(center)
print(center.shape)
'''
random select novel
np.random.seed(333333)
a = np.random.choice(100,1,False)
center=center[a]
print(center.shape)
'''
center=np.mean(center,axis=0)
center_embedding[x] = deepcopy(center)
return center_embedding
'''
def generate_novel(novel_path_name, unknown_list, model, metric_model, device):
model.eval()
metric_model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
center_embedding = {}
with torch.no_grad():
for x in unknown_list: # [13, 14, 15]
print('generate novel: '+str(x))
center=None
novel_dst = Cityscapes_Novel(novel_path=novel_path_name, novel_no=x, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum = 0
for (image, target) in novel_loader:
assert image.shape[0] == 1
image = image.to(device)
target = target.to(device,dtype=torch.long)
_,_,feature,_ = model(image)
target = F.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode='nearest')[:, 0]
feature = feature.permute(0, 2, 3, 1) # (1, H, W, M)
_, H, W, C = feature.shape
feature = feature.view(-1, C) # (H*W, M)
target = target.flatten() # (H*W)
feature = feature[target==x] # (N, M)
feature = torch.sum(feature, dim=0)
if center is None:
center = torch.zeros(C,).to(device)
center += feature
novel_sum += torch.sum(target == x)
center = center / novel_sum # (M,)
center_embedding[x] = metric_model.forward_feature(center.unsqueeze(dim=0))[0].detach().cpu().numpy() # (128,)
return center_embedding
'''
def cosine_similarity(x,y):
num = x.dot(y.T)
denom = np.linalg.norm(x) * np.linalg.norm(y)
return num / denom
from copy import deepcopy
def align_embedding(opts, model, metric_model, train_loader, device, center_embedding, tag=None):
model.eval()
metric_model.eval()
remain_class = 19 - len(Cityscapes.unknown_target)
num = {key: 1 for key in center_embedding.keys()}
for batch_idx, (images, labels, labels_true, _, _) in tqdm(enumerate(train_loader)):
with torch.no_grad():
images = images.to(device, dtype=torch.float32)[0:1]
labels = labels.to(device, dtype=torch.long)[0:1]
labels_true = labels_true.to(device, dtype=torch.long)[0:1]
assert images.shape[0] == 1
outputs, logits, features, _ = model(images) # outputs: (1, 16, H, W), logits: (1, 20, H, W), features: (1, 256, H/4, W/4)
logits = F.interpolate(logits, size=features.shape[-2:], mode='bilinear', align_corners=False) # (1, 20, H/4, W/4)
features = features[0].detach().cpu().numpy() # (256, H/4, W/4)
outputs = torch.argmax(outputs, dim=1)[0].detach().cpu().numpy() # (H, W)
logits = logits[0].detach().cpu().numpy() # (20, H/4, W/4)
logits = logits[remain_class:] # (3, H/4, W/4)
logits, region, connect = concat_logits(logits,250,erode=True,tag=tag)
for k in region:
mask = (connect == k)[np.newaxis, ...].astype(np.uint8) # (1, H/4, W/4)
embedding = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) # (256,)
embedding = embedding / np.sum(mask)
embedding = torch.Tensor(embedding).unsqueeze(dim=0).to(device, dtype=torch.float32) # (1, 256)
embedding = metric_model.forward_feature(embedding)[0].cpu().detach().numpy() # (128,)
tmp_key, tmp_cos, tmp_emb = None, None, None
for key, value in center_embedding.items():
cos = cosine_similarity(embedding, value)
if cos >= 0.9:
if tmp_cos is None or cos > tmp_cos:
tmp_key = key
tmp_cos = cos
tmp_emb = embedding
if tmp_key is not None:
center_embedding[tmp_key] += tmp_emb
num[tmp_key] += 1
# if batch_idx > 50: break
center_embedding = {key: value / num[key] for key, value in center_embedding.items()}
return center_embedding
def concat_logits(logits, thereshold=100, erode=True, tag=None):
if (isinstance(tag,list)):
mask = np.array(tag)
logits = np.transpose(logits)
logits = logits * mask
logits = np.transpose(logits)
logits = (logits >= 0.5).astype(np.uint8)
logits = np.sum(logits,axis=0)
logits[logits>=1]=1
mask = logits == 1
logits = logits.astype(np.uint8)
if (erode == True):
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
logits = cv2.dilate(logits, kernel)
logits = cv2.erode(logits, kernel)
#print(logits.shape)
num_object, connect = cv2.connectedComponents(logits)
region_list = []
for k in range(1,num_object):
now_connect = (connect == k)[np.newaxis, ...].astype(np.uint8)
#now_sum = np.sum(now_connect)
#print(now_sum)
if (np.sum(now_connect) < thereshold):
mask[connect == k] = 0
continue
region_list.append(k)
logits = logits * mask
return logits, region_list, connect
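# Hedged sketch of how concat_logits() output is typically consumed (mirroring the region
# pooling done in val()/align_embedding() below): each id in region_list indexes one surviving
# connected component in `connect`, whose mask is used to average-pool the backbone features.
# The input shapes are illustrative assumptions.
def _concat_logits_usage_example(meta_logits, features):
    # meta_logits: (C_meta, H, W) numpy scores, features: (M, H, W) numpy feature map
    logits, region_list, connect = concat_logits(meta_logits, thereshold=250, erode=True)
    pooled = []
    for k in region_list:
        mask = (connect == k).astype(np.uint8)  # (H, W) binary mask of one region
        emb = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) / np.sum(mask)
        pooled.append(emb)  # one (M,) mean feature vector per candidate region
    return pooled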
def check_novel_logit(opts,model,metric_model,class_no,meta_channel_num,device,beta=0.15):
model.eval()
metric_model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
center_embedding = {}
spilt_list=[]
channel_tag=[0]*meta_channel_num
with torch.no_grad():
print('generate novel: '+str(class_no))
center=[]
novel_dst = Cityscapes_Novel(novel_path=opts.novel_dir, novel_no=class_no, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum = 0
for (image, target) in novel_loader:
assert image.shape[0] == 1
#image, target = novel_transform(image,target)
image = image.to(device)
target = target.to(device,dtype=torch.long)
output,logit,feature,_ = model(image)
output = torch.argmax(output[0], dim=0).detach().cpu().numpy()
mask = target == class_no
target = F.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode='nearest')[:, 0]
#print(target.shape)
#print(mask.shape)
logit = logit[0, (-meta_channel_num):]
#print(logit.shape)
logit = logit * mask
mask = mask.data.cpu().numpy()
all_sum=np.sum(mask)
logit = logit.detach().cpu().numpy()
logit = (logit >= 0.5).astype(np.uint8)
for x in range(logit.shape[0]):
if (np.sum(logit[x])>all_sum*beta): channel_tag[x]=1
#print(logit.shape)
#for x in range(channel_num):
#print(image.shape)
#image= denorm(image.detach().cpu().numpy())[0] * 255
#print(image.shape)
image = (denorm(image.detach().cpu().numpy())[0] * 255).transpose(1, 2, 0).astype(np.uint8)
'''
plt.imshow(image)
plt.show()
plt.close()
_, axarr = plt.subplots(1, logit.shape[0], figsize=(5*logit.shape[0], 5))
for i in range(logit.shape[0]):
now_logit=cv2.resize(logit[i], output.shape[::-1], interpolation=cv2.INTER_NEAREST)
axarr[i].imshow(image)
axarr[i].imshow(now_logit, alpha=0.5)
plt.show()
plt.close()
'''
'''
feature = feature.permute(0, 2, 3, 1) # (1, H, W, M)
_, H, W, C = feature.shape
if (x in spilt_list):
now_center_embedding=get_spilt_center(feature,target,metric_model,label=x)
for now_center in now_center_embedding:
center.append(now_center)
else:
now_center_embedding=get_all_center(feature,target,metric_model,label=x)
center.append(now_center_embedding)
'''
#center = center / novel_sum # (M,)
'''
center=np.array(center)
print(center.shape)
center=np.mean(center,axis=0)
center_embedding[x] = deepcopy(center)
'''
return channel_tag
def val(opts, model, metric_model, train_loader, val_loader, device,):
remain_class = 19 - len(Cityscapes.unknown_target)
metrics16 = StreamSegMetrics(19)
metrics19 = StreamSegMetrics(19, remain_class)
model.eval()
metric_model.eval()
if opts.save_val_results:
if not os.path.exists('results_1'):
os.mkdir('results_1')
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
img_id = 0
# val_save_dir = os.path.join(opts.output_dir, 'val')
# os.makedirs(val_save_dir, exist_ok=True)
# denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
if (opts.test_mode == '16_1'):
center_embedding = generate_novel(opts.novel_dir, [13], model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
else:
center_embedding = generate_novel(opts.novel_dir, Cityscapes.unknown_target, model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
#using when 16+1 setting
#center_embedding = generate_novel(opts.novel_dir, [13], model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
#center_embedding = align_embedding(opts, model, metric_model, train_loader, device, center_embedding)
name=['sky','person','rider','car','truck','bus','train','motorcycle','bicycle']
meta_channel_num=20-remain_class
all_tag=[0]*meta_channel_num
if (opts.test_mode == '16_1'):
for x in [13]:
novel_tag=check_novel_logit(opts, model,metric_model,x, meta_channel_num=meta_channel_num, device=device)
for y in range(meta_channel_num):
if (novel_tag[y]==1): all_tag[y]=1
else:
for x in Cityscapes.unknown_target:
novel_tag=check_novel_logit(opts, model,metric_model,x, meta_channel_num=meta_channel_num, device=device)
for y in range(meta_channel_num):
if (novel_tag[y]==1): all_tag[y]=1
#using when 16+1 setting
'''
for x in [13]:
novel_tag=check_novel_logit(opts, model,metric_model,x, meta_channel_num=meta_channel_num, device=device)
for y in range(meta_channel_num):
if (novel_tag[y]==1): all_tag[y]=1
'''
#all_tag = np.array(all_tag)
print(all_tag)
'''
if (opts.test_only):
center_embedding = align_embedding(opts ,model, metric_model, train_loader, device, center_embedding, all_tag)
'''
miou_all=[]
miou_unknown=[]
for _, (images, labels, labels_true, _, _) in tqdm(enumerate(val_loader)):
assert images.shape[0] == 1
with torch.no_grad():
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
outputs, logits, features, _ = model(images) # outputs: (1, 16, H, W), logits: (1, 20, H, W), features: (1, 256, H/4, W/4)
known_class = outputs.shape[1]
h,w=outputs.shape[2],outputs.shape[3]
#outputs = logits[:,0:known_class,:,:].clone()
logits = F.interpolate(logits, size=features.shape[-2:], mode='bilinear', align_corners=False) # (1, 20, H/4, W/4)
features = features[0].detach().cpu().numpy() # (256, H/4, W/4)
outputs = torch.argmax(outputs, dim=1)[0].detach().cpu().numpy() # (H, W)
metrics16.update(labels[0].detach().cpu().numpy(), outputs)
outputs19 = deepcopy(outputs)
# in 16 + 3 setting and 16 + 1 setting
if ('16' in opts.test_mode):
outputs19[outputs19 == 13] = 16
outputs19[outputs19 == 14] = 17
outputs19[outputs19 == 15] = 18
# in 12 + 7 setting 10->12 11,12->10,11
if ('12' in opts.test_mode):
outputs19[outputs19 == 11] = 12
outputs19[outputs19 == 10] = 11
#in 14 + 5 setting unknown_target = [10,13,14,15,16]
# 11 -> 10 12 -> 11 17 -> 12 18 -> 13
if ('14' in opts.test_mode):
outputs19[outputs19 == 13] = 18
outputs19[outputs19 == 12] = 17
outputs19[outputs19 == 11] = 12
outputs19[outputs19 == 10] = 11
logits = logits[0].detach().cpu().numpy() # (20, H/4, W/4)
logits = logits[known_class:] # (3, H/4, W/4)
# concat inference
logits, region, connect = concat_logits(logits, thereshold=250, tag=all_tag)
for k in region:
mask = (connect == k)[np.newaxis, ...].astype(np.uint8) # (1, H/4, W/4)
embedding = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) # (256,)
embedding = embedding / np.sum(mask)
embedding = torch.Tensor(embedding).unsqueeze(dim=0).to(device, dtype=torch.float32) # (1, 256)
embedding = metric_model.forward_feature(embedding)[0].cpu().detach().numpy() # (128,)
tmp_key, tmp_cos = None, None
for key, value in center_embedding.items():
cos = cosine_similarity(embedding, value)
if cos >= 0.8:
if tmp_cos is None or cos > tmp_cos:
tmp_key = key
tmp_cos = cos
if tmp_key is not None:
mask = cv2.resize(mask[0], outputs19.shape[::-1], interpolation=cv2.INTER_NEAREST)
outputs19 = mask * tmp_key + outputs19 * (1 - mask)
'''
# default inference
logits = (logits >= 0.5).astype(np.uint8) # (3, H/4, W/4)
for c in range(logits.shape[0]):
logit = logits[c] # (H/4, W/4)
#Hl, Wl = logit.shape
#logit = cv2.resize(logit, (Wl//4, Hl//4), interpolation=cv2.INTER_NEAREST)
num_object, connect = cv2.connectedComponents(logit)
#connect = cv2.resize(connect, (Wl, Hl), interpolation=cv2.INTER_NEAREST)
for k in range(1, num_object+1):
mask = (connect == k)[np.newaxis, ...].astype(np.uint8) # (1, H/4, W/4)
if np.sum(mask) < 100: continue
embedding = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) # (256,)
embedding = embedding / np.sum(mask)
embedding = torch.Tensor(embedding).unsqueeze(dim=0).to(device, dtype=torch.float32) # (1, 256)
embedding = metric_model.forward_feature(embedding)[0].cpu().detach().numpy() # (128,)
tmp_key, tmp_cos = None, None
for key, value in center_embedding.items():
cos = cosine_similarity(embedding, value)
if cos >= 0.75:
if tmp_cos is None or cos > tmp_cos:
tmp_key = key
tmp_cos = cos
if tmp_key is not None:
mask = cv2.resize(mask[0], outputs19.shape[::-1], interpolation=cv2.INTER_NEAREST)
outputs19 = mask * tmp_key + outputs19 * (1 - mask)
'''
#using in 16+3 setting
if ('16' in opts.test_mode):
for x in range(13,16):
labels_true[labels_true==x]+=103
outputs19[outputs19==x]+=103
labels_true[labels_true==(x+3)]-=3
outputs19[outputs19==(x+3)]-=3
for x in range(116,119):
labels_true[labels_true==x]-=100
outputs19[outputs19==x]-=100
if (opts.test_mode == '16_1'):
for x in range(17,19):
labels_true[labels_true==x] = 255
# using in 12 + 7 setting 10->12 11,12->10,11
if ('12' in opts.test_mode):
labels_true[labels_true==10] = 112
outputs19[outputs19==10] =112
labels_true[labels_true == 11] = 10
outputs19[outputs19==11] = 10
labels_true[labels_true == 12] = 11
outputs19[outputs19 == 12] = 11
labels_true[labels_true==112] -= 100
outputs19[outputs19==112] -= 100
'''
labels_true[labels_true==10] = 112
outputs19[outputs19==10] =112
labels_true[labels_true == 11] = 10
outputs19[outputs19==11] = 10
labels_true[labels_true == 12] = 11
outputs19[outputs19 == 12] = 11
labels_true[labels_true==112] -= 100
outputs19[outputs19==112] -= 100
'''
#in 14 + 5 setting unknown_target = [10,13,14,15,16]
# 11 -> 10 12 -> 11 17 -> 12 18 -> 13
# 10 -> 14 ,13 ->15
if ('14' in opts.test_mode):
labels_true[labels_true == 10] = 114
outputs19[outputs19 == 10] = 114
for x in range(13,17):
labels_true[labels_true == x] = 100+2+x
outputs19[outputs19 == x] = 100+2+x
for x in range(11,13):
labels_true[labels_true == x] = x-1
outputs19[outputs19 == x] = x-1
for x in range(17,19):
labels_true[labels_true == x] = x-5
outputs19[outputs19 == x] = x-5
for x in range(114,119):
labels_true[labels_true == x] -=100
outputs19[outputs19 == x] -=100
metrics19.update(labels_true[0].detach().cpu().numpy(), outputs19)
'''
for x in range(13,16):
labels_true[labels_true==x]+=103
outputs19[outputs19==x]+=103
labels_true[labels_true==(x+3)]-=3
outputs19[outputs19==(x+3)]-=3
for x in range(116,119):
labels_true[labels_true==x]-=100
outputs19[outputs19==x]-=100
'''
'''
now_all_IoU = metrics19.get_results()['Mean IoU']
now_unkown_IoU = metrics19.get_results()['Unknown IoU']
miou_all.append(now_all_IoU)
miou_unknown.append(now_unkown_IoU)
metrics19.reset()
'''
#print(labels_true.shape)
#print(outputs19.shape)
if opts.save_val_results:
assert images.shape[0] == 1
target = labels_true[0].detach().cpu().numpy()
image = images[0].detach().cpu().numpy()
pred = outputs19
#pred = pred.reshape(h,w)
image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8)
target = train_loader.dataset.decode_target(target).astype(np.uint8)
pred = train_loader.dataset.decode_target(pred).astype(np.uint8)
#scores = (255 * scores).squeeze().astype(np.uint8)
Image.fromarray(image).save('results_1/%d_image.png' % img_id)
Image.fromarray(target).save('results_1/%d_target.png' % img_id)
Image.fromarray(pred).save('results_1/%d_pred.png' % img_id)
#Image.fromarray(scores).save('results/%d_scores.png' % img_id)
# np.save('results/%d_dis_sum.npy' % img_id, dis_sum_map
img_id += 1
score16 = metrics16.get_results()
score19 = metrics19.get_results()
now_IoU = score19['Unknown IoU']
print('16 classes')
print(metrics16.to_str(score16))
print()
print('19 classes')
print(metrics19.to_str(score19))
'''
for x in range(0,100):
print(x,miou_all[x],miou_unknown[x])
'''
return now_IoU
def train(opts, model, metric_model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print):
ce_criterion = nn.CrossEntropyLoss().to(device)
model.eval()
metric_model.train()
epoch_records = {'f1': []}
cur_itr = 0
best_IoU = 0
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
#val_save_dir = os.path.join(opts.output_dir, 'val')
#os.makedirs(val_save_dir, exist_ok=True)
while True:
for batch_idx, (images, labels, labels_true, labels_lst, class_lst) in enumerate(train_loader):
images = images.to(device, dtype=torch.float32)
labels_lst = labels_lst.to(device, dtype=torch.long)
class_lst = class_lst.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
labels = labels.to(device, dtype=torch.long)
_, _, features, _ = model(images)
labels_lst = F.interpolate(labels_lst.float(), size=features.shape[-2:], mode='nearest')
new_features, new_labels, logits = metric_model(features, labels_lst)
cir_loss = criterion(*convert_label_to_similarity(new_features, new_labels)) * 0.1
ce_loss = ce_criterion(logits, new_labels.long())
loss = {
'loss': cir_loss + ce_loss,
'cir_loss': cir_loss,
'ce_loss': ce_loss,
}
for key, value in loss.items():
if key not in epoch_records:
epoch_records[key] = []
epoch_records[key].append(value.item())
optimizer.zero_grad()
loss['loss'].backward()
optimizer.step()
f1 = f1_score(new_labels.detach().cpu().numpy(),
torch.argmax(logits, dim=1).detach().cpu().numpy(),
average='macro')
epoch_records['f1'].append(f1)
if batch_idx % 100 == 0:
context = f"Iters {cur_itr}\t"
for key, value in epoch_records.items():
context += f"{key}: {np.mean(value):.4f}\t"
printer(context)
epoch_records = {'f1': []}
if cur_itr and cur_itr % 1000 == 0:
now_IoU = val(opts, model, metric_model, train_loader, val_loader, device)
if (now_IoU > best_IoU):
best_IoU = now_IoU
save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, os.path.join(opts.output_dir, f'best.pth'))
print('best IoU :'+str(best_IoU))
model.eval()
metric_model.train()
cur_itr += 1
if cur_itr >= opts.total_itrs:
save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, os.path.join(opts.output_dir, f'final.pth'))
val(opts, model, metric_model, train_loader, val_loader, device)
return epoch_records
scheduler.step()
save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, os.path.join(opts.output_dir, f'{cur_itr}.pth'))
from dropblock import DropBlock2D
class MetricModel(nn.Module):
def __init__(self, known_class):
super().__init__()
self.model = nn.Sequential(
nn.Linear(256, 128),
nn.ReLU(inplace=True),
nn.Linear(128, 128))
self.classifier = nn.Linear(128, known_class, bias=False)
self.known_class = known_class
self.dropblock = DropBlock2D(block_size=3, drop_prob=0.3)
def forward(self, feature, label_lst):
# feature: (B, 256, H, W)
# label_lst: (B, 17, H, W)
label_lst = label_lst[:, :self.known_class]
new_feature, new_label = [], []
for _ in range(self.known_class):
tmp_label_lst = self.dropblock(label_lst) # (B, 16, H, W)
for c in range(tmp_label_lst.shape[1]):
tmp_feature = (feature * tmp_label_lst[:, c:c+1, :, :]).view(feature.shape[0], feature.shape[1], -1) # (B, 256, H*W)
tmp_feature = tmp_feature.sum(dim=-1) # (B, 256)
tmp_num = tmp_label_lst[:, c:c+1, :, :].view(tmp_label_lst.shape[0], -1) # (B, H*W)
tmp_num = tmp_num.sum(dim=-1) # (B,)
keep_ind = tmp_num != 0
if keep_ind.shape[0]:
tmp_feature = tmp_feature[keep_ind]
tmp_num = tmp_num[keep_ind]
tmp_feature = tmp_feature / tmp_num.unsqueeze(dim=1) # (B, 256)
new_feature.append(tmp_feature)
new_label.append(torch.ones(tmp_feature.shape[0])*c)
new_feature = torch.cat(new_feature, dim=0) # (N, 256)
new_feature = self.model(new_feature) # (N, 128)
new_label = torch.cat(new_label, dim=0).to(feature.device) # (N,)
logit = self.classifier(new_feature) # (N, 16)
return F.normalize(new_feature), new_label.long(), logit
def forward_feature(self, feature):
# feature: (1, 256)
new_feature = self.model(feature) # (1, 128)
return F.normalize(new_feature)
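# Hedged usage sketch for MetricModel.forward_feature(): at inference time a pooled 256-d
# region feature is projected into the 128-d metric space and matched against the stored class
# centers by cosine similarity. Shapes and the similarity-based argmax are illustrative
# assumptions based on the inference code in val().
def _metric_model_usage_example(metric_model, region_feature, center_embedding):
    # region_feature: (256,) numpy array, center_embedding: {class_id: (128,) numpy array}
    emb = torch.from_numpy(region_feature).float().unsqueeze(0)  # (1, 256)
    emb = emb.to(next(metric_model.parameters()).device)
    emb = metric_model.forward_feature(emb)[0].detach().cpu().numpy()  # (128,), L2-normalized
    scores = {cid: cosine_similarity(emb, center) for cid, center in center_embedding.items()}
    return max(scores, key=scores.get)  # best-matching novel class id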
def main():
print(torch.version.cuda)
opts = get_argparser().parse_args()
if opts.dataset.lower() == 'voc':
opts.num_classes = 21
elif opts.dataset.lower() == 'cityscapes':
opts.num_classes = 19
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("Device: %s" % device)
# Setup random seed
torch.manual_seed(opts.random_seed)
np.random.seed(opts.random_seed)
random.seed(opts.random_seed)
# Setup dataloader
if opts.dataset=='voc' and not opts.crop_val:
opts.val_batch_size = 1
train_dst, val_dst = get_dataset(opts)
train_loader = data.DataLoader(
train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=8)
val_loader = data.DataLoader(
val_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=8)
print("Dataset: %s, Train set: %d, Val set: %d" %
(opts.dataset, len(train_dst), len(val_dst)))
unknown_num = len(train_dst.unknown_target)
remain_class = opts.num_classes - unknown_num
opts.num_classes = remain_class
# Set up model
model_map = {
'deeplabv3_resnet50': network.deeplabv3_resnet50,
'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50,
'deeplabv3_resnet101': network.deeplabv3_resnet101,
'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101,
'deeplabv3_mobilenet': network.deeplabv3_mobilenet,
'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet,
'deeplabv3plus_metirc_resnet101': network.deeplabv3plus_metirc_resnet101
}
model = model_map[opts.model](num_classes=opts.num_classes, output_stride=opts.output_stride, metric_dim=opts.metric_dim)
if opts.separable_conv and 'plus' in opts.model:
network.convert_to_separable_conv(model.classifier)
utils.set_bn_momentum(model.backbone, momentum=0.01)
# # Set up metrics
# metrics = StreamSegMetrics(opts.num_classes)
#criterion = MyDiceLoss(ignore_index=255).to(device)
criterion = CircleLoss(m=0.25, gamma=8.0).to(device)
utils.mkdir(opts.output_dir)
# Restore
if opts.ckpt is not None and os.path.isfile(opts.ckpt):
# https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
res = model.load_state_dict(checkpoint["model_state"])
print(res)
model = nn.DataParallel(model)
model.to(device)
# if opts.continue_training:
# optimizer.load_state_dict(checkpoint["optimizer_state"])
# scheduler.load_state_dict(checkpoint["scheduler_state"])
# print("Training state restored from %s" % opts.ckpt)
print("Model restored from %s" % opts.ckpt)
del checkpoint # free memory
else:
print("[!] Retrain")
model = nn.DataParallel(model)
model.to(device)
for _, param in model.named_parameters():
param.requires_grad = False
metric_model = MetricModel(remain_class).to(device)
optimizer = torch.optim.SGD(metric_model.parameters(), lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
if opts.lr_policy=='poly':
scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
elif opts.lr_policy=='step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)
if (opts.test_only):
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
metric_model.load_state_dict(checkpoint["metric_model"])
val(opts, model, metric_model, train_loader, val_loader, device)
return
#res = model.load_state_dict(checkpoint["model_state"])
    #print(res)  # leftover debug print; `res` is only defined when a checkpoint was loaded above
#model = nn.DataParallel(model)
#model.to(device)
train(opts, model, metric_model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print)
if __name__ == '__main__':
main()
| 43,558 | 44.092133 | 152 | py |
RAML | RAML-master/incremental/utils/loss.py | import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=0, size_average=True, ignore_index=255):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.ignore_index = ignore_index
self.size_average = size_average
def forward(self, inputs, targets):
ce_loss = F.cross_entropy(
inputs, targets, reduction='none', ignore_index=self.ignore_index)
pt = torch.exp(-ce_loss)
focal_loss = self.alpha * (1-pt)**self.gamma * ce_loss
if self.size_average:
return focal_loss.mean()
else:
return focal_loss.sum()
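# Hedged usage sketch (illustrative shapes): FocalLoss is a drop-in replacement for
# nn.CrossEntropyLoss on (N, C, H, W) logits with (N, H, W) integer targets, down-weighting
# easy examples through the (1 - pt)**gamma factor.
def _focal_loss_usage_example():
    criterion = FocalLoss(alpha=1, gamma=2)
    logits = torch.randn(2, 19, 64, 64)          # (N, C, H, W)
    targets = torch.randint(0, 19, (2, 64, 64))  # (N, H, W)
    return criterion(logits, targets)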
class CrossEntropyLoss(nn.Module):
def __init__(self, alpha=0, beta=0, gamma=0, size_average=True, ignore_index=255):
super(CrossEntropyLoss, self).__init__()
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.ignore_index = ignore_index
self.size_average = size_average
        self.criterion = nn.CrossEntropyLoss(ignore_index=self.ignore_index, size_average=self.size_average)
        # note: `self.cuda` is nn.Module's bound method and is always truthy, so the original
        # check moved the criterion to GPU unconditionally; check device availability instead
        if torch.cuda.is_available():
            self.criterion = self.criterion.cuda()
def forward(self, logit, target, features_in):
n, c, h, w = logit.size()
CE_loss = self.criterion(logit, target.long())
        # NOTE: this early return keeps only the cross-entropy term; the VAR / Inter / Center
        # losses computed below are effectively disabled (unreachable code).
        return CE_loss / n
VAR_loss = Variable(torch.Tensor([0])).cuda()
Inter_loss = Variable(torch.Tensor([0])).cuda()
Center_loss = Variable(torch.Tensor([0])).cuda()
for i in range(n):
label = target[i]
label = label.flatten().cpu().numpy()
features = logit[i]
features = features.permute(1, 2, 0).contiguous()
shape = features.size()
features = features.view(shape[0]*shape[1], shape[2])
features_in_temp = features_in[i]
instances, counts = np.unique(label, False, False, True)
# print('counts', counts)
total_size = int(np.sum(counts))
for instance in instances:
if instance == self.ignore_index: # Ignore background
continue
locations = torch.LongTensor(np.where(label == instance)[0]).cuda()
vectors = torch.index_select(features, dim=0, index=locations)
features_temp = torch.index_select(features_in_temp, dim=0, index=locations)
centers_temp = torch.mean(features_temp, dim=0)
features_temp = features_temp - centers_temp
Center_loss += torch.sum(features_temp ** 2) / total_size
# print(size)
# print(-vectors[:,int(instance)])
# get instance mean and distances to mean of all points in an instance
VAR_loss += torch.sum((-vectors[:,int(instance)]))/total_size
Inter_loss += (torch.sum(vectors) - torch.sum((vectors[:,int(instance)]))) / total_size
# total_size += size
# VAR_loss += var_loss/total_size
loss = (CE_loss + self.alpha * VAR_loss + self.beta * Inter_loss +self.gamma * Center_loss) / n
# print(CE_loss/n, self.alpha * VAR_loss/n, self.beta * Inter_loss/n, self.gamma * Center_loss/n)
return loss
class CrossEntropyLoss_dis(nn.Module):
def __init__(self, alpha=0, beta=0, gamma=0, size_average=True, ignore_index=255):
super(CrossEntropyLoss_dis, self).__init__()
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.ignore_index = ignore_index
self.size_average = size_average
def forward(self, logit, target, features_1, features_2):
n, c, h, w = logit.size()
        criterion = nn.CrossEntropyLoss(ignore_index=self.ignore_index, size_average=self.size_average)
        # note: `self.cuda` is nn.Module's bound method and is always truthy; check availability instead
        if torch.cuda.is_available():
            criterion = criterion.cuda()
        CE_loss = criterion(logit, target.long())
        # NOTE: this early return keeps only the cross-entropy term; the distance loss below is unreachable.
        return CE_loss / n
DIS_loss = Variable(torch.Tensor([0])).cuda()
appendix_lay = torch.zeros(n,w,h,1).cuda()
features_1 = torch.cat((features_1, appendix_lay), dim=3)
# print('features_1.shape: ', features_1.shape)
# print('features_2.shape: ', features_2.shape)
for i in range(n):
features_origin = features_1[i][target[i] != 16]
features_new = features_2[i][target[i] != 16]
features_diff = features_new - features_origin
DIS_loss += torch.sum(features_diff ** 2) / (features_diff.shape[0])
loss = CE_loss / n + 0.01 * DIS_loss / n
# print(CE_loss, DIS_loss)
return loss
# class CenterLoss(nn.Module):
# """Center loss.
# Reference:
# Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
# Args:
# num_classes (int): number of classes.
# feat_dim (int): feature dimension.
# """
# def __init__(self, num_classes=10, feat_dim=256, use_gpu=True):
# super(CenterLoss, self).__init__()
# self.num_classes = num_classes
# self.feat_dim = feat_dim
# self.use_gpu = use_gpu
# if self.use_gpu:
# self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda()) # (C, M)
# else:
# self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
# def forward(self, x, labels):
# """
# Args:
# x: feature matrix with shape (batch_size, feat_dim, h, w).
# labels: ground truth labels with shape (batch_size, h, w).
# """
# batch_size = x.size(0)
# x = x.permute(0, 2, 3, 1) # (B, H, W, M)
# x = x.reshape((-1,self.feat_dim)) # (N, M)
# sample_size= x.size(0) # N
# labels = labels.flatten() # (N,)
# assert sample_size == labels.size(0)
# # (N, M) --> (N, 1) --> (N, C) | (C, M) --> (C, 1) --> (C, N) --> (N, C)
# # (N, C)
# distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(sample_size, self.num_classes) + \
# torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, sample_size).t()
# # distmat - 2 (x * center.T)
# distmat.addmm_(1, -2, x, self.centers.t())
# classes = torch.arange(self.num_classes).long()
# if self.use_gpu: classes = classes.cuda()
# labels = labels.unsqueeze(1).expand(sample_size, self.num_classes)
# mask = labels.eq(classes.expand(sample_size, self.num_classes))
# dist = distmat * mask.float()
# loss = dist.clamp(min=1e-12, max=1e+12).sum() / sample_size
# return loss / batch_size
class CenterLoss(nn.Module):
"""Center loss.
Reference:
Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
Args:
num_classes (int): number of classes.
feat_dim (int): feature dimension.
"""
def __init__(self, num_classes=10, feat_dim=256, use_gpu=True):
super(CenterLoss, self).__init__()
self.num_classes = num_classes
self.feat_dim = feat_dim
self.use_gpu = use_gpu
if self.use_gpu:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda()) # (C, M)
self.criterion = nn.CrossEntropyLoss().cuda()
else:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
self.criterion = nn.CrossEntropyLoss()
def _dis_criterion(self, x, labels):
# x: (B, M, H, W) | labels: (B, H, W)
_, _, H, W = x.shape
assert H == W
x = torch.nn.functional.interpolate(x, size=[H//2, W//2])
labels = torch.nn.functional.interpolate(labels.unsqueeze(dim=1).float(), size=[H//2, W//2], mode="nearest")
logit = [-torch.sum((x.unsqueeze(dim=1) - self.centers.clone()[c:c+1, :].detach().view(1, 1, self.centers.shape[1], 1, 1)) ** 2, dim=2) for c in range(self.num_classes)]
logit = torch.cat(logit, dim=1)
logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, self.num_classes)
label = labels.contiguous().view(-1)
#logit = -torch.sum((x.unsqueeze(dim=1) - self.centers.clone().detach().view(1, *self.centers.shape, 1, 1)) ** 2, dim=2)
loss = self.criterion(logit[label != 255], label[label != 255].long())
return loss
def forward(self, x, labels):
"""
Args:
x: feature matrix with shape (batch_size, feat_dim, h, w).
labels: ground truth labels with shape (batch_size, h, w).
"""
# feature = x.clone()
# feature_label = labels.clone()
batch_size = x.size(0)
x = x.permute(0, 2, 3, 1) # (B, H, W, M)
x = x.reshape((-1,self.feat_dim)) # (N, M)
sample_size= x.size(0) # N
labels = labels.flatten() # (N,)
assert sample_size == labels.size(0)
# (N, M) --> (N, 1) --> (N, C) | (C, M) --> (C, 1) --> (C, N) --> (N, C)
# (N, C)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(sample_size, self.num_classes) + \
torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, sample_size).t()
# distmat - 2 (x * center.T)
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)  # keyword form of the deprecated addmm_(1, -2, ...) call
classes = torch.arange(self.num_classes).long()
if self.use_gpu: classes = classes.cuda()
labels = labels.unsqueeze(1).expand(sample_size, self.num_classes)
mask = labels.eq(classes.expand(sample_size, self.num_classes))
dist = distmat * mask.float()
loss = dist.clamp(min=1e-12, max=1e+12).sum() / sample_size
#norm_loss = torch.exp(-torch.norm(self.centers.unsqueeze(dim=0)-self.centers.unsqueeze(dim=1), p=2, dim=-1))
#dis_loss = self._dis_criterion(feature, feature_label)
return loss / batch_size #+ norm_loss / batch_size
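# Hedged usage sketch (illustrative shapes): CenterLoss takes a dense feature map and per-pixel
# labels and pulls every pixel embedding towards the learnable center of its class; the centers
# are an nn.Parameter and are optimized together with the rest of the network.
def _center_loss_usage_example():
    criterion = CenterLoss(num_classes=16, feat_dim=256, use_gpu=False)
    features = torch.randn(2, 256, 32, 32)      # (B, feat_dim, H, W)
    labels = torch.randint(0, 16, (2, 32, 32))  # (B, H, W)
    return criterion(features, labels)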
if __name__ =='__main__':
center_loss=CenterLoss()
print(center_loss.centers.data.shape)
center=center_loss.centers.data
torch.save(center,'center.pth')
#torch.save('./center.pth',center_loss.state_dict()) | 10,333 | 39.84585 | 177 | py |
RAML | RAML-master/incremental/utils/utils.py | from torchvision.transforms.functional import normalize
import torch.nn as nn
import numpy as np
import os
def denormalize(tensor, mean, std):
mean = np.array(mean)
std = np.array(std)
_mean = -mean/std
_std = 1/std
return normalize(tensor, _mean, _std)
class Denormalize(object):
def __init__(self, mean, std):
mean = np.array(mean)
std = np.array(std)
self._mean = -mean/std
self._std = 1/std
def __call__(self, tensor):
if isinstance(tensor, np.ndarray):
return (tensor - self._mean.reshape(-1,1,1)) / self._std.reshape(-1,1,1)
return normalize(tensor, self._mean, self._std)
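# Hedged usage sketch: Denormalize inverts the ImageNet-style normalization so a network input
# tensor can be turned back into a displayable image. The mean/std values mirror the ones used
# elsewhere in this repo; `image_tensor` is assumed to be a normalized (3, H, W) torch tensor.
def _denormalize_usage_example(image_tensor):
    denorm = Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    img = denorm(image_tensor.detach().cpu().numpy())       # (3, H, W), back in [0, 1]
    return (img * 255).transpose(1, 2, 0).astype(np.uint8)  # (H, W, 3) uint8 image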
def set_bn_momentum(model, momentum=0.1):
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.momentum = momentum
def fix_bn(model):
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
def colorEncode(labelmap, colors, mode='RGB'):
labelmap = labelmap.astype('int')
labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3),
dtype=np.uint8)
for label in unique(labelmap):
if label < 0:
continue
labelmap_rgb += (labelmap == label)[:, :, np.newaxis] * \
np.tile(colors[label],
(labelmap.shape[0], labelmap.shape[1], 1))
if mode == 'BGR':
return labelmap_rgb[:, :, ::-1]
else:
return labelmap_rgb
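# Hedged usage sketch (toy palette): colorEncode() maps an integer label map to an RGB image by
# looking up each label id in a color table, so the palette must be indexable by the largest
# label id present in the map.
def _color_encode_usage_example():
    labelmap = np.random.randint(0, 3, (4, 4))  # toy (H, W) label map with ids 0..2
    colors = np.array([[128, 64, 128], [244, 35, 232], [70, 70, 70]], dtype=np.uint8)
    return colorEncode(labelmap, colors, mode='RGB')  # (H, W, 3) uint8 image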
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
ar = np.asanyarray(ar).flatten()
optional_indices = return_index or return_inverse
optional_returns = optional_indices or return_counts
if ar.size == 0:
if not optional_returns:
ret = ar
else:
ret = (ar,)
            if return_index:
                ret += (np.empty(0, np.intp),)  # np.bool was removed in recent NumPy; index arrays are integer typed
            if return_inverse:
                ret += (np.empty(0, np.intp),)
if return_counts:
ret += (np.empty(0, np.intp),)
return ret
if optional_indices:
perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
aux = ar[perm]
else:
ar.sort()
aux = ar
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if not optional_returns:
ret = aux[flag]
else:
ret = (aux[flag],)
if return_index:
ret += (perm[flag],)
if return_inverse:
iflag = np.cumsum(flag) - 1
inv_idx = np.empty(ar.shape, dtype=np.intp)
inv_idx[perm] = iflag
ret += (inv_idx,)
if return_counts:
idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
ret += (np.diff(idx),)
return ret
| 2,850 | 28.391753 | 84 | py |
RAML | RAML-master/incremental/utils/scheduler.py | from torch.optim.lr_scheduler import _LRScheduler, StepLR
class PolyLR(_LRScheduler):
def __init__(self, optimizer, max_iters, power=0.9, last_epoch=-1, min_lr=1e-6):
self.power = power
self.max_iters = max_iters # avoid zero lr
self.min_lr = min_lr
super(PolyLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
return [ max( base_lr * ( 1 - self.last_epoch/self.max_iters )**self.power, self.min_lr)
for base_lr in self.base_lrs] | 509 | 41.5 | 96 | py |
RAML | RAML-master/incremental/utils/visualizer.py | from visdom import Visdom
import json
class Visualizer(object):
""" Visualizer
"""
def __init__(self, port='13579', env='main', id=None):
self.cur_win = {}
self.vis = Visdom(port=port, env=env)
self.id = id
self.env = env
# Restore
ori_win = self.vis.get_window_data()
ori_win = json.loads(ori_win)
#print(ori_win)
self.cur_win = { v['title']: k for k, v in ori_win.items() }
def vis_scalar(self, name, x, y, opts=None):
if not isinstance(x, list):
x = [x]
if not isinstance(y, list):
y = [y]
if self.id is not None:
name = "[%s]"%self.id + name
default_opts = { 'title': name }
if opts is not None:
default_opts.update(opts)
win = self.cur_win.get(name, None)
if win is not None:
self.vis.line( X=x, Y=y, opts=default_opts, update='append',win=win )
else:
self.cur_win[name] = self.vis.line( X=x, Y=y, opts=default_opts)
def vis_image(self, name, img, env=None, opts=None):
""" vis image in visdom
"""
if env is None:
env = self.env
if self.id is not None:
name = "[%s]"%self.id + name
win = self.cur_win.get(name, None)
default_opts = { 'title': name }
if opts is not None:
default_opts.update(opts)
if win is not None:
            self.vis.image( img=img, win=win, opts=default_opts, env=env )
else:
self.cur_win[name] = self.vis.image( img=img, opts=default_opts, env=env )
def vis_table(self, name, tbl, opts=None):
win = self.cur_win.get(name, None)
tbl_str = "<table width=\"100%\"> "
tbl_str+="<tr> \
<th>Term</th> \
<th>Value</th> \
</tr>"
for k, v in tbl.items():
tbl_str+= "<tr> \
<td>%s</td> \
<td>%s</td> \
</tr>"%(k, v)
tbl_str+="</table>"
default_opts = { 'title': name }
if opts is not None:
default_opts.update(opts)
if win is not None:
self.vis.text(tbl_str, win=win, opts=default_opts)
else:
self.cur_win[name] = self.vis.text(tbl_str, opts=default_opts)
if __name__=='__main__':
import numpy as np
vis = Visualizer(port=13500, env='main')
tbl = {"lr": 214, "momentum": 0.9}
vis.vis_table("test_table", tbl)
tbl = {"lr": 244444, "momentum": 0.9, "haha": "hoho"}
vis.vis_table("test_table", tbl)
| 2,661 | 30.690476 | 86 | py |
RAML | RAML-master/incremental/utils/__init__.py | from .utils import *
# from .visualizer import Visualizer
from .scheduler import PolyLR
from .loss import FocalLoss, CrossEntropyLoss, CrossEntropyLoss_dis, CenterLoss | 167 | 41 | 79 | py |
RAML | RAML-master/incremental/utils/ext_transforms.py | import torchvision
import torch
import torchvision.transforms.functional as F
import random
import numbers
import numpy as np
from PIL import Image
#
# Extended Transforms for Semantic Segmentation
#
class ExtRandomHorizontalFlip(object):
"""Horizontally flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < self.p:
return F.hflip(img), F.hflip(lbl)
return img, lbl
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class ExtCompose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, lbl):
for t in self.transforms:
img, lbl = t(img, lbl)
return img, lbl
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class ExtCenterCrop(object):
"""Crops the given PIL Image at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
return F.center_crop(img, self.size), F.center_crop(lbl, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class ExtRandomScale(object):
def __init__(self, scale_range, interpolation=Image.BILINEAR):
self.scale_range = scale_range
self.interpolation = interpolation
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be scaled.
lbl (PIL Image): Label to be scaled.
Returns:
PIL Image: Rescaled image.
PIL Image: Rescaled label.
"""
assert img.size == lbl.size
scale = random.uniform(self.scale_range[0], self.scale_range[1])
target_size = ( int(img.size[1]*scale), int(img.size[0]*scale) )
return F.resize(img, target_size, self.interpolation), F.resize(lbl, target_size, Image.NEAREST)
def __repr__(self):
        # this transform has no fixed size; report the scale range and interpolation mode instead
        return self.__class__.__name__ + '(scale_range={0}, interpolation={1})'.format(self.scale_range, self.interpolation)
class ExtScale(object):
"""Resize the input PIL Image to the given scale.
Args:
Scale (sequence or int): scale factors
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
"""
def __init__(self, scale, interpolation=Image.BILINEAR):
self.scale = scale
self.interpolation = interpolation
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be scaled.
lbl (PIL Image): Label to be scaled.
Returns:
PIL Image: Rescaled image.
PIL Image: Rescaled label.
"""
assert img.size == lbl.size
target_size = ( int(img.size[1]*self.scale), int(img.size[0]*self.scale) ) # (H, W)
return F.resize(img, target_size, self.interpolation), F.resize(lbl, target_size, Image.NEAREST)
def __repr__(self):
        return self.__class__.__name__ + '(scale={0}, interpolation={1})'.format(self.scale, self.interpolation)
class ExtRandomRotation(object):
"""Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=False, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img, lbl):
"""
img (PIL Image): Image to be rotated.
lbl (PIL Image): Label to be rotated.
Returns:
PIL Image: Rotated image.
PIL Image: Rotated label.
"""
angle = self.get_params(self.degrees)
return F.rotate(img, angle, self.resample, self.expand, self.center), F.rotate(lbl, angle, self.resample, self.expand, self.center)
def __repr__(self):
format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
format_string += ', resample={0}'.format(self.resample)
format_string += ', expand={0}'.format(self.expand)
if self.center is not None:
format_string += ', center={0}'.format(self.center)
format_string += ')'
return format_string
class ExtRandomHorizontalFlip(object):
"""Horizontally flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < self.p:
return F.hflip(img), F.hflip(lbl)
return img, lbl
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class ExtRandomVerticalFlip(object):
"""Vertically flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be flipped.
lbl (PIL Image): Label to be flipped.
Returns:
PIL Image: Randomly flipped image.
PIL Image: Randomly flipped label.
"""
if random.random() < self.p:
return F.vflip(img), F.vflip(lbl)
return img, lbl
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class ExtPad(object):
def __init__(self, diviser=32):
self.diviser = diviser
def __call__(self, img, lbl):
        w, h = img.size  # PIL size is (width, height); the original code swapped these
        ph = (h // self.diviser + 1) * self.diviser - h if h % self.diviser != 0 else 0  # was hard-coded to 32
        pw = (w // self.diviser + 1) * self.diviser - w if w % self.diviser != 0 else 0
        im = F.pad(img, (pw // 2, ph // 2, pw - pw // 2, ph - ph // 2))   # (left, top, right, bottom)
        lbl = F.pad(lbl, (pw // 2, ph // 2, pw - pw // 2, ph - ph // 2))
return im, lbl
class ExtToTensor(object):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
def __init__(self, normalize=True, target_type='uint8'):
self.normalize = normalize
self.target_type = target_type
def __call__(self, pic, lbl):
"""
Note that labels will not be normalized to [0, 1].
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
lbl (PIL Image or numpy.ndarray): Label to be converted to tensor.
Returns:
Tensor: Converted image and label
"""
if self.normalize:
return F.to_tensor(pic), torch.from_numpy( np.array( lbl, dtype=self.target_type) )
else:
return torch.from_numpy( np.array( pic, dtype=np.float32).transpose(2, 0, 1) ), torch.from_numpy( np.array( lbl, dtype=self.target_type) )
def __repr__(self):
return self.__class__.__name__ + '()'
class ExtNormalize(object):
"""Normalize a tensor image with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor, lbl):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
tensor (Tensor): Tensor of label. A dummy input for ExtCompose
Returns:
Tensor: Normalized Tensor image.
Tensor: Unchanged Tensor label
"""
return F.normalize(tensor, self.mean, self.std), lbl
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
class ExtRandomCrop(object):
"""Crop the given PIL Image at a random location.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
padding (int or sequence, optional): Optional padding on each border
of the image. Default is 0, i.e no padding. If a sequence of length
4 is provided, it is used to pad left, top, right, bottom borders
respectively.
pad_if_needed (boolean): It will pad the image if smaller than the
desired size to avoid raising an exception.
"""
def __init__(self, size, padding=0, pad_if_needed=False):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
self.pad_if_needed = pad_if_needed
@staticmethod
def get_params(img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = img.size
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be cropped.
lbl (PIL Image): Label to be cropped.
Returns:
PIL Image: Cropped image.
PIL Image: Cropped label.
"""
assert img.size == lbl.size, 'size of img and lbl should be the same. %s, %s'%(img.size, lbl.size)
if self.padding > 0:
img = F.pad(img, self.padding)
lbl = F.pad(lbl, self.padding)
# pad the width if needed
if self.pad_if_needed and img.size[0] < self.size[1]:
img = F.pad(img, padding=int((1 + self.size[1] - img.size[0]) / 2))
lbl = F.pad(lbl, padding=int((1 + self.size[1] - lbl.size[0]) / 2))
# pad the height if needed
if self.pad_if_needed and img.size[1] < self.size[0]:
img = F.pad(img, padding=int((1 + self.size[0] - img.size[1]) / 2))
lbl = F.pad(lbl, padding=int((1 + self.size[0] - lbl.size[1]) / 2))
i, j, h, w = self.get_params(img, self.size)
return F.crop(img, i, j, h, w), F.crop(lbl, i, j, h, w)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)
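# Hedged usage sketch: the Ext* transforms operate on (image, label) pairs so the exact same
# spatial crop/flip is applied to both. The blank PIL images below are illustrative stand-ins.
def _ext_random_crop_usage_example():
    img = Image.new('RGB', (1024, 512))
    lbl = Image.new('L', (1024, 512))
    transform = ExtCompose([
        ExtRandomCrop(size=(256, 256), pad_if_needed=True),
        ExtRandomHorizontalFlip(),
        ExtToTensor(),
    ])
    return transform(img, lbl)  # (image tensor (3, 256, 256), label tensor (256, 256))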
class ExtResize(object):
"""Resize the input PIL Image to the given size.
Args:
size (sequence or int): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
"""
def __init__(self, size, interpolation=Image.BILINEAR):
        assert isinstance(size, int) or (isinstance(size, (list, tuple)) and len(size) == 2)  # (list, tuple) replaces collections.Iterable, which is not imported here
self.size = size
self.interpolation = interpolation
def __call__(self, img, lbl):
"""
Args:
            img (PIL Image): Image to be scaled.
            lbl (PIL Image): Label to be scaled.
        Returns:
            PIL Image: Rescaled image.
            PIL Image: Rescaled label (resized with nearest-neighbour interpolation).
"""
return F.resize(img, self.size, self.interpolation), F.resize(lbl, self.size, Image.NEAREST)
def __repr__(self):
interpolate_str = _pil_interpolation_to_str[self.interpolation]
return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)
class ExtColorJitter(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
or the given [min, max]. Should be non negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
or the given [min, max]. Should be non negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = self._check_input(brightness, 'brightness')
self.contrast = self._check_input(contrast, 'contrast')
self.saturation = self._check_input(saturation, 'saturation')
self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
clip_first_on_zero=False)
def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError("If {} is a single number, it must be non negative.".format(name))
value = [center - value, center + value]
if clip_first_on_zero:
value[0] = max(value[0], 0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name, bound))
else:
raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))
# if value is 0 or (1., 1.) for brightness/contrast/saturation
# or (0., 0.) for hue, do nothing
if value[0] == value[1] == center:
value = None
return value
@staticmethod
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness is not None:
brightness_factor = random.uniform(brightness[0], brightness[1])
transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast is not None:
contrast_factor = random.uniform(contrast[0], contrast[1])
transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation is not None:
saturation_factor = random.uniform(saturation[0], saturation[1])
transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue is not None:
hue_factor = random.uniform(hue[0], hue[1])
transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
random.shuffle(transforms)
transform = Compose(transforms)
return transform
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Input image.
Returns:
PIL Image: Color jittered image.
"""
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
return transform(img), lbl
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'brightness={0}'.format(self.brightness)
format_string += ', contrast={0}'.format(self.contrast)
format_string += ', saturation={0}'.format(self.saturation)
format_string += ', hue={0})'.format(self.hue)
return format_string
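# Usage sketch (illustrative only): the photometric jitter touches only the image;
# the label is returned as-is, so augmentation never corrupts the segmentation target:
#   >>> jitter = ExtColorJitter(brightness=0.5, contrast=0.5, saturation=0.5)
#   >>> img, lbl = jitter(img_pil, lbl_pil)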
class Lambda(object):
"""Apply a user-defined lambda as a transform.
Args:
lambd (function): Lambda/function to be used for transform.
"""
def __init__(self, lambd):
assert callable(lambd), repr(type(lambd).__name__) + " object is not callable"
self.lambd = lambd
def __call__(self, img):
return self.lambd(img)
def __repr__(self):
return self.__class__.__name__ + '()'
class Compose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
for t in self.transforms:
img = t(img)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string | 20,817 | 35.458844 | 150 | py |
LLP-VAT | LLP-VAT-main/llp_vat/main.py | import argparse
import os
import uuid
from tqdm.auto import tqdm
import arrow
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data.dataset import random_split
from llp_vat.lib.llp import (BagMiniBatch, load_llp_dataset, BagSampler,
Iteration)
from llp_vat.lib.losses import (ProportionLoss, PiModelLoss, VATLoss,
compute_hard_l1, compute_soft_kl)
from llp_vat.lib.networks import wide_resnet28_2
from llp_vat.lib.run_experiment import (write_meters, RunExperiment,
save_checkpoint)
from llp_vat.lib.ramps import sigmoid_rampup
from llp_vat.lib.utils import AverageMeterSet, accuracy, parameters_string
def get_rampup_weight(weight, iteration, rampup):
alpha = weight * sigmoid_rampup(iteration.value, rampup)
return alpha
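# Worked example (illustrative; weight=0.05 matches the --consistency default below,
# rampup=100 is an assumed value): the factor is exp(-5 * (1 - t/rampup)^2), so
# alpha rises from ~0.05 * exp(-5) ~= 3.4e-4 at iteration 0, to ~0.05 * 0.29 ~= 0.014
# halfway through, and saturates at the full 0.05 once iteration.value >= 100.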
def train_llp(args, epoch, iteration, model, optimizer, loader,
criterion, consistency_criterion, logger):
meters = AverageMeterSet()
mini_batch = BagMiniBatch(args.n_samples)
# set up training mode for model
model.train()
for i, (x, y) in tqdm(enumerate(loader),
"[train#{}]".format(epoch),
leave=False,
ncols=150,
total=len(loader),
disable=args.disable):
with torch.autograd.set_detect_anomaly(True):
x = x.cuda()
y = y.cuda()
            # accumulate bags until the mini-batch holds at least
            # args.mini_batch_size bags
mini_batch.append(x, y)
if mini_batch.num_bags < args.mini_batch_size:
continue
# skip training if there exists only one instance in a mini-batch
# because the BatchNorm would crash under this circumstance
if mini_batch.total_size == 1:
continue
# concatenate all bags
x, y = map(torch.cat, zip(*mini_batch.bags))
logits = None
if args.consistency_type == "vat":
# VAT should be calculated before the forward for cross entropy
consistency_loss = consistency_criterion(model, x)
elif args.consistency_type == "pi":
consistency_loss, logits = consistency_criterion(model, x)
else:
consistency_loss = torch.tensor(0.)
alpha = get_rampup_weight(args.consistency, iteration,
args.consistency_rampup)
consistency_loss = alpha * consistency_loss
meters.update("cons_loss", consistency_loss.item())
meters.update("cons_weight", alpha)
# reuse the logits from pi-model
if logits is None:
logits = model(x)
probs = F.softmax(logits, dim=1)
# compute proportion loss for each bag
if args.alg == "uniform":
                # compute proportion loss in the batch way
batch_probs = probs.view(
mini_batch.num_bags, args.bag_size, -1)
batch_avg_probs = torch.mean(batch_probs, dim=1)
batch_target = torch.stack(mini_batch.targets)
prop_loss = criterion(batch_avg_probs, batch_target)
else:
# compute proportion loss in sequential way
prop_loss = 0
start = 0
for bag_size, target in mini_batch:
# proportion loss
avg_probs = torch.mean(
probs[start:start + bag_size], dim=0)
prop_loss += criterion(avg_probs, target)
start += bag_size
prop_loss = prop_loss / mini_batch.num_bags
meters.update("prop_loss", prop_loss.item())
# proportion_loss + consistency_loss
loss = prop_loss + consistency_loss
meters.update("loss", loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration.step()
prec1, prec5 = accuracy(logits, y.argmax(1), top_k=(1, 5))
meters.update("top1", prec1.item(), y.size(0))
meters.update("top5", prec5.item(), y.size(0))
# clear mini_batch
mini_batch.reset()
if logger:
logger.info("Epoch#{}-{} "
"cons_weight={meters[cons_weight].avg:.4f} "
"cons_loss={meters[cons_loss].avg:.4f} "
"prop_loss={meters[prop_loss].avg:.4f} "
"loss={meters[loss].avg:.4f} "
"prec@1={meters[top1].avg:.2f}% "
"prec@5={meters[top5].avg:.2f}%".format(epoch,
iteration.value,
meters=meters))
return meters
def eval(args, epoch, iteration, model, loader, criterion, logger, prefix=""):
meters = AverageMeterSet()
num_classes = 100 if args.dataset_name == 'cifar100' else 10
model.eval()
for x, y in tqdm(loader,
"[Evalutaion]",
leave=False,
ncols=150,
disable=args.disable):
x = x.cuda()
y = y.cuda()
with torch.no_grad():
logits = model(x)
probs = F.softmax(logits, dim=1)
avg_probs = torch.mean(probs, dim=0)
avg_ys = torch.mean(y, dim=0)
soft_kl = compute_soft_kl(avg_probs, avg_ys)
hard_l1 = compute_hard_l1(probs, y, num_classes)
loss = criterion(avg_probs, avg_ys)
meters.update('soft_kl', soft_kl.item())
meters.update('hard_l1', hard_l1.item())
meters.update('prop_loss', loss.item())
prec1, prec5 = accuracy(logits, y.argmax(1), top_k=(1, 5))
meters.update('top1', prec1.item(), y.size(0))
meters.update('top5', prec5.item(), y.size(0))
if logger:
logger.info("Epoch#{}-{} "
"{prefix}soft_kl={meters[soft_kl].avg:.4f} "
"{prefix}hard_l1={meters[hard_l1].avg:.4f} "
"{prefix}prop_loss={meters[prop_loss].avg:.4f} "
"{prefix}prec@1={meters[top1].avg:.2f}% "
"{prefix}prec@5={meters[top5].avg:.2f}%".format(
epoch, iteration.value, meters=meters, prefix=prefix))
return meters
def train_valid_split(dataset, valid_ratio, seed):
torch.manual_seed(seed)
valid_size = int(valid_ratio * len(dataset))
train_size = len(dataset) - valid_size
train, valid = random_split(dataset, [train_size, valid_size])
return train, valid
def create_model(model_name, num_classes, dataset_name):
if model_name == "wrn28-2":
if dataset_name.lower().startswith("cifar"):
dropout_rate = 0.3
elif dataset_name.lower().startswith("svhn"):
dropout_rate = 0.4
else:
raise NameError("Unknown dataset name")
print("Dropout: {}".format(dropout_rate))
return wide_resnet28_2(dropout_rate=dropout_rate,
num_classes=num_classes)
else:
raise NameError("Unknown model name")
def run_experiment(args, experiment):
experiment.save_config(vars(args))
# create logger for training, testing, validation
logger = experiment.create_logfile("experiment")
train_log = experiment.create_logfile("train")
valid_log = experiment.create_logfile("valid")
test_log = experiment.create_logfile("test")
# create tensorboard writer
tb_writer = experiment.create_tb_writer()
logger.info(args)
# load LLP dataset
if args.alg == "uniform":
dataset, bags = load_llp_dataset(args.dataset_dir,
args.obj_dir,
args.dataset_name,
args.alg,
replacement=args.replacement,
bag_size=args.bag_size)
elif args.alg == "kmeans":
dataset, bags = load_llp_dataset(args.dataset_dir,
args.obj_dir,
args.dataset_name,
args.alg,
n_clusters=args.n_clusters,
reduction=args.reduction)
else:
raise NameError("The bag creation algorithm is unknown")
    # construct data loader
train_bags, valid_bags = train_valid_split(bags, args.valid, args.seed)
train_bag_sampler = BagSampler(train_bags, args.num_bags)
train_loader = DataLoader(dataset["train"],
batch_sampler=train_bag_sampler,
pin_memory=True,
num_workers=2)
valid_loader = None
if args.valid > 0:
valid_bag_sampler = BagSampler(valid_bags, num_bags=-1)
valid_loader = DataLoader(dataset["train"],
batch_sampler=valid_bag_sampler,
pin_memory=True,
num_workers=2)
test_loader = DataLoader(dataset["test"],
batch_size=256,
pin_memory=True,
num_workers=2)
# declare model
model = create_model(args.model_name, dataset["num_classes"],
args.dataset_name)
model = model.cuda()
# declare optimizer
if args.optimizer.lower() == "sgd":
optimizer = optim.SGD(model.parameters(),
momentum=0.9,
lr=args.lr,
weight_decay=args.weight_decay)
elif args.optimizer.lower() == "adam":
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay)
else:
raise NameError("optimizer {} is not supported".format(args.optimizer))
# print model architecture and optimizer
logger.info(parameters_string(model))
logger.info(optimizer)
# declare LLP criterion - the Proportion loss
criterion = ProportionLoss(args.metric, 1.0)
logger.info(criterion)
# declare consistency criterion
if args.consistency_type == "none":
consistency_criterion = None
elif args.consistency_type == "vat":
consistency_criterion = VATLoss(xi=args.xi, eps=args.eps, ip=args.ip)
elif args.consistency_type == "pi":
consistency_criterion = PiModelLoss(std=args.std)
else:
raise NameError("Unknown consistency criterion")
if consistency_criterion and args.consistency_rampup == -1:
args.consistency_rampup = 0.4 * args.num_epochs * \
len(train_loader) / args.mini_batch_size
    # adjust learning rate
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=320, gamma=0.2)
iteration = Iteration()
for epoch in range(args.num_epochs):
train_meters = train_llp(args, epoch, iteration, model,
optimizer, train_loader, criterion,
consistency_criterion, train_log)
write_meters(epoch, "train", tb_writer, train_meters)
if valid_loader:
valid_meters = eval(args, epoch, iteration, model, valid_loader,
criterion, valid_log)
write_meters(epoch, "valid", tb_writer, valid_meters)
test_meters = eval(args, epoch, iteration, model, test_loader,
criterion, test_log)
write_meters(epoch, "test", tb_writer, test_meters)
scheduler.step()
# save checkpoint
if (epoch + 1) % 50 == 0:
logger.info("Save checkpoint#{}".format(epoch))
filename = os.path.join(experiment.result_dir, "model.tar")
save_checkpoint(filename, model, epoch, optimizer)
tb_writer.close()
def main(args):
uid = "{time}_{uuid}".format(
time=arrow.utcnow().format("YYYYMMDDTHH:mm:ss"),
uuid=str(uuid.uuid4())[:4]
)
result_dir = os.path.join(args.result_dir, uid)
experiment = RunExperiment(result_dir)
run_experiment(args, experiment)
def get_args():
parser = argparse.ArgumentParser(
"Learning from Label Proportions with Consistency Regularization")
# basic arguments
parser.add_argument("--obj_dir", default="./obj")
parser.add_argument("--dataset_dir", default="./obj/dataset")
parser.add_argument("--result_dir", default="./results")
parser.add_argument("-d", "--dataset_name", type=str)
parser.add_argument("-m", "--model_name", type=str, default="wrn28-2")
parser.add_argument("-e", "--num_epochs", type=int, default=400)
parser.add_argument("--lr", type=float, default=3e-4)
parser.add_argument("--optimizer", type=str, default="adam")
parser.add_argument("--weight_decay", type=float, default=0.)
parser.add_argument("--metric", type=str, default="ce")
parser.add_argument("--valid", type=float, default=0.1)
parser.add_argument("--seed", default=0, type=int)
parser.add_argument("--n_samples", default=0, type=int)
parser.add_argument("--disable", action="store_true",
help="disable the progress bar")
# bag creation algorithms
parser.add_argument("--alg", choices=["uniform", "kmeans"])
parser.add_argument("-b", "--bag_size", type=int)
parser.add_argument("--replacement", action="store_true")
parser.add_argument("-k", "--n_clusters", type=int)
parser.add_argument("--reduction", type=int, default=600)
# coefficient for proportion loss
parser.add_argument("--num_bags", default=-1, type=int)
parser.add_argument("--mini_batch_size", type=int, default=2)
# consistency args
parser.add_argument("--consistency_type",
choices=["vat", "pi", "none"],
default="vat")
parser.add_argument("--consistency", type=float, default=0.05)
parser.add_argument("--consistency_rampup", type=int, default=-1)
# pi args
parser.add_argument("--std", type=float, default=0.15)
# vat args
parser.add_argument("--xi", type=float, default=1e-6)
parser.add_argument("--eps", type=float, default=6.0)
parser.add_argument("--ip", type=int, default=1)
return parser.parse_args()
if __name__ == "__main__":
args = get_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
main(args)
| 14,895 | 39.150943 | 79 | py |
LLP-VAT | LLP-VAT-main/llp_vat/preprocessing.py | import argparse
from llp_vat.lib.llp import create_llp_dataset
def main(args):
# create LLP dataset
if args.alg == "uniform":
kwargs = dict(replacement=args.replacement,
bag_size=args.bag_size,
seed=args.seed)
elif args.alg == "kmeans":
kwargs = dict(n_clusters=args.n_clusters,
reduction=args.reduction,
seed=args.seed)
else:
raise NameError("The bag creation algorithm is not supported")
create_llp_dataset(args.dataset_dir,
args.obj_dir,
args.dataset_name,
args.alg,
**kwargs)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--obj_dir", default="./obj")
parser.add_argument("--dataset_dir", default="./obj/dataset")
parser.add_argument("-d", "--dataset_name",
choices=["svhn", "cifar10", "cifar100"],
required=True)
parser.add_argument("--alg", choices=["uniform", "kmeans"], required=True)
parser.add_argument("-b", "--bag_size", type=int)
parser.add_argument("--replacement", action="store_true")
parser.add_argument("-k", "--n_clusters", type=int)
parser.add_argument("--reduction", type=int, default=600)
parser.add_argument("--seed", default=0, type=int)
return parser.parse_args()
if __name__ == "__main__":
args = get_args()
main(args)
| 1,501 | 33.136364 | 78 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/losses.py | import contextlib
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.constraints import simplex
from llp_vat.lib.networks import GaussianNoise
def compute_soft_kl(inputs, targets):
with torch.no_grad():
loss = cross_entropy_loss(inputs, targets)
loss = torch.sum(loss, dim=-1).mean()
return loss
def compute_hard_l1(inputs, targets, num_classes):
with torch.no_grad():
predicted = torch.bincount(inputs.argmax(1),
minlength=num_classes).float()
predicted = predicted / torch.sum(predicted, dim=0)
targets = torch.mean(targets, dim=0)
loss = F.l1_loss(predicted, targets, reduction="sum")
return loss
def cross_entropy_loss(input, target, eps=1e-8):
assert simplex.check(input) and simplex.check(target), \
"input {} and target {} should be a simplex".format(input, target)
input = torch.clamp(input, eps, 1 - eps)
loss = -target * torch.log(input)
return loss
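# Worked example (illustrative): cross_entropy_loss returns the unreduced terms
# -target * log(clamp(input, eps, 1 - eps)); for input = [0.7, 0.2, 0.1] and
# target = [1, 0, 0] that is [~0.357, 0, 0]. Callers such as compute_soft_kl and
# ProportionLoss below sum over the class dimension and average themselves.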
class ProportionLoss(nn.Module):
def __init__(self, metric, alpha, eps=1e-8):
super(ProportionLoss, self).__init__()
self.metric = metric
self.eps = eps
self.alpha = alpha
def forward(self, input, target):
        # input and target should be probability tensors
# and have been averaged over bag size
assert simplex.check(input) and simplex.check(target), \
"input {} and target {} should be a simplex".format(input, target)
assert input.shape == target.shape
if self.metric == "ce":
loss = cross_entropy_loss(input, target, eps=self.eps)
elif self.metric == "l1":
loss = F.l1_loss(input, target, reduction="none")
elif self.metric == "mse":
loss = F.mse_loss(input, target, reduction="none")
else:
raise NameError("metric {} is not supported".format(self.metric))
loss = torch.sum(loss, dim=-1).mean()
return self.alpha * loss
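# Usage sketch (illustrative; tensor names are placeholders): both arguments are
# bag-level class proportions, e.g. softmax outputs averaged over one bag versus
# that bag's ground-truth label proportion, as in train_llp:
#   >>> criterion = ProportionLoss(metric="ce", alpha=1.0)
#   >>> loss = criterion(avg_probs, bag_target)  # both sum to 1 on the last dim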
@contextlib.contextmanager
def _disable_tracking_bn_stats(model):
def switch_attr(m):
if hasattr(m, 'track_running_stats'):
m.track_running_stats ^= True
model.apply(switch_attr)
yield
model.apply(switch_attr)
def _l2_normalize(d):
d_reshaped = d.view(d.shape[0], -1, *(1 for _ in range(d.dim() - 2)))
d /= torch.norm(d_reshaped, dim=1, keepdim=True) + 1e-8
return d
class VATLoss(nn.Module):
def __init__(self, xi=10.0, eps=1.0, ip=1):
"""VAT loss
:param xi: hyperparameter of VAT (default: 10.0)
:param eps: hyperparameter of VAT (default: 1.0)
:param ip: iteration times of computing adv noise (default: 1)
"""
super(VATLoss, self).__init__()
self.xi = xi
self.eps = eps
self.ip = ip
def forward(self, model, x):
with torch.no_grad():
pred = F.softmax(model(x), dim=1)
# prepare random unit tensor
# d = torch.rand(x.shape).sub(0.5).to(x.device)
d = torch.randn_like(x)
d = _l2_normalize(d)
with _disable_tracking_bn_stats(model):
# calc adversarial direction
for _ in range(self.ip):
d.requires_grad_()
pred_hat = model(x + self.xi * d)
logp_hat = F.log_softmax(pred_hat, dim=1)
adv_distance = F.kl_div(logp_hat, pred, reduction='batchmean')
adv_distance.backward()
d = _l2_normalize(d.grad)
model.zero_grad()
# calc LDS
r_adv = d * self.eps
pred_hat = model(x + r_adv)
logp_hat = F.log_softmax(pred_hat, dim=1)
lds = F.kl_div(logp_hat, pred, reduction='batchmean')
return lds
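# Usage sketch (illustrative; task_loss/alpha are placeholder names): VATLoss
# estimates an adversarial direction with `ip` power-iteration steps of size `xi`,
# then returns KL(p(y|x) || p(y|x + eps * r_adv)). As in train_llp, it is computed
# before the ordinary forward pass:
#   >>> vat = VATLoss(xi=1e-6, eps=6.0, ip=1)
#   >>> lds = vat(model, x)
#   >>> loss = task_loss + alpha * lds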
class PiModelLoss(nn.Module):
def __init__(self, std=0.15):
super(PiModelLoss, self).__init__()
self.gn = GaussianNoise(std)
def forward(self, model, x):
logits1 = model(x)
probs1 = F.softmax(logits1, dim=1)
with torch.no_grad():
logits2 = model(self.gn(x))
probs2 = F.softmax(logits2, dim=1)
loss = F.mse_loss(probs1, probs2, reduction="sum") / x.size(0)
return loss, logits1
| 4,292 | 30.8 | 78 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/run_experiment.py | import glob
import os
import pathlib
import warnings
import logzero
import torch
import torch.nn as nn
import yaml
from torch.utils.tensorboard import SummaryWriter
def write_meters(epoch, tag, tb_writer, meters):
for name, value in meters.averages("").items():
tb_writer.add_scalar("{}/{}".format(tag, name), value, epoch)
def save_checkpoint(filename, model, epoch, optimizer=None):
checkpoint = {'epoch': epoch}
if isinstance(model, nn.DataParallel):
checkpoint['state_dict'] = model.module.state_dict()
else:
checkpoint['state_dict'] = model.state_dict()
if optimizer is not None:
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, filename)
def load_checkpoint(filename, model, optimizer=None, device="cpu"):
checkpoint = torch.load(filename, map_location=device)
model.load_state_dict(checkpoint['state_dict'])
if optimizer is not None:
optimizer.load_state_dict(checkpoint['optimizer'])
return model, optimizer
else:
return model
class RunExperiment:
def __init__(self, result_dir, mode="w"):
self._check_path(result_dir)
self.result_dir = result_dir
self.mode = mode
def _check_path(self, path):
"""Create directory if path doesn't exist"""
if path is not None:
if os.path.isfile(path):
raise TypeError("Cannot create directory {}".format(path))
target_dir = path
if os.path.exists(path):
warnings.warn(
"Experiment {} has been executed before".format(path))
opt = input("Continue running the experiment, y/[n]: ")
if opt.lower() != "y":
raise RuntimeError()
pathlib.Path(target_dir).mkdir(parents=True, exist_ok=True)
def create_logfile(self, name):
fmt = ("%(color)s[%(levelname)s %(name)s %(asctime)s]"
"%(end_color)s %(message)s")
log_fmt = logzero.LogFormatter(fmt=fmt)
if name is None:
filename = None
elif not name.endswith(".log"):
filename = os.path.join(self.result_dir, name + ".log")
else:
filename = os.path.join(self.result_dir, name)
if os.path.exists(filename):
os.remove(filename)
return logzero.setup_logger(name=name,
logfile=filename,
formatter=log_fmt)
def create_tb_writer(self):
# remove previous tensorboard results
files = glob.glob(os.path.join(self.result_dir,
'events.out.tfevents*'))
for f in files:
try:
os.remove(f)
except Exception:
raise RuntimeError("Error while removing file {}".format(f))
writer = SummaryWriter(self.result_dir)
return writer
def save_config(self, config):
with open(os.path.join(self.result_dir, "config.yml"), "w") as fp:
yaml.dump(config, fp)
| 3,101 | 31.652632 | 76 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/utils.py | def accuracy(output, target, top_k=(1, )):
"""Computes the precision@k for the specified values of k"""
max_k = max(top_k)
batch_size = target.size(0)
_, pred = output.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in top_k:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
if len(res) == 1:
res = res[0]
return res
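# Worked example (illustrative): for logits of shape (2, 5) where the true classes
# are ranked 1st and 3rd for the two samples, accuracy(output, target, top_k=(1, 3))
# returns [50.0, 100.0] -- prec@1 credits only the first sample, prec@3 credits both.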
def parameters_string(module):
lines = [
"",
"Model name: {}".format(module.__class__.__name__),
"List of model parameters:",
"=========================",
]
row_format = "{name:<40} {shape:>20} ={total_size:>12,d}"
params = list(module.named_parameters())
for name, param in params:
lines.append(
row_format.format(name=name,
shape=" * ".join(str(p) for p in param.size()),
total_size=param.numel()))
lines.append("=" * 75)
lines.append(
row_format.format(name="all parameters",
shape="sum of above",
total_size=sum(
int(param.numel()) for name, param in params)))
lines.append("")
return "\n".join(lines)
class AverageMeterSet:
def __init__(self):
self.meters = {}
def __getitem__(self, key):
return self.meters[key]
def update(self, name, value, n=1):
if name not in self.meters:
self.meters[name] = AverageMeter()
self.meters[name].update(value, n)
def reset(self):
for meter in self.meters.values():
meter.reset()
def values(self, postfix=''):
return {
name + postfix: meter.val
for name, meter in self.meters.items()
}
def averages(self, postfix='/avg'):
return {
name + postfix: meter.avg
for name, meter in self.meters.items()
}
def sums(self, postfix='/sum'):
return {
name + postfix: meter.sum
for name, meter in self.meters.items()
}
def counts(self, postfix='/count'):
return {
name + postfix: meter.count
for name, meter in self.meters.items()
}
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __format__(self, format):
return "{self.val:{format}} ({self.avg:{format}})".format(
self=self, format=format)
| 2,897 | 26.084112 | 77 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/networks.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def wide_resnet28_2(**kwargs):
net = WideResNet(28, 2, **kwargs)
net.apply(conv_init)
return net
class GaussianNoise(nn.Module):
""" add gasussian noise into feature """
def __init__(self, std):
super(GaussianNoise, self).__init__()
self.std = std
def forward(self, x):
zeros_ = torch.zeros_like(x)
n = torch.normal(zeros_, std=self.std)
return x + n
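# Note (illustrative): GaussianNoise adds zero-mean noise with standard deviation
# `std` and the same shape as the input; PiModelLoss uses it to build the second,
# perturbed forward pass of the Pi-model consistency term.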
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=True)
def conv_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.xavier_uniform_(m.weight, gain=np.sqrt(2))
nn.init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class WideBasic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1):
super(WideBasic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes,
planes,
kernel_size=3,
padding=1,
bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes,
planes,
kernel_size=1,
stride=stride,
bias=True), )
def forward(self, x):
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
out += self.shortcut(x)
return out
class WideResNet(nn.Module):
def __init__(self, depth, widen_factor, dropout_rate, num_classes):
super(WideResNet, self).__init__()
self.in_planes = 16
assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
n = (depth - 4) // 6
k = widen_factor
print('| Wide-Resnet %dx%d' % (depth, k))
nStages = [16, 16 * k, 32 * k, 64 * k]
self.conv1 = conv3x3(3, nStages[0])
self.layer1 = self._wide_layer(WideBasic,
nStages[1],
n,
dropout_rate,
stride=1)
self.layer2 = self._wide_layer(WideBasic,
nStages[2],
n,
dropout_rate,
stride=2)
self.layer3 = self._wide_layer(WideBasic,
nStages[3],
n,
dropout_rate,
stride=2)
self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], num_classes)
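    # Worked arithmetic (illustrative): wide_resnet28_2 passes depth=28 and
    # widen_factor=2, so n = (28 - 4) // 6 = 4 WideBasic blocks per stage and
    # nStages = [16, 32, 64, 128].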
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
| 4,260 | 31.526718 | 78 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/llp.py | import os
import pathlib
import time
from itertools import groupby
import numpy as np
import torch
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import PCA
from torch.utils.data import Sampler, BatchSampler, RandomSampler
from llp_vat.lib.datasets import load_dataset
class Iteration:
def __init__(self, start=0):
self.value = start
def step(self, step=1):
self.value += step
class BagMiniBatch:
def __init__(self, n_samples):
self.n_samples = n_samples
self.reset()
def reset(self):
self.bags = []
self.bag_sizes = []
self.targets = [] # store proportion labels
def append(self, x, y):
assert x.size(0) == y.size(0)
self.targets.append(torch.mean(y, dim=0))
if self.n_samples > 0:
index = torch.randperm(x.size(0))[:self.n_samples]
x = x[index]
y = y[index]
self.bags.append((x, y))
self.bag_sizes.append(y.size(0))
def __iter__(self):
for item in zip(self.bag_sizes, self.targets):
yield item
@property
def total_size(self):
return sum(self.bag_sizes)
@property
def max_bag_size(self):
return max(self.bag_sizes)
@property
def num_bags(self):
return len(self.bag_sizes)
class BagSampler(Sampler):
def __init__(self, bags, num_bags=-1):
"""
params:
bags: shape (num_bags, num_instances), the element of a bag
is the instance index of the dataset
num_bags: int, -1 stands for using all bags
"""
self.bags = bags
if num_bags == -1:
self.num_bags = len(bags)
else:
self.num_bags = num_bags
assert 0 < self.num_bags <= len(bags)
def __iter__(self):
indices = torch.randperm(self.num_bags)
for index in indices:
yield self.bags[index]
def __len__(self):
return len(self.bags)
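# Usage sketch (illustrative; variable names are placeholders): each item yielded
# by BagSampler is one bag, i.e. a list of dataset indices, so it plugs into
# DataLoader as a batch_sampler and every mini-batch is exactly one bag:
#   >>> sampler = BagSampler(bags, num_bags=-1)  # -1 keeps all bags
#   >>> loader = DataLoader(dataset["train"], batch_sampler=sampler)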
def uniform_creation(dataset, bag_size, replacement, seed, drop_last=True):
"""
return:
bags: a nested list containing instance indices, shape (n_bags, *)
"""
torch.manual_seed(seed)
start = time.time()
indices = RandomSampler(range(len(dataset)), replacement=replacement)
bags = list(BatchSampler(indices, batch_size=bag_size,
drop_last=drop_last))
print("Create uniform bags in {:.2f} seconds".format(time.time() - start))
return bags
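# Worked example (illustrative sizes): for a 50,000-image training set,
# uniform_creation(dataset, bag_size=64, replacement=False, seed=0) yields
# 781 bags of 64 indices each; the final incomplete bag is dropped because
# drop_last=True.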
def kmeans_creation(dataset, n_clusters, reduction, seed):
random_state = np.random.RandomState(seed)
data = [(x, y) for (x, y) in dataset]
X, y = map(torch.stack, zip(*data))
X = X.view(X.size(0), -1)
# PCA reduction
start = time.time()
pca = PCA(n_components=reduction)
X_new = pca.fit_transform(X)
print("PCA-{} in {:.2f} seconds".format(reduction, time.time() - start))
# assign bag label by k-means clustering
start = time.time()
init_size = max(3 * n_clusters, 300)
kmeans = MiniBatchKMeans(n_clusters=n_clusters,
random_state=random_state,
init_size=init_size)
kmeans.fit(X_new)
bag_labels = kmeans.predict(X_new)
print("K-means {} in {:.2f} seconds".format(n_clusters,
time.time() - start))
# create bags
start = time.time()
bags = sorted(zip(bag_labels, range(len(bag_labels))), key=lambda x: x[0])
bags = [[idx for _, idx in data]
for _, data in groupby(bags, key=lambda x: x[0])]
print("Create kmeans bags in {:.2f} seconds".format(time.time() - start))
return bags
def load_llp_dataset(dataset_dir, obj_dir, dataset_name, alg, **kwargs):
dataset = load_dataset(dataset_dir, dataset_name)
if alg == "uniform":
sampling = "SWR" if kwargs["replacement"] else "SWOR"
filename = "uniform-{}-{}.npy".format(sampling, kwargs["bag_size"])
elif alg == "kmeans":
filename = "kmeans-{}-{}.npy".format(kwargs["n_clusters"],
kwargs["reduction"])
elif alg == "overlap":
filename = "overlap-{}-{}.npy".format(kwargs["num_overlaps"],
kwargs["bag_size"])
else:
raise NameError("algorithm {} is not supported".format(alg))
path = os.path.join(obj_dir, dataset_name, filename)
bags = np.load(path, allow_pickle=True)
print("Load bags from {}".format(path))
return dataset, bags
def create_llp_dataset(dataset_dir, obj_dir, dataset_name, alg, **kwargs):
dataset = load_dataset(dataset_dir, dataset_name)
if alg == "uniform":
sampling = "SWR" if kwargs["replacement"] else "SWOR"
filename = "uniform-{}-{}.npy".format(sampling, kwargs["bag_size"])
bags = uniform_creation(dataset["train"], **kwargs)
elif alg == "kmeans":
filename = "kmeans-{}-{}.npy".format(kwargs["n_clusters"],
kwargs["reduction"])
bags = kmeans_creation(dataset["train"], **kwargs)
else:
raise NameError("algorithm {} is not supported".format(alg))
path = os.path.join(obj_dir, dataset_name, filename)
# dump bags
dirname = os.path.dirname(os.path.abspath(path))
pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
bags = np.array(bags)
np.save(path, bags)
| 5,463 | 31.141176 | 78 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/datasets.py | import torch
import torch.nn.functional as F
from torchvision import transforms
from torchvision.datasets import CIFAR10, CIFAR100, SVHN
class ToOneHot:
def __init__(self, num_classes):
self.num_classes = num_classes
def __call__(self, y: int) -> torch.Tensor:
one_hot = F.one_hot(torch.tensor(y), num_classes=self.num_classes)
return one_hot.float()
def cifar10(root):
channel_stats = dict(mean=[0.4914, 0.4822, 0.4465],
std=[0.2470, 0.2435, 0.2616])
num_classes = 10
transform = {
"train":
transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(**channel_stats)
]),
"test":
transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(**channel_stats)])
}
train = CIFAR10(root,
train=True,
transform=transform["train"],
target_transform=ToOneHot(num_classes),
download=True)
test = CIFAR10(root,
train=False,
transform=transform["test"],
target_transform=ToOneHot(num_classes),
download=True)
return {'train': train, 'test': test, 'num_classes': num_classes}
def cifar100(root):
channel_stats = dict(mean=[0.5071, 0.4865, 0.4409],
std=[0.2673, 0.2564, 0.2762])
num_classes = 100
transform = {
"train":
transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(**channel_stats)
]),
"test":
transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(**channel_stats)])
}
train = CIFAR100(root,
train=True,
transform=transform["train"],
target_transform=ToOneHot(num_classes),
download=True)
test = CIFAR100(root,
train=False,
transform=transform["test"],
target_transform=ToOneHot(num_classes),
download=True)
return {'train': train, 'test': test, 'num_classes': num_classes}
def svhn(root):
channel_stats = dict(mean=[0.4377, 0.4438, 0.4728],
std=[0.1980, 0.2010, 0.1970])
num_classes = 10
transform = {
"train":
transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(**channel_stats)
]),
"test":
transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(**channel_stats)])
}
train = SVHN(root,
split='train',
transform=transform["train"],
target_transform=ToOneHot(num_classes),
download=True)
test = SVHN(root,
split='test',
transform=transform["test"],
target_transform=ToOneHot(num_classes),
download=True)
return {'train': train, 'test': test, 'num_classes': num_classes}
def load_dataset(root, dataset_name):
dataset_name = dataset_name.lower()
if dataset_name == "cifar10":
dataset = cifar10(root)
elif dataset_name == "cifar100":
dataset = cifar100(root)
elif dataset_name == "svhn":
dataset = svhn(root)
else:
raise NameError("dataset {} is not supported".format(dataset_name))
return dataset
| 3,819 | 30.570248 | 75 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/ramps.py | # Copyright (c) 2018, Curious AI Ltd. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Functions for ramping hyperparameters up or down
Each function takes the current training step or epoch, and the
ramp length in the same format, and returns a multiplier between
0 and 1.
"""
import numpy as np
def sigmoid_rampup(current, rampup_length):
"""Exponential rampup from https://arxiv.org/abs/1610.02242"""
if rampup_length == 0:
return 1.0
else:
current = np.clip(current, 0.0, rampup_length)
phase = 1.0 - current / rampup_length
return float(np.exp(-5.0 * phase * phase))
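# Worked values (illustrative): with rampup_length=80 the multiplier is
# exp(-5 * (1 - t/80)^2): ~0.007 at t=0, ~0.29 at t=40, and exactly 1.0 for
# t >= 80, i.e. the exponential schedule of https://arxiv.org/abs/1610.02242.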
def linear_rampup(current, rampup_length):
"""Linear rampup"""
assert current >= 0 and rampup_length >= 0
if current >= rampup_length:
return 1.0
else:
return current / rampup_length
def cosine_rampdown(current, rampdown_length):
"""Cosine rampdown from https://arxiv.org/abs/1608.03983"""
assert current >= 0 and rampdown_length >= 0
if current >= rampdown_length:
return 0.0
else:
return float(.5 * (np.cos(np.pi * current / rampdown_length) + 1))
def linear_rampdown(current, rampdown_length):
assert current >= 0 and rampdown_length >= 0
if current >= rampdown_length:
return 0.0
else:
return 1 - current / rampdown_length
| 1,598 | 30.352941 | 76 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/__init__.py | 0 | 0 | 0 | py |
|
ADLD | ADLD-master/test.py | import argparse
import os
import torch.optim as optim
import torch.utils.data as util_data
import itertools
import network
import pre_process as prep
import lr_schedule
from util import *
from data_list import ImageList_au, ImageList_land_au
optim_dict = {'SGD': optim.SGD, 'Adam': optim.Adam}
def main(config):
## set loss criterion
use_gpu = torch.cuda.is_available()
## prepare data
dsets = {}
dset_loaders = {}
dsets['source'] = {}
dset_loaders['source'] = {}
dsets['source']['test'] = ImageList_au(config.src_test_path_prefix,
transform=prep.image_test(crop_size=config.crop_size))
dset_loaders['source']['test'] = util_data.DataLoader(dsets['source']['test'], batch_size=config.eval_batch_size,
shuffle=False, num_workers=config.num_workers)
dsets['target'] = {}
dset_loaders['target'] = {}
dsets['target']['test'] = ImageList_au(config.tgt_test_path_prefix,
transform=prep.image_test(crop_size=config.crop_size))
dset_loaders['target']['test'] = util_data.DataLoader(dsets['target']['test'], batch_size=config.eval_batch_size,
shuffle=False, num_workers=config.num_workers)
## set network modules
base_net = network.network_dict[config.base_net]()
land_enc = network.network_dict[config.land_enc](land_num=config.land_num)
au_enc = network.network_dict[config.au_enc](au_num=config.au_num)
invar_shape_enc = network.network_dict[config.invar_shape_enc]()
feat_gen = network.network_dict[config.feat_gen]()
if use_gpu:
base_net = base_net.cuda()
land_enc = land_enc.cuda()
au_enc = au_enc.cuda()
invar_shape_enc = invar_shape_enc.cuda()
feat_gen = feat_gen.cuda()
base_net.train(False)
land_enc.train(False)
au_enc.train(False)
invar_shape_enc.train(False)
feat_gen.train(False)
print(base_net, land_enc, au_enc, invar_shape_enc, feat_gen)
if not os.path.exists(config.write_path_prefix + config.mode):
os.makedirs(config.write_path_prefix + config.mode)
if not os.path.exists(config.write_res_prefix + config.mode):
os.makedirs(config.write_res_prefix + config.mode)
test_type = 'target' # 'source'
if config.start_epoch <= 0:
raise (RuntimeError('start_epoch should be larger than 0\n'))
res_file = open(config.write_res_prefix + config.mode + '/' + test_type + '_test_AU_pred_' + str(config.start_epoch) + '.txt', 'w')
for epoch in range(config.start_epoch, config.n_epochs + 1):
base_net.load_state_dict(
torch.load(config.write_path_prefix + config.mode + '/base_net_' + str(epoch) + '.pth'))
land_enc.load_state_dict(
torch.load(config.write_path_prefix + config.mode + '/land_enc_' + str(epoch) + '.pth'))
au_enc.load_state_dict(
torch.load(config.write_path_prefix + config.mode + '/au_enc_' + str(epoch) + '.pth'))
invar_shape_enc.load_state_dict(
torch.load(config.write_path_prefix + config.mode + '/invar_shape_enc_' + str(epoch) + '.pth'))
feat_gen.load_state_dict(
torch.load(config.write_path_prefix + config.mode + '/feat_gen_' + str(epoch) + '.pth'))
if test_type == 'source':
f1score_arr, acc_arr = AU_detection_eval_src(dset_loaders[test_type]['test'], base_net, au_enc, use_gpu=use_gpu)
else:
f1score_arr, acc_arr = AU_detection_eval_tgt(dset_loaders[test_type]['test'], base_net, land_enc, au_enc,
invar_shape_enc, feat_gen, use_gpu=use_gpu)
print('epoch =%d, f1 score mean=%f, accuracy mean=%f' %(epoch, f1score_arr.mean(), acc_arr.mean()))
        res_file.write('%d\t%f\t%f\n' % (epoch, f1score_arr.mean(), acc_arr.mean()))
res_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model configuration.
parser.add_argument('--gpu_id', type=str, default='0', help='device id to run')
parser.add_argument('--crop_size', type=int, default=176, help='crop size for images')
parser.add_argument('--output_size', type=int, default=44, help='size for landmark response map')
parser.add_argument('--au_num', type=int, default=6, help='number of AUs')
parser.add_argument('--land_num', type=int, default=49, help='number of landmarks')
parser.add_argument('--eval_batch_size', type=int, default=4, help='mini-batch size for evaluation')
parser.add_argument('--start_epoch', type=int, default=1, help='starting epoch')
parser.add_argument('--n_epochs', type=int, default=10, help='number of total epochs')
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--mode', type=str, default='weak', choices=['weak', 'full'])
parser.add_argument('--base_net', type=str, default='Feat_Enc')
parser.add_argument('--land_enc', type=str, default='Land_Detect')
parser.add_argument('--au_enc', type=str, default='AU_Detect')
parser.add_argument('--invar_shape_enc', type=str, default='Texture_Enc')
parser.add_argument('--feat_gen', type=str, default='Generator')
# Directories.
parser.add_argument('--write_path_prefix', type=str, default='data/snapshots/')
parser.add_argument('--write_res_prefix', type=str, default='data/res/')
parser.add_argument('--src_test_path_prefix', type=str, default='data/list/BP4D_test')
parser.add_argument('--tgt_test_path_prefix', type=str, default='data/list/emotioNet_test')
config = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_id
print(config)
main(config)
| 5,813 | 43.381679 | 135 | py |
ADLD | ADLD-master/network.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Feat_Enc(nn.Module):
def __init__(self):
super(Feat_Enc, self).__init__()
self.align_attention_features = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.PReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.PReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.PReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Tanh(),
)
def forward(self, x):
align_output = self.align_attention_features(x)
return align_output
class AU_Detect(nn.Module):
def __init__(self, au_num):
super(AU_Detect, self).__init__()
self.aus_feat = nn.ModuleList([nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
) for i in range(au_num)])
self.aus_fc = nn.ModuleList([
nn.Linear(64, 1)
for i in range(au_num)])
def forward(self, x):
start = True
for i in range(len(self.aus_fc)):
au_feat = self.aus_feat[i](x)
au_feat_interm = F.avg_pool2d(au_feat, au_feat.size()[2:])
au_feat_interm = au_feat_interm.view(au_feat_interm.size(0), -1)
au_output = self.aus_fc[i](au_feat_interm)
if start:
aus_output = au_output
aus_feat = au_feat_interm
start = False
else:
aus_output = torch.cat((aus_output, au_output), 1)
aus_feat = torch.cat((aus_feat, au_feat_interm), 1)
return aus_feat, aus_output
class Land_Detect(nn.Module):
def __init__(self, land_num):
super(Land_Detect, self).__init__()
self.align_attention_features = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Tanh(),
nn.Conv2d(64, land_num, kernel_size=3, stride=1, padding=1),
)
def forward(self, x):
align_feat = self.align_attention_features[:-1](x)
align_output = self.align_attention_features[-1](align_feat)
start = True
for i in range(align_output.size(1)):
t_align_attention_feat_ori = align_output[:, i, :, :]
t_align_attention_feat = t_align_attention_feat_ori.view(t_align_attention_feat_ori.size(0), -1)
t_align_attention_feat = F.softmax(t_align_attention_feat, 1)
t_align_attention_feat = t_align_attention_feat.view(t_align_attention_feat_ori.size(0), 1,
t_align_attention_feat_ori.size(1),
t_align_attention_feat_ori.size(2))
if start:
align_attention = t_align_attention_feat
start = False
else:
align_attention = torch.cat((align_attention, t_align_attention_feat), 1)
return align_attention, align_feat, align_output
class Texture_Enc(nn.Module):
def __init__(self, inter_dim=64):
super(Texture_Enc, self).__init__()
self.irrele_shape_encoder = nn.Sequential(
nn.Conv2d(64, inter_dim, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, inter_dim, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, inter_dim, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Tanh(),
)
def forward(self, x):
irrele_shape_output = self.irrele_shape_encoder(x)
return irrele_shape_output
class Generator(nn.Module):
def __init__(self, input_dim1 = 1, input_dim2=64, inter_dim=128):
super(Generator, self).__init__()
self.feat_generator = nn.Sequential(
nn.Conv2d(input_dim1 + input_dim2, inter_dim, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, inter_dim, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, inter_dim // 2, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim // 2),
nn.PReLU(),
nn.Conv2d(inter_dim // 2, inter_dim // 2, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim // 2),
nn.PReLU(),
nn.Conv2d(inter_dim // 2, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Tanh(),
)
def forward(self, align_attentions, irrele_shape_output):
assemble_align_attention = torch.sum(align_attentions, 1, True)
input = torch.cat((assemble_align_attention, irrele_shape_output), 1)
# input = torch.cat((align_attentions,irrele_shape_output),1)
output = self.feat_generator(input)
return output
class Land_Disc(nn.Module):
def __init__(self, land_num):
super(Land_Disc, self).__init__()
self.align_attention_features = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, land_num, kernel_size=3, stride=1, padding=1),
)
def forward(self, x):
align_output = self.align_attention_features(x)
return align_output
class Discriminator(nn.Module):
'''Discriminator model for source domain.'''
def __init__(self, input_dim=64, inter_dim = 64):
'''Init discriminator.'''
super(Discriminator, self).__init__()
self.layer = nn.Sequential(
nn.Conv2d(input_dim, inter_dim, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, inter_dim, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, inter_dim * 2, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(inter_dim * 2),
nn.PReLU(),
nn.Conv2d(inter_dim * 2, inter_dim * 2, kernel_size=4, stride=2, padding=0),
# nn.InstanceNorm2d(inter_dim * 2),
nn.PReLU(),
nn.Conv2d(inter_dim * 2, 1, kernel_size=1, stride=1, padding=0)
)
self.input_dim = input_dim
def forward(self, input):
out = self.layer(input)
out = out.view(out.size(0), -1)
return out
network_dict = {'Feat_Enc':Feat_Enc, 'Land_Detect':Land_Detect, 'AU_Detect':AU_Detect, 'Land_Disc':Land_Disc,
'Texture_Enc':Texture_Enc, 'Generator':Generator, 'Discriminator':Discriminator} | 8,607 | 32.235521 | 109 | py |
ADLD | ADLD-master/lr_schedule.py | def inv_lr_scheduler(param_lr, optimizer, iter_num, gamma, power, init_lr=0.001):
lr = init_lr * (1 + gamma * iter_num) ** (-power)
i = 0
for param_group in optimizer.param_groups:
param_group['lr'] = lr * param_lr[i]
i += 1
return optimizer
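# Worked example (illustrative values): with init_lr=0.001, gamma=0.001 and
# power=0.75 the base rate is 0.001 * (1 + 0.001 * iter_num) ** -0.75, i.e.
# 0.001 at iteration 0 and ~0.00059 at iteration 1000; each parameter group is
# additionally scaled by its entry in param_lr.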
def step_lr_scheduler(param_lr, optimizer, iter_num, gamma, stepsize, init_lr=0.001):
'''Decay learning rate by a factor of gamma every stepsize epochs.'''
lr = init_lr * (gamma ** (iter_num // stepsize))
i = 0
for param_group in optimizer.param_groups:
param_group['lr'] = lr * param_lr[i]
i += 1
return optimizer
def lambda_lr_scheduler(param_lr, optimizer, epoch, n_epochs, offset, decay_start_epoch, init_lr=0.001):
lr = init_lr * (1.0 - max(0, epoch + offset - decay_start_epoch) / float(n_epochs - decay_start_epoch + 1))
i = 0
for param_group in optimizer.param_groups:
param_group['lr'] = lr * param_lr[i]
i += 1
return optimizer
schedule_dict = {'inv': inv_lr_scheduler, 'step': step_lr_scheduler, 'lambda': lambda_lr_scheduler}
| 1,092 | 29.361111 | 111 | py |
ADLD | ADLD-master/data_list.py | import numpy as np
import random
from PIL import Image
def make_dataset(image_list, label):
len_ = len(image_list)
images = [(image_list[i].strip(), label[i, :]) for i in range(len_)]
return images
def make_dataset_land_au(image_list, land, au):
len_ = len(image_list)
images = [(image_list[i].strip(), land[i, :], au[i, :]) for i in range(len_)]
return images
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
# Image value: [0,1]
with open(path, 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
def default_loader(path):
return pil_loader(path)
class ImageList_au(object):
def __init__(self, path, transform=None, target_transform=None,
loader=default_loader):
image_list = open(path + '_path.txt').readlines()
labels = np.loadtxt(path + '_AUoccur.txt')
imgs = make_dataset(image_list, labels)
if len(imgs) == 0:
raise (RuntimeError('Found 0 images in subfolders of: ' + path + '\n'))
self.imgs = imgs
self.transform = transform
self.target_transform = target_transform
self.loader = loader
def __getitem__(self, index):
'''
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
'''
path, target = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.imgs)
class ImageList_land_au(object):
def __init__(self, crop_size, path, transform=None, target_transform=None,
loader=default_loader):
image_list = open(path + '_path.txt').readlines()
land = np.loadtxt(path + '_land.txt')
au = np.loadtxt(path + '_AUoccur.txt')
imgs = make_dataset_land_au(image_list, land, au)
if len(imgs) == 0:
raise (RuntimeError('Found 0 images in subfolders of: ' + path + '\n'))
self.imgs = imgs
self.transform = transform
self.target_transform = target_transform
self.loader = loader
self.crop_size = crop_size
def __getitem__(self, index):
'''
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
'''
path, land, au = self.imgs[index]
img = self.loader(path)
w, h = img.size
offset_y = random.randint(0, h - self.crop_size)
offset_x = random.randint(0, w - self.crop_size)
flip = random.randint(0, 1)
if self.transform is not None:
img = self.transform(img, flip, offset_x, offset_y)
if self.target_transform is not None:
land = self.target_transform(land, flip, offset_x, offset_y)
return img, land, au
def __len__(self):
return len(self.imgs) | 3,195 | 28.321101 | 101 | py |
ADLD | ADLD-master/util.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sklearn
from sklearn.metrics import accuracy_score, f1_score
def AU_detection_eval_src(loader, base_net, au_enc, use_gpu=True):
missing_label = 999
for i, batch in enumerate(loader):
input, label = batch
if use_gpu:
input, label = input.cuda(), label.cuda()
base_feat = base_net(input)
au_feat, au_output = au_enc(base_feat)
au_output = F.sigmoid(au_output)
if i == 0:
all_output = au_output.data.cpu().float()
all_label = label.data.cpu().float()
else:
all_output = torch.cat((all_output, au_output.data.cpu().float()), 0)
all_label = torch.cat((all_label, label.data.cpu().float()), 0)
AUoccur_pred_prob = all_output.data.numpy()
AUoccur_actual = all_label.data.numpy()
AUoccur_pred = np.zeros(AUoccur_pred_prob.shape)
AUoccur_pred[AUoccur_pred_prob < 0.5] = 0
AUoccur_pred[AUoccur_pred_prob >= 0.5] = 1
AUoccur_actual = AUoccur_actual.transpose((1, 0))
AUoccur_pred = AUoccur_pred.transpose((1, 0))
f1score_arr = np.zeros(AUoccur_actual.shape[0])
acc_arr = np.zeros(AUoccur_actual.shape[0])
for i in range(AUoccur_actual.shape[0]):
curr_actual = AUoccur_actual[i]
curr_pred = AUoccur_pred[i]
new_curr_actual = curr_actual[curr_actual != missing_label]
new_curr_pred = curr_pred[curr_actual != missing_label]
f1score_arr[i] = f1_score(new_curr_actual, new_curr_pred)
acc_arr[i] = accuracy_score(new_curr_actual, new_curr_pred)
return f1score_arr, acc_arr
def AU_detection_eval_tgt(loader, base_net, land_enc, au_enc, invar_shape_enc, feat_gen, use_gpu=True):
missing_label = 999
for i, batch in enumerate(loader):
input, label = batch
if use_gpu:
input, label = input.cuda(), label.cuda()
base_feat = base_net(input)
align_attention, align_feat, align_output = land_enc(base_feat)
invar_shape_output = invar_shape_enc(base_feat)
new_gen = feat_gen(align_attention, invar_shape_output)
new_gen_au_feat, new_gen_au_output = au_enc(new_gen)
au_output = F.sigmoid(new_gen_au_output)
if i == 0:
all_output = au_output.data.cpu().float()
all_label = label.data.cpu().float()
else:
all_output = torch.cat((all_output, au_output.data.cpu().float()), 0)
all_label = torch.cat((all_label, label.data.cpu().float()), 0)
AUoccur_pred_prob = all_output.data.numpy()
AUoccur_actual = all_label.data.numpy()
AUoccur_pred = np.zeros(AUoccur_pred_prob.shape)
AUoccur_pred[AUoccur_pred_prob < 0.5] = 0
AUoccur_pred[AUoccur_pred_prob >= 0.5] = 1
AUoccur_actual = AUoccur_actual.transpose((1, 0))
AUoccur_pred = AUoccur_pred.transpose((1, 0))
f1score_arr = np.zeros(AUoccur_actual.shape[0])
acc_arr = np.zeros(AUoccur_actual.shape[0])
for i in range(AUoccur_actual.shape[0]):
curr_actual = AUoccur_actual[i]
curr_pred = AUoccur_pred[i]
new_curr_actual = curr_actual[curr_actual != missing_label]
new_curr_pred = curr_pred[curr_actual != missing_label]
f1score_arr[i] = f1_score(new_curr_actual, new_curr_pred)
acc_arr[i] = accuracy_score(new_curr_actual, new_curr_pred)
return f1score_arr, acc_arr
def land_softmax_loss(input, target, weight=None, size_average=True, reduce=True):
classify_loss = nn.CrossEntropyLoss(weight=weight, size_average=size_average, reduce=reduce)
for i in range(input.size(1)):
t_input = input[:, i, :, :]
t_input = t_input.view(t_input.size(0), -1)
t_target = target[:, i]
t_loss = classify_loss(t_input, t_target)
t_loss = torch.unsqueeze(t_loss, 0)
if i == 0:
loss = t_loss
else:
loss = torch.cat((loss, t_loss), 0)
if size_average:
return loss.mean()
else:
return loss.sum()
def land_adaptation_loss(input, size_average=True, reduce=True):
classify_loss = nn.MSELoss(size_average=size_average, reduce=reduce)
use_gpu = torch.cuda.is_available()
for i in range(input.size(1)):
t_input = input[:, i, :, :]
t_input = t_input.view(t_input.size(0), -1)
t_target = torch.ones(t_input.size()) * 1.0 / t_input.size(1)
if use_gpu:
t_target = t_target.cuda()
t_loss = classify_loss(t_input, t_target)
t_loss = torch.unsqueeze(t_loss, 0)
if i == 0:
loss = t_loss
else:
loss = torch.cat((loss, t_loss), 0)
if size_average:
return loss.mean()
else:
return loss.sum()
def land_discriminator_loss(input, target, size_average=True, reduce=True):
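    # Discriminator counterpart of the adaptation loss: the target is a one-hot
    # map with 1 at the true landmark position (taken from `target`) and 0
    # elsewhere, again compared with MSE.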
classify_loss = nn.MSELoss(size_average=size_average, reduce=reduce)
use_gpu = torch.cuda.is_available()
for i in range(input.size(1)):
t_input = input[:, i, :, :]
t_input = t_input.view(t_input.size(0), -1)
t_target = torch.zeros(t_input.size())
if use_gpu:
t_target = t_target.cuda()
t_true_target = target[:, i]
for j in range(t_true_target.size(0)):
t_target[j, t_true_target[j]] = 1
t_loss = classify_loss(t_input, t_target)
t_loss = torch.unsqueeze(t_loss, 0)
if i == 0:
loss = t_loss
else:
loss = torch.cat((loss, t_loss), 0)
if size_average:
return loss.mean()
else:
return loss.sum() | 5,630 | 33.335366 | 103 | py |
ADLD | ADLD-master/pre_process.py | import numpy as np
from torchvision import transforms
from PIL import Image
class PlaceCrop(object):
"""Crops the given PIL.Image at the particular index.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (w, h), a square crop (size, size) is
made.
"""
def __init__(self, size, start_x, start_y):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
self.start_x = start_x
self.start_y = start_y
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be cropped.
Returns:
PIL.Image: Cropped image.
"""
th, tw = self.size
return img.crop((self.start_x, self.start_y, self.start_x + tw, self.start_y + th))
class SetFlip(object):
def __init__(self, flip):
self.flip = flip
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be flipped.
Returns:
PIL.Image: Randomly flipped image.
"""
if self.flip:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
return img
class land_transform(object):
def __init__(self, output_size, scale, flip_reflect):
self.output_size = output_size
self.scale = scale
self.flip_reflect = flip_reflect.astype(int) - 1
def __call__(self, land, flip, offset_x, offset_y):
land_label = np.zeros((len(land) / 2))
land[0:len(land):2] = (land[0:len(land):2] - offset_x) / float(self.scale)
land[1:len(land):2] = (land[1:len(land):2] - offset_y) / float(self.scale)
# change the landmark orders when flipping
if flip:
land[0:len(land):2] = self.output_size - 1 - land[0:len(land):2]
land[0:len(land):2] = land[0:len(land):2][self.flip_reflect]
land[1:len(land):2] = land[1:len(land):2][self.flip_reflect]
# landmark location refinement for predefined AU centers
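        # 'ruler' is a face-scale reference: the horizontal distance between
        # landmarks 22 and 25 (roughly the inner eye corners in the 49-point
        # layout), used below to shift selected landmarks towards the AU centers.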
ruler = abs(land[2 * 22] - land[2 * 25])
land[2 * 4 + 1] = land[2 * 4 + 1] - ruler / 2
land[2 * 5 + 1] = land[2 * 5 + 1] - ruler / 2
land[2 * 1 + 1] = land[2 * 1 + 1] - ruler / 3
land[2 * 8 + 1] = land[2 * 8 + 1] - ruler / 3
land[2 * 2 + 1] = land[2 * 2 + 1] + ruler / 3
land[2 * 7 + 1] = land[2 * 7 + 1] + ruler / 3
land[2 * 24 + 1] = land[2 * 24 + 1] + ruler
land[2 * 29 + 1] = land[2 * 29 + 1] + ruler
land[2 * 15 + 1] = land[2 * 15 + 1] - ruler / 2
land[2 * 17 + 1] = land[2 * 17 + 1] - ruler / 2
land[2 * 39 + 1] = land[2 * 39 + 1] + ruler / 2
land[2 * 41 + 1] = land[2 * 41 + 1] + ruler / 2
land = (np.around(land)).astype(int)
for i in range(len(land) / 2):
land[2 * i] = min(max(land[2 * i], 0), self.output_size - 1)
land[2 * i + 1] = min(max(land[2 * i + 1], 0), self.output_size - 1)
land_label[i] = land[2 * i + 1] * self.output_size + land[2 * i]
return land_label
class image_train(object):
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, img, flip, offset_x, offset_y):
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
transform = transforms.Compose([
PlaceCrop(self.crop_size, offset_x, offset_y),
SetFlip(flip),
transforms.ToTensor(),
normalize
])
img = transform(img)
return img
def image_test(crop_size=176):
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
return transforms.Compose([
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
normalize
]) | 3,931 | 32.322034 | 91 | py |
ADLD | ADLD-master/train.py | import argparse
import os
import torch.optim as optim
import torch.utils.data as util_data
import itertools
import network
import pre_process as prep
import lr_schedule
from util import *
from data_list import ImageList_au, ImageList_land_au
optim_dict = {'SGD': optim.SGD, 'Adam': optim.Adam}
def main(config):
## set loss criterion
use_gpu = torch.cuda.is_available()
au_weight_src = torch.from_numpy(np.loadtxt(config.src_train_path_prefix + '_weight.txt'))
if use_gpu:
au_weight_src = au_weight_src.float().cuda()
else:
au_weight_src = au_weight_src.float()
au_class_criterion = nn.BCEWithLogitsLoss(au_weight_src)
land_predict_criterion = land_softmax_loss
discriminator_criterion = nn.MSELoss()
reconstruct_criterion = nn.L1Loss()
land_discriminator_criterion = land_discriminator_loss
land_adaptation_criterion = land_adaptation_loss
## prepare data
dsets = {}
dset_loaders = {}
dsets['source'] = {}
dset_loaders['source'] = {}
dsets['source']['train'] = ImageList_land_au(config.crop_size, config.src_train_path_prefix,
transform=prep.image_train(crop_size=config.crop_size),
target_transform=prep.land_transform(output_size=config.output_size,
scale=config.crop_size / config.output_size,
flip_reflect=np.loadtxt(
config.flip_reflect)))
dset_loaders['source']['train'] = util_data.DataLoader(dsets['source']['train'], batch_size=config.train_batch_size,
shuffle=True, num_workers=config.num_workers)
dsets['source']['val'] = ImageList_au(config.src_val_path_prefix,
transform=prep.image_test(crop_size=config.crop_size))
dset_loaders['source']['val'] = util_data.DataLoader(dsets['source']['val'], batch_size=config.eval_batch_size,
shuffle=False, num_workers=config.num_workers)
dsets['target'] = {}
dset_loaders['target'] = {}
dsets['target']['train'] = ImageList_land_au(config.crop_size, config.tgt_train_path_prefix,
transform=prep.image_train(crop_size=config.crop_size),
target_transform=prep.land_transform(output_size=config.output_size,
scale=config.crop_size / config.output_size,
flip_reflect=np.loadtxt(
config.flip_reflect)))
dset_loaders['target']['train'] = util_data.DataLoader(dsets['target']['train'], batch_size=config.train_batch_size,
shuffle=True, num_workers=config.num_workers)
dsets['target']['val'] = ImageList_au(config.tgt_val_path_prefix,
transform=prep.image_test(crop_size=config.crop_size))
dset_loaders['target']['val'] = util_data.DataLoader(dsets['target']['val'], batch_size=config.eval_batch_size,
shuffle=False, num_workers=config.num_workers)
## set network modules
base_net = network.network_dict[config.base_net]()
land_enc = network.network_dict[config.land_enc](land_num=config.land_num)
land_enc_store = network.network_dict[config.land_enc](land_num=config.land_num)
au_enc = network.network_dict[config.au_enc](au_num=config.au_num)
invar_shape_enc = network.network_dict[config.invar_shape_enc]()
feat_gen = network.network_dict[config.feat_gen]()
invar_shape_disc = network.network_dict[config.invar_shape_disc](land_num=config.land_num)
feat_gen_disc_src = network.network_dict[config.feat_gen_disc]()
feat_gen_disc_tgt = network.network_dict[config.feat_gen_disc]()
if config.start_epoch > 0:
base_net.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/base_net_' + str(config.start_epoch) + '.pth'))
land_enc.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/land_enc_' + str(config.start_epoch) + '.pth'))
au_enc.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/au_enc_' + str(config.start_epoch) + '.pth'))
invar_shape_enc.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/invar_shape_enc_' + str(config.start_epoch) + '.pth'))
feat_gen.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/feat_gen_' + str(config.start_epoch) + '.pth'))
invar_shape_disc.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/invar_shape_disc_' + str(config.start_epoch) + '.pth'))
feat_gen_disc_src.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/feat_gen_disc_src_' + str(config.start_epoch) + '.pth'))
feat_gen_disc_tgt.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/feat_gen_disc_tgt_' + str(config.start_epoch) + '.pth'))
if use_gpu:
base_net = base_net.cuda()
land_enc = land_enc.cuda()
land_enc_store = land_enc_store.cuda()
au_enc = au_enc.cuda()
invar_shape_enc = invar_shape_enc.cuda()
feat_gen = feat_gen.cuda()
invar_shape_disc = invar_shape_disc.cuda()
feat_gen_disc_src = feat_gen_disc_src.cuda()
feat_gen_disc_tgt = feat_gen_disc_tgt.cuda()
## collect parameters
base_net_parameter_list = [{'params': filter(lambda p: p.requires_grad, base_net.parameters()), 'lr': 1}]
land_enc_parameter_list = [{'params': filter(lambda p: p.requires_grad, land_enc.parameters()), 'lr': 1}]
au_enc_parameter_list = [{'params': filter(lambda p: p.requires_grad, au_enc.parameters()), 'lr': 1}]
invar_shape_enc_parameter_list = [
{'params': filter(lambda p: p.requires_grad, invar_shape_enc.parameters()), 'lr': 1}]
feat_gen_parameter_list = [{'params': filter(lambda p: p.requires_grad, feat_gen.parameters()), 'lr': 1}]
invar_shape_disc_parameter_list = [
{'params': filter(lambda p: p.requires_grad, invar_shape_disc.parameters()), 'lr': 1}]
feat_gen_disc_src_parameter_list = [
{'params': filter(lambda p: p.requires_grad, feat_gen_disc_src.parameters()), 'lr': 1}]
feat_gen_disc_tgt_parameter_list = [
{'params': filter(lambda p: p.requires_grad, feat_gen_disc_tgt.parameters()), 'lr': 1}]
## set optimizer
Gen_optimizer = optim_dict[config.gen_optimizer_type](
itertools.chain(invar_shape_enc_parameter_list, feat_gen_parameter_list),
1.0, [config.gen_beta1, config.gen_beta2])
Task_optimizer = optim_dict[config.task_optimizer_type](
itertools.chain(base_net_parameter_list, land_enc_parameter_list, au_enc_parameter_list),
1.0, [config.task_beta1, config.task_beta2])
Disc_optimizer = optim_dict[config.gen_optimizer_type](
itertools.chain(invar_shape_disc_parameter_list, feat_gen_disc_src_parameter_list,
feat_gen_disc_tgt_parameter_list), 1.0, [config.gen_beta1, config.gen_beta2])
Gen_param_lr = []
for param_group in Gen_optimizer.param_groups:
Gen_param_lr.append(param_group['lr'])
Task_param_lr = []
for param_group in Task_optimizer.param_groups:
Task_param_lr.append(param_group['lr'])
Disc_param_lr = []
for param_group in Disc_optimizer.param_groups:
Disc_param_lr.append(param_group['lr'])
Gen_lr_scheduler = lr_schedule.schedule_dict[config.gen_lr_type]
Task_lr_scheduler = lr_schedule.schedule_dict[config.task_lr_type]
Disc_lr_scheduler = lr_schedule.schedule_dict[config.gen_lr_type]
print(base_net, land_enc, au_enc, invar_shape_enc, feat_gen)
print(invar_shape_disc, feat_gen_disc_src, feat_gen_disc_tgt)
if not os.path.exists(config.write_path_prefix + config.mode):
os.makedirs(config.write_path_prefix + config.mode)
if not os.path.exists(config.write_res_prefix + config.mode):
os.makedirs(config.write_res_prefix + config.mode)
val_type = 'target' # 'source'
res_file = open(config.write_res_prefix + config.mode + '/' + val_type + '_AU_pred_' + str(config.start_epoch) + '.txt', 'w')
## train
len_train_tgt = len(dset_loaders['target']['train'])
count = 0
for epoch in range(config.start_epoch, config.n_epochs + 1):
# eval in the train
if epoch >= config.start_epoch:
base_net.train(False)
land_enc.train(False)
au_enc.train(False)
invar_shape_enc.train(False)
feat_gen.train(False)
if val_type == 'source':
f1score_arr, acc_arr = AU_detection_eval_src(dset_loaders[val_type]['val'], base_net, au_enc, use_gpu=use_gpu)
else:
f1score_arr, acc_arr = AU_detection_eval_tgt(dset_loaders[val_type]['val'], base_net, land_enc, au_enc,
invar_shape_enc, feat_gen, use_gpu=use_gpu)
print('epoch =%d, f1 score mean=%f, accuracy mean=%f' %(epoch, f1score_arr.mean(), acc_arr.mean()))
            print >> res_file, '%d\t%f\t%f' % (epoch, f1score_arr.mean(), acc_arr.mean())
base_net.train(True)
land_enc.train(True)
au_enc.train(True)
invar_shape_enc.train(True)
feat_gen.train(True)
if epoch > config.start_epoch:
print('taking snapshot ...')
torch.save(base_net.state_dict(), config.write_path_prefix + config.mode + '/base_net_' + str(epoch) + '.pth')
torch.save(land_enc.state_dict(), config.write_path_prefix + config.mode + '/land_enc_' + str(epoch) + '.pth')
torch.save(au_enc.state_dict(), config.write_path_prefix + config.mode + '/au_enc_' + str(epoch) + '.pth')
torch.save(invar_shape_enc.state_dict(), config.write_path_prefix + config.mode + '/invar_shape_enc_' + str(epoch) + '.pth')
torch.save(feat_gen.state_dict(), config.write_path_prefix + config.mode + '/feat_gen_' + str(epoch) + '.pth')
torch.save(invar_shape_disc.state_dict(), config.write_path_prefix + config.mode + '/invar_shape_disc_' + str(epoch) + '.pth')
torch.save(feat_gen_disc_src.state_dict(), config.write_path_prefix + config.mode + '/feat_gen_disc_src_' + str(epoch) + '.pth')
torch.save(feat_gen_disc_tgt.state_dict(), config.write_path_prefix + config.mode + '/feat_gen_disc_tgt_' + str(epoch) + '.pth')
if epoch >= config.n_epochs:
break
for i, batch_src in enumerate(dset_loaders['source']['train']):
if i % config.display == 0 and count > 0:
print(
'[epoch = %d][iter = %d][loss_disc = %f][loss_invar_shape_disc = %f][loss_gen_disc = %f][total_loss = %f][loss_invar_shape_adaptation = %f][loss_gen_adaptation = %f][loss_self_recons = %f][loss_gen_cycle = %f][loss_au = %f][loss_land = %f]' % (
epoch, i, loss_disc.data.cpu().numpy(), loss_invar_shape_disc.data.cpu().numpy(),
loss_gen_disc.data.cpu().numpy(), total_loss.data.cpu().numpy(),
loss_invar_shape_adaptation.data.cpu().numpy(), loss_gen_adaptation.data.cpu().numpy(),
loss_self_recons.data.cpu().numpy(), loss_gen_cycle.data.cpu().numpy(),
loss_au.data.cpu().numpy(), loss_land.data.cpu().numpy()))
print('learning rate = %f, %f, %f' % (Disc_optimizer.param_groups[0]['lr'], Gen_optimizer.param_groups[0]['lr'], Task_optimizer.param_groups[0]['lr']))
print('the number of training iterations is %d' % (count))
input_src, land_src, au_src = batch_src
if count % len_train_tgt == 0:
if count > 0:
dset_loaders['target']['train'] = util_data.DataLoader(dsets['target']['train'], batch_size=config.train_batch_size,
shuffle=True, num_workers=config.num_workers)
iter_data_tgt = iter(dset_loaders['target']['train'])
input_tgt, land_tgt, au_tgt = iter_data_tgt.next()
if input_tgt.size(0) > input_src.size(0):
input_tgt, land_tgt, au_tgt = input_tgt[0:input_src.size(0), :, :, :], land_tgt[0:input_src.size(0),
:], au_tgt[
0:input_src.size(0)]
elif input_tgt.size(0) < input_src.size(0):
input_src, land_src, au_src = input_src[0:input_tgt.size(0), :, :, :], land_src[0:input_tgt.size(0),
:], au_src[
0:input_tgt.size(0)]
if use_gpu:
input_src, land_src, au_src, input_tgt, land_tgt, au_tgt = \
input_src.cuda(), land_src.long().cuda(), au_src.float().cuda(), \
input_tgt.cuda(), land_tgt.long().cuda(), au_tgt.float().cuda()
else:
land_src, au_src, land_tgt, au_tgt = \
land_src.long(), au_src.float(), land_tgt.long(), au_tgt.float()
land_enc_store.load_state_dict(land_enc.state_dict())
base_feat_src = base_net(input_src)
align_attention_src, align_feat_src, align_output_src = land_enc(base_feat_src)
au_feat_src, au_output_src = au_enc(base_feat_src)
base_feat_tgt = base_net(input_tgt)
align_attention_tgt, align_feat_tgt, align_output_tgt = land_enc(base_feat_tgt)
au_feat_tgt, au_output_tgt = au_enc(base_feat_tgt)
invar_shape_output_src = invar_shape_enc(base_feat_src.detach())
invar_shape_output_tgt = invar_shape_enc(base_feat_tgt.detach())
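        # Feature swapping (see the losses below): new_gen_* combines one domain's
        # landmark attention with the other domain's shape-invariant features,
        # recons_gen_* recombines attention and features of the same domain
        # (self-reconstruction), and new2_gen_* swaps back once more to close the
        # cross-cycle consistency loop.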
# new_gen
new_gen_tgt = feat_gen(align_attention_src.detach(), invar_shape_output_tgt)
new_gen_src = feat_gen(align_attention_tgt.detach(), invar_shape_output_src)
# recons_gen
recons_gen_src = feat_gen(align_attention_src.detach(), invar_shape_output_src)
recons_gen_tgt = feat_gen(align_attention_tgt.detach(), invar_shape_output_tgt)
# new2_gen
new_gen_invar_shape_output_src = invar_shape_enc(new_gen_src.detach())
new_gen_invar_shape_output_tgt = invar_shape_enc(new_gen_tgt.detach())
new_gen_align_attention_src, new_gen_align_feat_src, new_gen_align_output_src = land_enc_store(new_gen_src)
new_gen_align_attention_tgt, new_gen_align_feat_tgt, new_gen_align_output_tgt = land_enc_store(new_gen_tgt)
new2_gen_tgt = feat_gen(new_gen_align_attention_src.detach(), new_gen_invar_shape_output_tgt)
new2_gen_src = feat_gen(new_gen_align_attention_tgt.detach(), new_gen_invar_shape_output_src)
############################
# 1. train discriminator #
############################
Disc_optimizer = Disc_lr_scheduler(Disc_param_lr, Disc_optimizer, epoch, config.n_epochs,
1, config.decay_start_epoch, config.gen_lr)
Disc_optimizer.zero_grad()
align_output_invar_shape_src = invar_shape_disc(
invar_shape_output_src.detach())
align_output_invar_shape_tgt = invar_shape_disc(
invar_shape_output_tgt.detach())
# loss_invar_shape_disc
loss_base_invar_shape_disc_src = land_discriminator_criterion(align_output_invar_shape_src, land_src)
loss_base_invar_shape_disc_tgt = land_discriminator_criterion(align_output_invar_shape_tgt, land_tgt)
loss_invar_shape_disc = (loss_base_invar_shape_disc_src + loss_base_invar_shape_disc_tgt) * 0.5
base_gen_src_pred = feat_gen_disc_src(base_feat_src.detach())
new_gen_src_pred = feat_gen_disc_src(new_gen_src.detach())
real_label = torch.ones((base_feat_src.size(0), 1))
fake_label = torch.zeros((base_feat_src.size(0), 1))
if use_gpu:
real_label, fake_label = real_label.cuda(), fake_label.cuda()
# loss_gen_disc_src
loss_base_gen_src = discriminator_criterion(base_gen_src_pred, real_label)
loss_new_gen_src = discriminator_criterion(new_gen_src_pred, fake_label)
loss_gen_disc_src = (loss_base_gen_src + loss_new_gen_src) * 0.5
base_gen_tgt_pred = feat_gen_disc_tgt(base_feat_tgt.detach())
new_gen_tgt_pred = feat_gen_disc_tgt(new_gen_tgt.detach())
# loss_gen_disc_tgt
loss_base_gen_tgt = discriminator_criterion(base_gen_tgt_pred, real_label)
loss_new_gen_tgt = discriminator_criterion(new_gen_tgt_pred, fake_label)
loss_gen_disc_tgt = (loss_base_gen_tgt + loss_new_gen_tgt) * 0.5
# loss_gen_disc
loss_gen_disc = (loss_gen_disc_src + loss_gen_disc_tgt) * 0.5
loss_disc = loss_invar_shape_disc + loss_gen_disc
loss_disc.backward()
# optimize discriminator
Disc_optimizer.step()
############################
# 2. train base network #
############################
Gen_optimizer = Gen_lr_scheduler(Gen_param_lr, Gen_optimizer, epoch, config.n_epochs,
1, config.decay_start_epoch, config.gen_lr)
Gen_optimizer.zero_grad()
Task_optimizer = Task_lr_scheduler(Task_param_lr, Task_optimizer, epoch, config.n_epochs,
1, config.decay_start_epoch, config.task_lr)
Task_optimizer.zero_grad()
align_output_invar_shape_src = invar_shape_disc(invar_shape_output_src)
align_output_invar_shape_tgt = invar_shape_disc(invar_shape_output_tgt)
# loss_invar_shape_adaptation
loss_base_invar_shape_adaptation_src = land_adaptation_criterion(align_output_invar_shape_src)
loss_base_invar_shape_adaptation_tgt = land_adaptation_criterion(align_output_invar_shape_tgt)
loss_invar_shape_adaptation = (
loss_base_invar_shape_adaptation_src + loss_base_invar_shape_adaptation_tgt) * 0.5
new_gen_src_pred = feat_gen_disc_src(new_gen_src)
loss_gen_adaptation_src = discriminator_criterion(new_gen_src_pred, real_label)
new_gen_tgt_pred = feat_gen_disc_tgt(new_gen_tgt)
loss_gen_adaptation_tgt = discriminator_criterion(new_gen_tgt_pred, real_label)
# loss_gen_adaptation
loss_gen_adaptation = (loss_gen_adaptation_src + loss_gen_adaptation_tgt) * 0.5
loss_gen_cycle_src = reconstruct_criterion(new2_gen_src, base_feat_src.detach())
loss_gen_cycle_tgt = reconstruct_criterion(new2_gen_tgt, base_feat_tgt.detach())
# loss_gen_cycle
loss_gen_cycle = (loss_gen_cycle_src + loss_gen_cycle_tgt) * 0.5
loss_self_recons_src = reconstruct_criterion(recons_gen_src, base_feat_src.detach())
loss_self_recons_tgt = reconstruct_criterion(recons_gen_tgt, base_feat_tgt.detach())
# loss_self_recons
loss_self_recons = (loss_self_recons_src + loss_self_recons_tgt) * 0.5
loss_base_gen_au_src = au_class_criterion(au_output_src, au_src)
loss_base_gen_au_tgt = au_class_criterion(au_output_tgt, au_tgt)
loss_base_gen_land_src = land_predict_criterion(align_output_src, land_src)
loss_base_gen_land_tgt = land_predict_criterion(align_output_tgt, land_tgt)
new_gen_au_feat_src, new_gen_au_output_src = au_enc(new_gen_src)
new_gen_au_feat_tgt, new_gen_au_output_tgt = au_enc(new_gen_tgt)
loss_new_gen_au_src = au_class_criterion(new_gen_au_output_src, au_tgt)
loss_new_gen_au_tgt = au_class_criterion(new_gen_au_output_tgt, au_src)
loss_new_gen_land_src = land_predict_criterion(new_gen_align_output_src, land_tgt)
loss_new_gen_land_tgt = land_predict_criterion(new_gen_align_output_tgt, land_src)
# loss_land
loss_land = (
loss_base_gen_land_src + loss_base_gen_land_tgt + loss_new_gen_land_src + loss_new_gen_land_tgt) * 0.5
# loss_au
if config.mode == 'weak':
loss_au = (loss_base_gen_au_src + loss_new_gen_au_tgt) * 0.5
else:
loss_au = (loss_base_gen_au_src + loss_base_gen_au_tgt + loss_new_gen_au_src + loss_new_gen_au_tgt) * 0.25
total_loss = config.lambda_land_adv * loss_invar_shape_adaptation + \
config.lambda_feat_adv * loss_gen_adaptation + \
config.lambda_cross_cycle * loss_gen_cycle + config.lambda_self_recons * loss_self_recons + \
config.lambda_au * loss_au + config.lambda_land * loss_land
total_loss.backward()
Gen_optimizer.step()
Task_optimizer.step()
count = count + 1
res_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model configuration.
parser.add_argument('--gpu_id', type=str, default='0', help='device id to run')
parser.add_argument('--crop_size', type=int, default=176, help='crop size for images')
parser.add_argument('--output_size', type=int, default=44, help='size for landmark response map')
parser.add_argument('--au_num', type=int, default=6, help='number of AUs')
parser.add_argument('--land_num', type=int, default=49, help='number of landmarks')
parser.add_argument('--train_batch_size', type=int, default=16, help='mini-batch size for training')
parser.add_argument('--eval_batch_size', type=int, default=4, help='mini-batch size for evaluation')
parser.add_argument('--start_epoch', type=int, default=0, help='starting epoch')
parser.add_argument('--n_epochs', type=int, default=10, help='number of total epochs')
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--mode', type=str, default='weak', choices=['weak', 'full'])
parser.add_argument('--base_net', type=str, default='Feat_Enc')
parser.add_argument('--land_enc', type=str, default='Land_Detect')
parser.add_argument('--au_enc', type=str, default='AU_Detect')
parser.add_argument('--invar_shape_enc', type=str, default='Texture_Enc')
parser.add_argument('--feat_gen', type=str, default='Generator')
parser.add_argument('--invar_shape_disc', type=str, default='Land_Disc')
parser.add_argument('--feat_gen_disc', type=str, default='Discriminator')
# Training configuration.
parser.add_argument('--lambda_au', type=float, default=1, help='weight for AU detection loss')
parser.add_argument('--lambda_land', type=float, default=0.6, help='weight for landmark detection loss')
parser.add_argument('--lambda_land_adv', type=float, default=400, help='weight for landmark adversarial loss')
parser.add_argument('--lambda_feat_adv', type=float, default=1.2, help='weight for feature adversarial loss')
parser.add_argument('--lambda_cross_cycle', type=float, default=40, help='weight for cross-cycle consistency loss')
parser.add_argument('--lambda_self_recons', type=float, default=3, help='weight for self-reconstruction loss')
parser.add_argument('--display', type=int, default=100, help='iteration gaps for displaying')
parser.add_argument('--gen_optimizer_type', type=str, default='Adam')
parser.add_argument('--gen_beta1', type=float, default=0.5, help='beta1 for Adam optimizer of generation')
parser.add_argument('--gen_beta2', type=float, default=0.9, help='beta2 for Adam optimizer of generation')
parser.add_argument('--gen_lr_type', type=str, default='lambda')
parser.add_argument('--gen_lr', type=float, default=5e-5, help='learning rate for generation')
parser.add_argument('--task_optimizer_type', type=str, default='Adam')
parser.add_argument('--task_beta1', type=float, default=0.95, help='beta1 for Adam optimizer of task')
parser.add_argument('--task_beta2', type=float, default=0.999, help='beta2 for Adam optimizer of task')
parser.add_argument('--task_lr_type', type=str, default='lambda')
parser.add_argument('--task_lr', type=float, default=1e-4, help='learning rate for task')
parser.add_argument('--decay_start_epoch', type=int, default=5, help='epoch for decaying lr')
# Directories.
parser.add_argument('--write_path_prefix', type=str, default='data/snapshots/')
parser.add_argument('--write_res_prefix', type=str, default='data/res/')
parser.add_argument('--flip_reflect', type=str, default='data/list/reflect_49.txt')
parser.add_argument('--src_train_path_prefix', type=str, default='data/list/BP4D_train')
parser.add_argument('--src_val_path_prefix', type=str, default='data/list/BP4D_val')
parser.add_argument('--src_test_path_prefix', type=str, default='data/list/BP4D_test')
parser.add_argument('--tgt_train_path_prefix', type=str, default='data/list/emotioNet_train')
parser.add_argument('--tgt_val_path_prefix', type=str, default='data/list/emotioNet_val')
parser.add_argument('--tgt_test_path_prefix', type=str, default='data/list/emotioNet_test')
config = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_id
print(config)
main(config) | 26,351 | 58.485327 | 272 | py |
ADLD | ADLD-master/dataset/face_transform.py | import cv2
import numpy as np
import os
import math
def align_face_49pts(img, img_land, box_enlarge, img_size):
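    # Alignment sketch: average the eye landmarks to get the two eye centres,
    # rotate the face so the eye line is horizontal, then scale and translate so
    # that five anchor points (the eye centres plus three face points, presumably
    # the nose tip and mouth corners) fit into an img_size square enlarged by
    # box_enlarge. The same affine transform is applied to the landmarks, which
    # are returned together with the warped image.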
leftEye0 = (img_land[2 * 19] + img_land[2 * 20] + img_land[2 * 21] + img_land[2 * 22] + img_land[2 * 23] +
img_land[2 * 24]) / 6.0
leftEye1 = (img_land[2 * 19 + 1] + img_land[2 * 20 + 1] + img_land[2 * 21 + 1] + img_land[2 * 22 + 1] +
img_land[2 * 23 + 1] + img_land[2 * 24 + 1]) / 6.0
rightEye0 = (img_land[2 * 25] + img_land[2 * 26] + img_land[2 * 27] + img_land[2 * 28] + img_land[2 * 29] +
img_land[2 * 30]) / 6.0
rightEye1 = (img_land[2 * 25 + 1] + img_land[2 * 26 + 1] + img_land[2 * 27 + 1] + img_land[2 * 28 + 1] +
img_land[2 * 29 + 1] + img_land[2 * 30 + 1]) / 6.0
deltaX = (rightEye0 - leftEye0)
deltaY = (rightEye1 - leftEye1)
l = math.sqrt(deltaX * deltaX + deltaY * deltaY)
sinVal = deltaY / l
cosVal = deltaX / l
mat1 = np.mat([[cosVal, sinVal, 0], [-sinVal, cosVal, 0], [0, 0, 1]])
mat2 = np.mat([[leftEye0, leftEye1, 1], [rightEye0, rightEye1, 1], [img_land[2 * 13], img_land[2 * 13 + 1], 1],
[img_land[2 * 31], img_land[2 * 31 + 1], 1], [img_land[2 * 37], img_land[2 * 37 + 1], 1]])
mat2 = (mat1 * mat2.T).T
cx = float((max(mat2[:, 0]) + min(mat2[:, 0]))) * 0.5
cy = float((max(mat2[:, 1]) + min(mat2[:, 1]))) * 0.5
if (float(max(mat2[:, 0]) - min(mat2[:, 0])) > float(max(mat2[:, 1]) - min(mat2[:, 1]))):
halfSize = 0.5 * box_enlarge * float((max(mat2[:, 0]) - min(mat2[:, 0])))
else:
halfSize = 0.5 * box_enlarge * float((max(mat2[:, 1]) - min(mat2[:, 1])))
scale = (img_size - 1) / 2.0 / halfSize
mat3 = np.mat([[scale, 0, scale * (halfSize - cx)], [0, scale, scale * (halfSize - cy)], [0, 0, 1]])
mat = mat3 * mat1
aligned_img = cv2.warpAffine(img, mat[0:2, :], (img_size, img_size), cv2.INTER_LINEAR, borderValue=(128, 128, 128))
land_3d = np.ones((len(img_land)/2, 3))
land_3d[:, 0:2] = np.reshape(np.array(img_land), (len(img_land)/2, 2))
mat_land_3d = np.mat(land_3d)
new_land = np.array((mat * mat_land_3d.T).T)
new_land = np.reshape(new_land[:, 0:2], len(img_land))
return aligned_img, new_land
list_path_prefix = '../data/list/'
write_path_prefix = '../data/imgs/'
box_enlarge = 2.8
img_size = 200
all_imgs_path = open(list_path_prefix + 'emotioNet_test_path.txt').readlines()
all_imgs_land = np.loadtxt('emotioNet_test_land.txt')
# Make the landmarks be indexed from 0
all_imgs_land = all_imgs_land - 1
if not os.path.exists(write_path_prefix):
os.makedirs(write_path_prefix)
all_imgs_new_land = np.zeros(all_imgs_land.shape)
for i in range(len(all_imgs_path)):
# for i in range(1):
full_path = all_imgs_path[i].strip()
sub_paths = full_path.split('/')
full_path_prefix = full_path[:-len(sub_paths[-1])]
if not os.path.exists(write_path_prefix + full_path_prefix):
os.makedirs(write_path_prefix + full_path_prefix)
print('%d\t%s' % (i, sub_paths[-1]))
img = cv2.imread(full_path)
aligned_img, new_land = align_face_49pts(img, all_imgs_land[i], box_enlarge, img_size)
cv2.imwrite(write_path_prefix + full_path, aligned_img)
all_imgs_new_land[i, :] = new_land
np.savetxt(list_path_prefix+'emotioNet_test_land.txt', all_imgs_new_land, fmt='%f', delimiter='\t')
| 3,367 | 41.1 | 119 | py |
ADLD | ADLD-master/dataset/write_AU_weight.py | import numpy as np
list_path_prefix = '../data/list/'
'''
example of content in 'BP4D_train_AUoccur.txt':
0 0 0 0 1 0
0 0 0 1 1 1
0 0 0 0 0 0
'''
imgs_AUoccur = np.loadtxt(list_path_prefix + 'BP4D_train_AUoccur.txt')
AUoccur_rate = np.zeros((1, imgs_AUoccur.shape[1]))
for i in range(imgs_AUoccur.shape[1]):
AUoccur_rate[0, i] = sum(imgs_AUoccur[:,i]>0) / float(imgs_AUoccur.shape[0])
AU_weight = 1.0 / AUoccur_rate
AU_weight = AU_weight / AU_weight.sum() * AU_weight.shape[1]
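# e.g., occurrence rates [0.5, 0.25] give raw inverse weights [2, 4]; rescaling so
# that they sum to the number of AUs (2) yields [2/3, 4/3].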
np.savetxt(list_path_prefix+'BP4D_train_weight.txt', AU_weight, fmt='%f', delimiter='\t')
| 575 | 27.8 | 89 | py |
Fitter | Fitter-master/gsf_core.py | from __future__ import print_function
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
matplotlib_version = eval(matplotlib.__version__.split(".")[0])
if matplotlib_version > 1:
plt.style.use("classic")
plt.rc('font',family='Times New Roman')
import os
import sys
import types
import numpy as np
import importlib
from time import time
import cPickle as pickle
from sedfit.dir_list import root_path
import sedfit.SED_Toolkit as sedt
from sedfit.mcmc import mcmc_emcee as mcmc
__all__ = ["configImporter", "fitter", "gsf_fitter"]
def configImporter(configfile):
"""
This function import the provided configure file.
Parameters
----------
configfile : string
The name of the configure file (with the path).
Returns
-------
config : module object
The imported module.
Notes
-----
None.
"""
pathList = configfile.split("/")
configPath = "/".join(pathList[0:-1])
sys.path.append(configPath)
configName = pathList[-1].split(".")[0]
config = importlib.import_module(configName)
return config
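# Example (hypothetical path): the configure file is imported as an ordinary
# module, so its top-level variables become attributes:
#     config = configImporter("configs/config_example.py")
#     print(config.targname, config.redshift)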
def fitter(sedData, sedModel, unctDict, parTruth, emceeDict, mpi_pool=None):
"""
This function is run the SED fitting with the MCMC method.
Parameters
----------
sedData : SEDClass object
The data set of SED.
sedModel : ModelCombiner object
The combined model. The parameters are set to generate the mock SED.
unctDict : dict
{
"lnf" : float, (-inf, lnf_max]
The ln of f, the imperfectness of the model.
"lna" : float, (-inf, lnf_max]
The ln of a, the amplitude of the residual correlation.
"lntau" : float, (-inf, lnf_max]
The ln of tau, the scale length of the residual correlation.
}
    parTruth : list or None
        The true values of the model parameters (used as the truths of the
        diagnostic plots), or None if the truth is not known.
emceeDict : dict
The dict containing the parameters for emcee to sample the parameter space.
mpi_pool : (optional) emcee.mpi_pool.MPIPool object
        The MPI pool used to run the sampling in parallel, if provided.
Returns
-------
em : EmceeModel object
The object of EmceeModel.
Notes
-----
None.
"""
#->Prepare to run the iteration
t0 = time()
setupKeys = emceeDict["Setup"].keys()
print( "\n#{:-^50}#".format("emcee Setups") )
if not mpi_pool is None:
setupKeys.remove("threads")
print("**MPI mode")
for keys in setupKeys:
print("{0}: {1}".format(keys, emceeDict["Setup"][keys]))
threads = emceeDict["Setup"]["threads"]
printFrac = emceeDict["Setup"]["printfrac"]
psLow = emceeDict["Setup"]["pslow"]
psCenter = emceeDict["Setup"]["pscenter"]
psHigh = emceeDict["Setup"]["pshigh"]
#->Start the iteration
runList = emceeDict.keys()
runList.remove("Setup")
for loop_run in range(len(runList)):
runName = runList[loop_run]
#->Print the fitting stage.
runDict = emceeDict[runName]
runKeys = runDict.keys()
SamplerType = runDict.get("sampler", "EnsembleSampler")
nwalkers = runDict.get("nwalkers", 100)
iteration = runDict.get("iteration", [500, 500])
thin = runDict.get("thin", 1)
ballR = runDict.get("ball-r", 0.1)
print( "\n#{:-^50}#".format( " {0} ".format(runName) ) )
if (SamplerType == "EnsembleSampler") & ("ntemps" in runKeys):
runKeys.remove("ntemps")
for keys in runKeys:
print("{0}: {1}".format(keys, runDict[keys]))
#->Setup the sampler
if unctDict is None:
modelUnct = False
else:
modelUnct = True
em = mcmc.EmceeModel(sedData, sedModel, modelUnct, unctDict, SamplerType)
if SamplerType == "EnsembleSampler":
if mpi_pool is None:
sampler = em.EnsembleSampler(nwalkers, threads=threads)
else:
sampler = em.EnsembleSampler(nwalkers, pool=mpi_pool)
if loop_run == 0: #If it is the first iteration, the initial position of the walkers are set.
p0 = [em.from_prior() for i in range(nwalkers)]
else:
p0 = em.p_ball(pcen, ratio=ballR)
elif SamplerType == "PTSampler":
ntemps = runDict["ntemps"]
if mpi_pool is None:
sampler = em.PTSampler(ntemps, nwalkers, threads=threads)
else:
sampler = em.PTSampler(ntemps, nwalkers, pool=mpi_pool)
if loop_run == 0:#If it is the first iteration, the initial position of the walkers are set.
p0 = []
for i in range(ntemps):
p0.append([em.from_prior() for i in range(nwalkers)])
else:
p0 = em.p_ball(pcen, ratio=ballR)
#->Run the MCMC sampling
for i in range(len(iteration)):
em.reset()
steps = iteration[i]
print( "\n{:*^35}".format(" {0}th {1} ".format(i, runName)) )
em.run_mcmc(p0, iterations=steps, printFrac=printFrac, thin=thin)
em.diagnose()
pcen = em.p_logl_max() #pcen = em.p_median()
em.print_parameters(truths=parTruth, burnin=0)
em.plot_lnlike(filename="gsf_temp_lnprob.png", histtype="step")
print( "**Time ellapse: {0:.3f} hour".format( (time() - t0)/3600. ) )
p0 = em.p_ball(pcen, ratio=ballR)
return em
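# A minimal emceeDict sketch consistent with the keys read above (all values are
# illustrative only; the real dict comes from the configure file):
#     emceeDict = {
#         "Setup": {"threads": 4, "printfrac": 0.1,
#                   "pslow": 16, "pscenter": 50, "pshigh": 84},
#         "Burn-in": {"sampler": "EnsembleSampler", "nwalkers": 100,
#                     "iteration": [500, 500], "thin": 1, "ball-r": 0.1},
#         "Final": {"sampler": "EnsembleSampler", "nwalkers": 100,
#                   "iteration": [1000], "thin": 1, "ball-r": 0.1},
#     }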
def gsf_fitter(configName, targname=None, redshift=None, distance=None,
sedFile=None, mpi_pool=None, refit=False):
"""
The wrapper of fitter() function. If the targname, redshift and sedFile are
provided as arguments, they will be used overriding the values in the config
file saved in configName. If they are not provided, then, the values in the
config file will be used.
Parameters
----------
configName : str
The full path of the config file.
targname : str or None by default
The name of the target.
redshift : float or None by default
The redshift of the target.
distance : float or None by default
The distance of the source from the Sun.
sedFile : str or None by default
The full path of the sed data file.
mpi_pool : (optional) emcee.mpi_pool.MPIPool object
        The MPI pool used to run the sampling in parallel, if provided.
Returns
-------
None.
Notes
-----
None.
"""
############################################################################
# Setup #
############################################################################
config = configImporter(configName)
if targname is None:
assert redshift is None
assert distance is None
assert sedFile is None
targname = config.targname
redshift = config.redshift
distance = config.distance
sedFile = config.sedFile
else:
assert not redshift is None
assert not sedFile is None
print("#--------------------------------#")
print("Target: {0}".format(targname))
print("Redshift: {0}".format(redshift))
print("Distance: {0}".format(distance))
print("SED file: {0}".format(sedFile))
print("Config file: {0}".format(configName))
print("#--------------------------------#")
#-> Verbose or not
try:
silent = config.silent
except:
silent = False
#-> Check whether there is already fitting results
try:
ppDict = config.ppDict
except:
print("[gsf] Warning: cannot find ppDict in the configure file!")
ppDict = {}
savePath = ppDict.get("savepath", "results/")
if os.path.isfile("{0}{1}.fitrs".format(savePath, targname)):
if refit:
print("The object {0} is overwrited!".format(targname))
else:
print("The object {0} is skipped!".format(targname))
return 1
#-> Dump the modelDict for model_functions.py to choose the modules to import
modelDict = config.modelDict
modelDictPath = "{0}temp_model.dict".format(root_path)
fp = open(modelDictPath, "w")
pickle.dump(modelDict, fp)
fp.close()
#->Setup the data Data
dataDict = config.dataDict
sedPck = sedt.Load_SED(sedFile)
from sedfit import sedclass as sedsc
sedData = sedsc.setSedData(targname, redshift, distance, dataDict, sedPck, silent)
#->Setup the model
print("#--------------------------------#")
print("The model info:")
parCounter = 0
for modelName in modelDict.keys():
print("[{0}]".format(modelName))
model = modelDict[modelName]
for parName in model.keys():
param = model[parName]
if not isinstance(param, types.DictType):
continue
elif param["vary"]:
print("-- {0}, {1}".format(parName, param["type"]))
parCounter += 1
else:
pass
print("Varying parameter number: {0}".format(parCounter))
print("#--------------------------------#")
#--> Import the model functions
from sedfit import model_functions as sedmf
funcLib = sedmf.funcLib
waveModel = config.waveModel
try:
parAddDict_all = config.parAddDict_all
except:
parAddDict_all = {}
parAddDict_all["DL"] = sedData.dl
parAddDict_all["z"] = redshift
parAddDict_all["frame"] = "rest"
#from sedfit.fitter import basicclass as bc
#sedModel = bc.Model_Generator(modelDict, funcLib, waveModel, parAddDict_all)
from sedfit.sedmodel import SedModel
sedModel = SedModel(modelDict, funcLib, waveModel, parAddDict_all)
############################################################################
# Fit #
############################################################################
parTruth = config.parTruth #Whether to provide the truth of the model
unctDict = config.unctDict
emceeDict = config.emceeDict
em = fitter(sedData, sedModel, unctDict, parTruth, emceeDict, mpi_pool)
############################################################################
# Post process #
############################################################################
print("#--------------------------------#")
#-> Remove the temp files
os.remove(modelDictPath)
#-> Load the post process information
psLow = ppDict.get("low", 16)
psCenter = ppDict.get("center", 50)
psHigh = ppDict.get("high", 84)
nuisance = ppDict.get("nuisance", True)
fraction = ppDict.get("fraction", 0)
burnIn = ppDict.get("burn-in", 50)
#-> Dump the fitting results
#--> Check the save path. Create the directory if it does not exists.
if not os.path.isdir(savePath):
os.makedirs(savePath)
print("Save all the results to: {0}".format(savePath))
dataPck = {
"targname": targname,
"redshift": redshift,
"distance": sedData.dl,
"sedPck": sedPck,
"dataDict": dataDict
}
modelPck = {
"modelDict": modelDict,
"waveModel": waveModel,
"parAddDict_all": parAddDict_all,
"parTruth": parTruth,
"unctDict": unctDict
}
fitrs = {
"dataPck": dataPck,
"modelPck": modelPck,
"ppDict": ppDict,
"posterior_sample": em.posterior_sample(burnin=burnIn, fraction=fraction),
"chain": em.sampler.chain,
"lnprobability": em.sampler.lnprobability
}
fp = open("{0}{1}.fitrs".format(savePath, targname), "w")
pickle.dump(fitrs, fp)
fp.close()
#->Save the best-fit parameters
em.Save_BestFit("{0}{1}_bestfit.txt".format(savePath, targname), low=psLow,
center=psCenter, high=psHigh, burnin=burnIn, fraction=fraction)
#->Plot the chain of the final run
em.plot_chain(filename="{0}{1}_chain.png".format(savePath, targname), truths=parTruth)
#->Plot the SED fitting result figure
sedwave = sedData.get_List("x")
sedflux = sedData.get_List("y")
xmin = np.min(sedwave) * 0.9
xmax = np.max(sedwave) * 1.1
xlim = [xmin, xmax]
ymin = np.min(sedflux) * 0.5
ymax = np.max(sedflux) * 3.0
ylim = [ymin, ymax]
fig = plt.figure(figsize=(10, 5))
ax = plt.gca()
cList = ["green", "orange", "blue", "yellow", "purple"]
cKwargs = { #The line properties of the model components.
"ls_uc": "--",
"alpha_uc": 0.1,
"lw_uc": 0.5,
"ls_bf": "--",
"alpha_bf": 1.0,
"lw_bf": 1.0,
}
tKwargs = { #The line properties of the model total.
"ls_uc": "-",
"alpha_uc": 0.1,
"lw_uc": 0.5,
"ls_bf": "-",
"alpha_bf": 1.0,
"lw_bf": 3.0,
"color": "red",
}
em.plot_fit(truths=parTruth, FigAx=(fig, ax), xlim=xlim, ylim=ylim, nSamples=100,
burnin=burnIn, fraction=fraction, cList=cList, cLineKwargs=cKwargs,
tLineKwargs=tKwargs)
ax.text(0.05, 0.95, targname, transform=ax.transAxes, fontsize=24,
verticalalignment='top', horizontalalignment='left',
bbox=dict(facecolor='white', alpha=0.5, edgecolor="none"))
ax.legend(loc="lower right", framealpha=0.3, fontsize=15, numpoints=1)
plt.savefig("{0}{1}_result.png".format(savePath, targname), bbox_inches="tight")
plt.close()
#->Plot the posterior probability distribution
em.plot_corner(filename="{0}{1}_triangle.png".format(savePath, targname),
burnin=burnIn, nuisance=nuisance, truths=parTruth,
fraction=fraction, quantiles=[psLow/100., psCenter/100., psHigh/100.],
show_titles=True, title_kwargs={"fontsize": 20})
print("Post-processed!")
return 0
| 14,048 | 36.364362 | 105 | py |
Fitter | Fitter-master/gsf.py | from __future__ import print_function
import os
import sys
import warnings
from optparse import OptionParser
from gsf_core import *
#Parse the commands#
#-------------------#
parser = OptionParser()
parser.add_option("-w", "--warning", dest="warning",
action="store_true", default=False,
help="Stop ignoring the warnings.")
parser.add_option("-o", "--overwrite", dest="overwrite",
action="store_true", default=False,
help="Overwrite the object information with the command-line inputs.")
parser.add_option("-r", "--refit", dest="refit", action="store_true", default=False,
help="Refit the SED though there is a result found.")
(options, args) = parser.parse_args()
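# Typical invocations (file and target names are illustrative):
#     python gsf.py config_example.py
#     python gsf.py -o config_example.py TARGET 0.05 data/TARGET.sed        # override target info
#     python gsf.py -o config_example.py TARGET 0.05 224.0 data/TARGET.sed  # with distance in Mpc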
if len(args) == 0:
raise AssertionError("The config file is not specified!")
else:
configName = args[0] #Get the input configure file information.
#Some times the warning may stop the code, so we ignore the warnings by default.
if options.warning:
pass
else:
warnings.simplefilter("ignore")
if options.refit:
refit = True
else:
refit = False
#The starter of this module#
#--------------------------#
print("\n")
print("############################")
print("# Galaxy SED Fitter starts #")
print("############################")
print("\n")
#->The object can be provided by the configure file or be overwrite with the
#command-line inputs
len_args = len(args)
if not options.overwrite:
if len_args > 1:
print("**Warning[UniFit]: there are more arguments may not be used...")
gsf_fitter(configName, refit=refit)
else:
if len_args < 4:
raise AssertionError("The object information is lacking!")
if len_args == 4:
targname = args[1]
redshift = eval(args[2])
distance = None
sedFile = args[3]
elif len_args == 5:
targname = args[1]
redshift = eval(args[2])
distance = eval(args[3]) #The distance should be in Mpc.
sedFile = args[4]
else:
print("**Warning[UniFit]: there are more arguments may not be used...")
gsf_fitter(configName, targname, redshift, distance, sedFile, refit=refit)
| 2,177 | 33.03125 | 88 | py |
Fitter | Fitter-master/MockFit.py | import os
import gc
import sys
import gsf
import warnings
import traceback
import importlib
import numpy as np
import rel_SED_Toolkit as sedt
from optparse import OptionParser
from astropy.table import Table
#Include the config directory#
#----------------------------#
if os.path.isdir("configs"):
sys.path.append("configs/")
#Parse the commands#
#-------------------#
parser = OptionParser()
parser.add_option("-l", "--list", dest="list", default=None, metavar="FILE",
help="Provide a list of target info to fit.")
parser.add_option("-n", "--usename", dest="usename",
action="store_true", default=False,
help="Try to find the config file specified with the target name.")
parser.add_option("-r", "--refit", dest="refit",
action="store_true", default=False,
help="Refit the SED though there is a result found.")
parser.add_option("-w", "--warning", dest="warning",
action="store_true", default=False,
help="Stop ignoring the warnings.")
(options, args) = parser.parse_args()
if len(args) == 0:
raise AssertionError("The config file is not specified!")
#Some times the warning may stop the code, so we ignore the warnings by default.
if options.warning:
pass
else:
warnings.simplefilter("ignore")
configName = args[0]
config = importlib.import_module(configName.split("/")[-1].split(".")[0])
targetList = options.list
if targetList is None: #->If the target list is not provided, only fit one target according to the config file.
targname = config.targname
redshift = config.redshift
sedFile = config.sedFile
sedPck = sedt.Load_SED(sedFile, config.sedRng, config.spcRng, config.spcRebin)
print("#--------------------------------#")
print("Target: {0}".format(targname))
print("SED file: {0}".format(sedFile))
print("Config file: {0}".format(configName))
print("#--------------------------------#")
with open(sedFile, "r") as f:
linesT = f.readlines()
exec linesT[-1][1:]
config.parTruth = inputPars
#print "parTruth:"
#print config.parTruth
gsf.fitter(targname, redshift, sedPck, config)
else: #->If the target list is provided, fit the targets one by one.
if len(args) == 2:
sedPath = args[1]
else:
sedPath = ""
targTable = Table.read(targetList , format="ascii.ipac")
nameList = targTable["Name"]
zList = targTable["z"]
sedList = targTable["sed"]
print("\n***There are {0} targets to fit!\n".format(len(nameList)))
for loop in range(len(nameList)):
targname = nameList[loop]
redshift = zList[loop]
sedname = sedList[loop]
if not options.refit: #Omit the target if there is a fitting result.
fileList = os.listdir(".")
if "{0}_bestfit.txt".format(targname) in fileList:
print("\n***{0} has been fitted!\n".format(targname))
continue
if options.usename: #Try to use the config file of the target itself.
fileList = os.listdir(".")
configTry = "config_{0}.py".format(targname)
if configTry in fileList:
configName = configTry
sedFile = sedPath + sedname
sedPck = sedt.Load_SED(sedFile, config.sedRng, config.spcRng, config.spcRebin)
print("#--------------------------------#")
print("Target: {0}".format(targname))
print("SED file: {0}".format(sedFile))
print("Config file: {0}".format(configName))
print("#--------------------------------#")
with open(sedFile, "r") as f:
linesT = f.readlines()
exec linesT[-1][1:]
config.parTruth = inputPars
#print "parTruth:"
#print config.parTruth
try:
gsf.fitter(targname, redshift, sedPck, config)
except:
print("\n---------------------------")
print("***Fitting {0} is failed!".format(targname))
traceback.print_exc()
print("---------------------------")
gc.collect()
| 4,150 | 38.160377 | 111 | py |
Fitter | Fitter-master/UniFit.py | from __future__ import print_function
import os
from optparse import OptionParser
from astropy.table import Table
def makeCommand(cDict):
"""
Make up the command line from the dict.
"""
commandList = [cDict["head"]]
for item in cDict["options"]:
commandList.append(item)
for item in cDict["args"]:
commandList.append(item)
cLine = " ".join(commandList)
return cLine
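# e.g. makeCommand({"head": "python gsf.py", "options": ["-o"], "args": ["config.py", "NGC1068"]})
# returns "python gsf.py -o config.py NGC1068" (names are illustrative).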
#->Parse the commands
parser = OptionParser()
parser.add_option("-m", "--mpi_ncore", dest="ncores", default="1",
help="Run the code with MPI using the asigned number of cores if ncores>1.")
parser.add_option("-w", "--warning", dest="warning", action="store_true", default=False,
help="Stop ignoring the warnings.")
parser.add_option("-o", "--overwrite", dest="overwrite", action="store_true", default=False,
help="Overwrite the object information with the command-line inputs.")
parser.add_option("-l", "--list", dest="list", default=None,
help="Provide a list of target info to fit.")
parser.add_option("-f", "--list_format", dest="list_format", default="ascii.ipac",
help="Provide a list of target info to fit.")
parser.add_option("-r", "--refit", dest="refit", action="store_true", default=False,
help="Refit the SED though there is a result found. Only works with -l assigned.")
parser.add_option("-c", "--config", dest="config", default=None,
help="Assign the config file for a list of objects.")
parser.add_option("-p", "--path", dest="path", default=None,
help="Assign the path of the SED data for a list of objects.")
(options, args) = parser.parse_args()
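# Typical invocation for a target list (file names are illustrative); the list
# needs 'Name', 'z' and 'sed' columns, while 'config' and 'DL' are optional:
#     python UniFit.py -m 4 -l targets.ipac -c configs/config_example.py -p data/seds/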
if options.list is None:
if not options.config is None:
parser.error("Option -c only works with -l.")
if not options.path is None:
parser.error("Option -p only works with -l.")
#->Determine whether use MPI or not.
commandDict = {}
ncores = eval(options.ncores)
if ncores == 1:
commandHead = "python gsf.py"
elif ncores > 1:
commandHead = "mpirun -np {0} python gsf_mpi.py".format(ncores)
commandDict = {
"head": commandHead,
}
#->Determine whether to run a list of objects.
targetList = options.list
if targetList is None:
commandDict["options"] = []
commandDict["args"] = args
if options.overwrite:
commandDict["options"].append("-o")
if options.warning:
commandDict["options"].append("-w")
if options.refit: # Force to refit the objects
commandDict["options"].append("-r")
commandLine = makeCommand(commandDict)
os.system(commandLine)
else: #If the target list is provided, fit the targets one by one.
sedPath = options.path
if sedPath is None:
sedPath = ""
targTable = Table.read(targetList, format=options.list_format)
nameList = targTable["Name"]
zList = targTable["z"]
sedList = targTable["sed"]
print("\n***There are {0} targets to fit!\n".format(len(nameList)))
for loop in range(len(nameList)):
targname = nameList[loop]
redshift = zList[loop]
if options.config is None:
try:
configName = targTable["config"][loop]
except:
raise RuntimeError("Fail to find the config file information.")
else:
configName = options.config
commandArgs = [configName, targname, "{0}".format(redshift)]
if "DL" in targTable.colnames:
distance = targTable["DL"][loop]
commandArgs.append("{0}".format(distance))
else:
distance = None
sedFile = sedPath + sedList[loop]
commandArgs.append(sedFile)
#-> Add options
commandDict["options"] = ["-o"] #We need to overwrite the target info.
if options.refit: # Force to refit the objects
commandDict["options"].append("-r")
if options.warning:
commandDict["options"].append("-w")
commandDict["args"] = commandArgs
commandLine = makeCommand(commandDict)
try:
os.system(commandLine)
except:
print("***Fail to run:\n '{0}'!".format(commandLine))
| 4,223 | 38.849057 | 100 | py |
Fitter | Fitter-master/mockSED.py | import os
import sys
import types
import importlib
import numpy as np
#np.seterr(all="ignore")
import george
from george import kernels
import matplotlib
import matplotlib.pyplot as plt
matplotlib_version = eval(matplotlib.__version__.split(".")[0])
if matplotlib_version > 1:
plt.style.use("classic")
import sedfit.SED_Toolkit as sedt
from astropy.table import Table
from sedfit.fitter import basicclass as bc
from sedfit import sedclass as sedsc
from sedfit import model_functions as sedmf
from sedfit.fit_functions import logLFunc_gp, logLFunc
def dataPerturb(x, sigma, pert=True, maxIter=10):
"""
Perturb the data assuming it is a Gaussian distribution around the detected
values with standard deviation as the uncertainties.
"""
if pert:
xp = sigma * np.random.randn(len(np.atleast_1d(x))) + x
counter = 0
while np.any(xp<=0):
xp = sigma * np.random.randn(len(np.atleast_1d(x))) + x
counter += 1
if counter > maxIter:
raise ValueError("The data is too noisy...")
else:
xp = x
return xp
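# e.g. dataPerturb(np.array([1.0, 5.0]), np.array([0.1, 0.5])) returns the input
# values with Gaussian noise of the given sigma added, redrawn (up to maxIter
# times) until all perturbed values are positive.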
def randomRange(low, high):
"""
Calculate the random number in range [low, high).
Parameters
----------
low : float
The lower boundary.
high: float
The upper boundary.
Returns
-------
r : float
The random number in [low, high).
Notes
-----
None.
"""
assert high >= low
rg = high - low
r = low + rg * np.random.rand()
return r
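# e.g. randomRange(-0.05, 0.05) draws a uniform value in [-0.05, 0.05).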
def configImporter(configfile):
"""
This function import the provided configure file.
Parameters
----------
configfile : string
The name of the configure file (with the path).
Returns
-------
config : module object
The imported module.
Notes
-----
None.
"""
pathList = configfile.split("/")
configPath = "/".join(pathList[0:-1])
sys.path.append(configPath)
configName = pathList[-1].split(".")[0]
config = importlib.import_module(configName)
return config
def mocker(sedData, sedModel, sysUnc=None, uncModel=None, silent=True,
pert=True, nonDetect=True):
"""
This function is to generate a mock SED according to a given observed SED.
Basically, the flux densities will be replaced by the model value while the
wavelength and uncertainties of the data will be kept.
Parameters
----------
sedData : SEDClass object
The data set of SED.
sedModel : ModelCombiner object
The combined model. The parameters are set to generate the mock SED.
    sysUnc : dict or None (default: None)
{
"pht": [([nBgn, nEnd], frac), ([nBgn, nEnd], frac), ...],
"spc": frac
}
    uncModel : dict or None (default: None)
{
"lnf" : float, (-inf, 0]
The ln of f, the imperfectness of the model.
"lna" : float, (-inf, 1]
The ln of a, the amplitude of the residual correlation.
"lntau" : float, (-inf, 1]
The ln of tau, the scale length of the residual correlation.
}
pert : bool, default: True
Perturb the data according to the uncertainty if True.
nonDetect : bool, default: True
Replace the upperlimits of the sedData to the mock SED.
Returns
-------
mockPck : dict
The dict of mock data.
sed : the concatenated wave, flux and sigma of the SED.
pht : the photometric wave, flux, sigma and band.
spc : the spectra wave, flux and sigma.
"""
################################################################################
# Data #
################################################################################
#->Generate the mock data
waveModel = sedModel.get_xList()
fluxModel = sedModel.combineResult()
mockPht0 = np.array(sedData.model_pht(waveModel, fluxModel))
mockSpc0 = np.array(sedData.model_spc(sedModel.combineResult))
#->Make sure the perturbed data not likely be too far away, or even negative.
#For photometric data
mockPhtSigma = np.array(sedData.get_dsList("e"))
fltr_sigma = mockPht0 < 3.0*mockPhtSigma #sedsigma
if np.any(fltr_sigma):
print("[mocker] Warning: There are some bands with flux less than 3*sigma!")
mockPhtSigma[fltr_sigma] = mockPht0[fltr_sigma] / 3.0
mockPht = dataPerturb(mockPht0, mockPhtSigma, pert)
#For spectroscopic data
mockSpcSigma = np.array(sedData.get_csList("e"))
fltr_sigma = mockSpc0 < 3.0*mockSpcSigma
if np.any(fltr_sigma):
mockSpcSigma[fltr_sigma] = mockSpc0[fltr_sigma] / 3.0
mockSpc = dataPerturb(mockSpc0, mockSpcSigma, pert)
mockPhtWave = np.array(sedData.get_dsList("x"))
mockSpcWave = np.array(sedData.get_csList("x"))
#->Systematic uncertainties
if not sysUnc is None:
sysSpc = sysUnc["spc"]
mockSpc = (1 + randomRange(-sysSpc, sysSpc)) * mockSpc
sysPhtList = sysUnc["pht"]
for phtRg, frac in sysPhtList:
#print phtRg, frac, mockPht[phtRg[0]:phtRg[1]]
mockPht[phtRg[0]:phtRg[1]] = (1 + randomRange(-frac, frac)) * mockPht[phtRg[0]:phtRg[1]]
#->Model imperfectness & spectral residual correlation
if not uncModel is None:
e = np.e
#For the photometric data
if sedData.check_dsData():
f = e**uncModel["lnf"]
mockPht = (1 + randomRange(-f, f)) * mockPht
else:
f = 0
#For the spectral data
if sedData.check_csData():
a = e**uncModel["lna"]
tau = e**uncModel["lntau"]
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
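            # Draw one realization of correlated residuals from a squared-exponential
            # Gaussian process (amplitude set via lna, correlation scale via lntau)
            # and add it to the mock spectrum to mimic correlated model imperfectness.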
mockSpc = (1 + randomRange(-f, f)) * mockSpc
mockSpc += gp.sample(mockSpcWave)
#->Add the upperlimits
if nonDetect:
phtflux = np.array(sedData.get_dsList("y"))
phtflag = np.array(sedData.get_dsList("f"))
fltr_undct = phtflag == 1
mockPht[fltr_undct] = phtflux[fltr_undct]
mockPhtSigma[fltr_undct] = -1
mockSedFlux = np.concatenate([mockPht, mockSpc])
mockSedWave = np.concatenate([mockPhtWave, mockSpcWave])
mockSedSigma = np.concatenate([mockPhtSigma, mockSpcSigma])
mockPhtBand = sedData.get_unitNameList()
mockSpcBand = np.zeros_like(mockSpcWave)
mockSedBand = np.concatenate([mockPhtBand, mockSpcBand])
mockPck = {
"sed": (mockSedWave, mockSedFlux, mockSedSigma, mockSedBand),
"pht": (mockPhtWave, mockPht, mockPhtSigma, mockPhtBand),
"spc": (mockSpcWave, mockSpc, mockSpcSigma, mockSpcBand),
}
print "mockPht", mockPht
return mockPck
def sedLnLike(sedData, sedModel, uncModel):
"""
Calculate the lnlike of the SED.
"""
mockPars = sedModel.get_parVaryList()
if uncModel is None:
lnlike = logLFunc(mockPars, sedData, sedModel)
else:
mockPars = list(mockPars)
mockPars.append(uncModel["lnf"])
mockPars.append(uncModel["lna"])
mockPars.append(uncModel["lntau"])
lnlike = logLFunc_gp(mockPars, sedData, sedModel)
return lnlike
def PlotMockSED(sedData, mockData, sedModel):
waveModel = sedModel.get_xList()
sedPhtFlux = sedData.get_List("y")
xmin = np.min(waveModel)
xmax = np.max(waveModel)
ymin = np.min(sedPhtFlux) / 10.0
ymax = np.max(sedPhtFlux) * 10.0
FigAx = sedData.plot_sed()
FigAx = mockData.plot_sed(FigAx=FigAx, phtColor="r", spcColor="r")
FigAx = sedModel.plot(FigAx=FigAx)
fig, ax = FigAx
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
return (fig, ax)
def gsm_mocker(configName, targname=None, redshift=None, distance=None, sedFile=None,
mockPars=None, uncModel=None, plot=False, cal_lnlike=False, **kwargs):
"""
    The wrapper of the mocker() function. If targname, redshift and sedFile
    are provided as arguments, they override the values in the config file
    specified by configName. Otherwise, the values in the config file are used.
Parameters
----------
configName : str
The full path of the config file.
targname : str or None by default
The name of the target.
redshift : float (optional)
The redshift of the target.
distance : float (optional)
The distance of the target.
sedFile : str or None by default
The full path of the sed data file.
mockPars : list
The parameters to generate the mock SED.
uncModel : dict or None, by default
{
"lnf" : float, (-inf, 0]
The ln of f, the imperfectness of the model.
"lna" : float, (-inf, 1]
The ln of a, the amplitude of the residual correlation.
"lntau" : float, (-inf, 1]
The ln of tau, the scale length of the residual correlation.
}
plot : bool, default: False
Plot the SED to visually check if True.
Returns
-------
mock : tuple
The (wavelength, flux, sigma, band) of the mock data in the observed frame.
lnlike : float (optional)
The lnlikelihood of the mock SED and the model.
FigAx : tuple (optional)
The (fig, ax) of the SED figure, if plot is True.
Notes
-----
None.
"""
#config = importlib.import_module(configName.split(".")[0])
config = configImporter(configName)
if targname is None:
assert redshift is None
assert sedFile is None
targname = config.targname
redshift = config.redshift
sedFile = config.sedFile
else:
assert not redshift is None
assert not sedFile is None
print("#--------------------------------#")
print("Target: {0}".format(targname))
print("SED file: {0}".format(sedFile))
print("Config file: {0}".format(configName))
print("#--------------------------------#")
try:
silent = config.silent
except:
silent = False
############################################################################
# Data #
############################################################################
dataDict = config.dataDict
sedPck = sedt.Load_SED(sedFile)
sedData = sedsc.setSedData(targname, redshift, distance, dataDict, sedPck, silent)
############################################################################
# Model #
############################################################################
modelDict = config.modelDict
print("The model info:")
parCounter = 0
for modelName in modelDict.keys():
print("[{0}]".format(modelName))
model = modelDict[modelName]
for parName in model.keys():
param = model[parName]
if not isinstance(param, types.DictType):
continue
elif param["vary"]:
print("-- {0}, {1}".format(parName, param["type"]))
parCounter += 1
else:
pass
print("Varying parameter number: {0}".format(parCounter))
print("#--------------------------------#")
#->Build up the model
funcLib = sedmf.funcLib
waveModel = config.waveModel
try:
parAddDict_all = config.parAddDict_all
except:
parAddDict_all = {}
parAddDict_all["DL"] = sedData.dl
parAddDict_all["z"] = redshift
parAddDict_all["frame"] = "rest"
sedModel = bc.Model_Generator(modelDict, funcLib, waveModel, parAddDict_all)
    sedModel.updateParList(mockPars) #Set the model with the desired parameters.
############################################################################
# Mock #
############################################################################
mockPck = mocker(sedData, sedModel, **kwargs)
#->Reform the result and switch back to the observed frame
sed = mockPck["sed"]
pht = mockPck["pht"]
spc = mockPck["spc"]
sed = sedt.SED_to_obsframe(sed, redshift)
pht = sedt.SED_to_obsframe(pht, redshift)
spc = sedt.SED_to_obsframe(spc, redshift)
phtwave = pht[0]
phtband = pht[3]
spcwave = spc[0]
mockSedWave = sed[0]
mockSedFlux = sed[1]
mockSedSigma = sed[2]
mockSedBand = np.concatenate([phtband, np.zeros_like(spcwave, dtype="int")])
mock = (mockSedWave, mockSedFlux, mockSedSigma, mockSedBand)
result = [mock]
#->Calculate the lnlike
mockDict = {
"phtName": dataDict["phtName"],
"spcName": dataDict["spcName"],
"bandList_use": dataDict["bandList_use"],
"bandList_ignore": dataDict["bandList_ignore"],
"frame": "obs",
}
mockPck = {
"sed": sed,
"pht": pht,
"spc": spc
}
mockData = sedsc.setSedData(targname, redshift, distance, mockDict, mockPck, silent)
if cal_lnlike:
lnlike = sedLnLike(mockData, sedModel, uncModel)
result.append(lnlike)
#->Plot
if plot:
FigAx = PlotMockSED(sedData, mockData, sedModel)
result.append(FigAx)
return result
if __name__ == "__main__":
filename = "haha/aaa/bbb"
pathList = filename.split("/")
print pathList
configPath = "/".join(pathList[0:-1])
print configPath
'''
#-->Generate Mock Data
parTable = Table.read("/Volumes/Transcend/Work/PG_MCMC/pg_clu_qpahVar/compile_pg_clu.ipac", format="ascii.ipac")
infoTable = Table.read("targlist/targlist_rq.ipac", format="ascii.ipac")
#print parTable.colnames
if os.path.isdir("configs"):
sys.path.append("configs/")
configName = "config_mock_clu"
mockSub = "test"
parNameList = ['logMs', 'logOmega', 'T', 'logL', 'i', 'tv', 'q', 'N0', 'sigma', 'Y',
'logumin', 'qpah', 'gamma', 'logMd']
comments = """
#This mock SED is created from {0} at redshift {1}.
#The uncertainties of the data are the real uncertainties of the sources.
#The systematics: WISE:{S[0]}, PACS:{S[1]}, SPIRE:{S[2]}, MIPS:{S[3]}.
#The config file in use is {2}.
#lnlikelihood = {3}
#parNames = {4}
#inputPars = {5}
"""
#->WISE (Jarrett2011), PACS(Balog2014), SPIRE(Pearson2013), Spitzer(MIPS handbook)
sysUnc = {
#"spc": 0.05,
"spc": 0.00,
#"pht": [([0, 2], 0.03), ([2, 5], 0.05), ([5, 8], 0.05)]
"pht": [([0, 2], 0.00), ([2, 5], 0.00), ([5, 8], 0.00)] #Check for the accurate case
}
#loop_T = 0
nRuns = 1 #len(parTable)
for loop_T in range(nRuns):
targname = infoTable["Name"][loop_T]
redshift = infoTable["z"][loop_T]
sedFile = infoTable["sed"][loop_T]
fltr_Target = parTable["Name"]==targname
#Load the mock parameters
mockPars = []
for parName in parNameList:
mockPars.append(parTable["{0}_C".format(parName)][fltr_Target][0])
#print parTable[loop_T]
gsmPck = gsm_mocker(configName, targname, redshift, sedFile=sedFile,
mockPars=mockPars, sysUnc=sysUnc, #uncModel=[-np.inf, -np.inf, -np.inf],
pert=False, plot=True, cal_lnlike=True)
mock, lnlike, FigAx = gsmPck
plt.savefig("mock/{0}_mock.png".format(targname), bbox_inches="tight")
plt.close()
print("--------lnlike={0:.5f}".format(lnlike))
#->Save mock file
wave = mock[0]
flux = mock[1]
sigma = mock[2]
band = mock[3]
mockTable = Table([wave, flux, sigma, band],
names=['wavelength', 'flux', 'sigma', "band"])
mockTable["wavelength"].format = "%.3f"
mockTable["flux"].format = "%.3f"
mockTable["sigma"].format = "%.3f"
mockName = "mock/{0}_{1}.msed".format(targname, mockSub)
mockTable.write(mockName, format="ascii", delimiter="\t", overwrite=True)
#f = open("mock/{0}_{1}.msed".format(mockName, mockSub), "w")
f = open(mockName, "a")
suList = [sysUnc["pht"][0][1], sysUnc["pht"][1][1], sysUnc["pht"][2][1], sysUnc["spc"]]
cmnt = comments.format(targname, redshift, configName, lnlike, parNameList, mockPars, S=suList)
f.writelines(cmnt)
f.close()
'''
| 16,520 | 35.550885 | 116 | py |
Fitter | Fitter-master/gsf_mpi.py | from __future__ import print_function
import os
import sys
import warnings
from optparse import OptionParser
from emcee.utils import MPIPool
from gsf_core import *
#Parse the commands#
#-------------------#
parser = OptionParser()
parser.add_option("-w", "--warning", dest="warning",
action="store_true", default=False,
help="Stop ignoring the warnings.")
parser.add_option("-o", "--overwrite", dest="overwrite",
action="store_true", default=False,
help="Overwrite the object information with the command-line inputs.")
parser.add_option("-r", "--refit", dest="refit", action="store_true", default=False,
help="Refit the SED though there is a result found.")
(options, args) = parser.parse_args()
if len(args) == 0:
raise AssertionError("The config file is not specified!")
else:
configName = args[0] #Get the input configure file information.
#Some times the warning may stop the code, so we ignore the warnings by default.
if options.warning:
pass
else:
warnings.simplefilter("ignore")
if options.refit:
refit = True
else:
refit = False
pool = MPIPool()
if not pool.is_master():
pool.wait()
sys.exit(0)
#-> The starter of this module
print("\n")
print("############################")
print("# Galaxy SED Fitter starts #")
print("############################")
print("\n")
#--> The object can be provided by the configure file or be overwrite with the
# command-line inputs
len_args = len(args)
if not options.overwrite:
if len_args > 1:
print("**Warning[UniFit]: there are more arguments may not be used...")
gsf_fitter(configName, mpi_pool=pool, refit=refit)
else:
if len_args < 4:
pool.close()
raise AssertionError("The object information is lacking!")
if len_args == 4:
targname = args[1]
redshift = eval(args[2])
distance = None
sedFile = args[3]
elif len_args == 5:
targname = args[1]
redshift = eval(args[2])
distance = eval(args[3]) #The distance should be in Mpc.
sedFile = args[4]
else:
print("**Warning[UniFit]: there are more arguments may not be used...")
gsf_fitter(configName, targname, redshift, distance, sedFile, pool, refit)
pool.close()
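#-> Typical invocation sketch (file names and values below are hypothetical and
#   assume an MPI environment is available):
#   run under MPI and let the config file define the target,
#       mpirun -np 8 python gsf_mpi.py config_example.py
#   or override the target information from the command line with -o,
#       mpirun -np 8 python gsf_mpi.py -o config_example.py mock_target 0.05 mock_target.sed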
| 2,309 | 31.535211 | 88 | py |
Fitter | Fitter-master/sedfit/sedclass.py | #This code is from: Composite_Model_Fit/dl07/dev_SEDClass.ipynb
import types
import numpy as np
import matplotlib.pyplot as plt
from fitter import basicclass as bc
from . import bandfunc as bf
from .dir_list import filter_path
from scipy.interpolate import splrep, splev
from collections import OrderedDict
import SED_Toolkit as sedt
#import sedfit.SED_Toolkit as sedt
__all__ = ["SedClass", "setSedData"]
ls_mic = 2.99792458e14 #micron/s
xlabelDict = {
"cm": r'$\lambda \, \mathrm{(cm)}$',
"mm": r'$\lambda \, \mathrm{(mm)}$',
"micron": r'$\lambda \, \mathrm{(\mu m)}$',
"angstrom": r'$\lambda \, \mathrm{(\AA)}$',
"Hz": r'$\nu \, \mathrm{(Hz)}$',
"MHz": r'$\nu \, \mathrm{(MHz)}$',
"GHz": r'$\nu \, \mathrm{(GHz)}$',
}
ylabelDict = {
"fnu": r'$f_\nu \, \mathrm{(mJy)}$',
"nufnu": r'$\nu f_\nu \, \mathrm{(erg\,s^{-1}\,cm^{-2})}$',
}
class SedClass(bc.DataSet):
"""
    A class representing the SED data. It is a kind of DataSet class.
Parameters
----------
targetName : str
The target name of the SED.
    redshift : float
The redshift of the target.
H0 : float, default: 67.8
The Hubble constant assumed for the cosmology.
Om0 : float, default: 0.308
        The matter density parameter assumed for the cosmology.
phtDict : dict, default: {}
The photometric data packed in a dict. The items should be
the DiscreteSet().
spcDict : dict, default: {}
The spectral data packed in a dict. The items should be the
ContinueSet().
"""
def __init__(self, targetName, redshift, Dist=None, H0=67.8, Om0=0.308,
phtDict={}, spcDict={}):
"""
Parameters
----------
targetName : string
The target name.
redshift : float
The redshift of the target.
Dist : float (optional)
The physical (or luminosity) distance of the source.
H0 : float
The Hubble constant.
Om0 : float
            The matter density parameter.
phtDict : dict
The dict containing the information of the photometric data.
spcDict : dict
The dict containing the information of the spectroscopic data.
Returns
-------
None.
Notes
-----
None.
"""
bc.DataSet.__init__(self, phtDict, spcDict)
self.targetName = targetName
self.redshift = redshift
spc_wave = np.array(self.get_csList("x"))
spc_flux = np.array(self.get_csList("y"))
spc_unct = np.array(self.get_csList("e"))
if bool(spcDict): # If there is spectral data
self.spc_WaveLength = np.max(spc_wave) - np.min(spc_wave)
self.spc_FluxMedian = np.sum(spc_flux / spc_unct**2) / np.sum(spc_unct**-2)
self.__bandDict = {}
if Dist is None:
if redshift > 1e-2:
#Calculate the luminosity distance
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=H0, Om0=Om0)
self.dl = cosmo.luminosity_distance(redshift).value #Luminosity distance in unit Mpc.
else:
raise ValueError("The redshift ({0}) is too small to accurately estimate the distance.".format(redshift))
else:
self.dl = Dist
def pht_plotter(self, wave, flux, sigma, flag, FigAx=None, ebDict=None,
Quiet=True, xUnits="micron", yUnits="fnu"):
wave = np.array(wave)
flux = np.array(flux)
sigma = np.array(sigma)
flag = np.array(flag)
if(len(wave) == 0):
if Quiet is False:
print 'There is no data in the SED!'
return FigAx
npt = len(wave) # The number of data points
nup = np.sum(sigma<0) # The number of upperlimits
if FigAx == None:
fig = plt.figure(figsize=(8, 6))
ax = fig.gca()
else:
fig = FigAx[0]
ax = FigAx[1]
#print 'The ax is provided!'
if ebDict is None:
ebDict = {
"linestyle": "none",
"ms": 6,
"mew": 1.5,
"elinewidth": 1.5,
"color": "black",
"fmt": "o",
"capsize": 0,
"zorder": 4,
}
if yUnits == "fnu": # Default settings, units: mJy
pass
elif yUnits == "nufnu": # Convert from mJy to erg s^-1 cm^-2
y_conv = ls_mic / sedt.WaveToMicron(wave, xUnits) * 1.e-26
flux *= y_conv
sigma *= y_conv
else:
raise ValueError("The yUnits ({0}) is not recognised!".format(yUnits))
ax.errorbar(wave, flux, yerr=sigma, uplims=flag, **ebDict)
ax.set_xlabel(xlabelDict[xUnits], fontsize=18)
ax.set_ylabel(ylabelDict[yUnits], fontsize=18)
ax.set_xscale('log')
ax.set_yscale('log')
ax.tick_params(labelsize=16)
return (fig, ax)
def plot_pht(self, FigAx=None, phtLW=1.5, phtColor='k', phtMS=6, zorder=4,
**kwargs):
dataDict = self.get_dsArrays()
for name in dataDict.keys():
wave = dataDict[name][0]
flux = dataDict[name][1]
sigma = dataDict[name][2]
flag = dataDict[name][3]
pht_ebDict = {
"linestyle": "none",
"ms": phtMS,
"mew": phtLW,
"elinewidth": phtLW,
"color": phtColor,
"fmt": "o",
"capsize": 0,
"zorder": zorder,
"label": name,
}
FigAx = self.pht_plotter(wave, flux, sigma, flag, FigAx, pht_ebDict,
**kwargs)
return FigAx
def spc_plotter(self, wave, flux, sigma, FigAx=None, ebDict=None, Quiet=True,
xUnits="micron", yUnits="fnu"):
wave = np.array(wave)
flux = np.array(flux)
sigma = np.array(sigma)
if(len(wave) == 0):
if Quiet is False:
print 'There is no data in the SED!'
return FigAx
if FigAx == None:
fig = plt.figure(figsize=(8, 6))
ax = fig.gca()
else:
fig = FigAx[0]
ax = FigAx[1]
if ebDict is None:
ebDict = {
"linewidth": 1.,
"color": "grey",
"zorder": 4.,
}
if yUnits == "fnu": # Default settings, units: mJy
pass
elif yUnits == "nufnu": # Convert from mJy to erg s^-1 cm^-2
y_conv = ls_mic / sedt.WaveToMicron(wave, xUnits) * 1.e-26
flux *= y_conv
sigma *= y_conv
else:
raise ValueError("The yUnits ({0}) is not recognised!".format(yUnits))
ax.step(wave, flux, **ebDict)
fel = flux - sigma
feu = flux + sigma
ebDict["label"] = None
ax.fill_between(wave, y1=fel, y2=feu, step="pre", alpha=0.4, **ebDict)
ax.set_xlabel(xlabelDict[xUnits], fontsize=18)
ax.set_ylabel(ylabelDict[yUnits], fontsize=18)
ax.set_xscale('log')
ax.set_yscale('log')
return (fig, ax)
def plot_spc(self, FigAx=None, spcLS="-", spcLW=2., spcColor='grey',
zorder=4, **kwargs):
dataDict = self.get_csArrays()
for name in dataDict.keys():
wave = dataDict[name][0]
flux = dataDict[name][1]
sigma = dataDict[name][2]
spc_ebDict = {
"linestyle": spcLS,
"linewidth": spcLW,
"color": spcColor,
"zorder": zorder,
"label": name,
}
FigAx = self.spc_plotter(wave, flux, sigma, FigAx, spc_ebDict,
**kwargs)
return FigAx
def plot_sed(self, FigAx=None, **kwargs):
if self.check_dsData() > 0:
FigAx = self.plot_pht(FigAx=FigAx, **kwargs)
if self.check_csData() > 0:
FigAx = self.plot_spc(FigAx=FigAx, **kwargs)
return FigAx
def add_bandpass(self, bandDict):
for bn in bandDict.keys():
if isinstance(bandDict[bn], bf.BandPass):
self.__bandDict[bn] = bandDict[bn]
else:
raise ValueError('The bandpass {0} has incorrect type!'.format(bn))
def set_bandpass(self, bandList, sedwave, silent=True):
z = self.redshift
bandDict = OrderedDict()
for loop in range(len(bandList)):
bn = bandList[loop]
bandCenter = sedwave[loop] * (1 + z)
#Determine how to use the relative spectral response data.
if bn in bf.monoFilters:
bandFile = "{0}.dat".format(bn)
bandPck = np.genfromtxt(filter_path+bandFile)
bandWave = bandPck[:, 0]
bandRsr = bandPck[:, 1]
bandDict[bn] = bf.BandPass(bandWave, bandRsr, bandCenter, "mono", bn, z, silent)
elif bn in bf.meanFilters:
bandFile = "{0}.dat".format(bn)
bandPck = np.genfromtxt(filter_path+bandFile)
bandWave = bandPck[:, 0]
bandRsr = bandPck[:, 1]
bandDict[bn] = bf.BandPass(bandWave, bandRsr, bandCenter, "mean", bn, z, silent)
else:
bandDict[bn] = bf.BandPass(bandCenter=bandCenter, bandType="none", bandName=bn, redshift=z, silent=silent)
if not silent:
print("The band {0} is not included in our database!".format(bn))
self.add_bandpass(bandDict)
def get_bandpass(self):
return self.__bandDict
def filtering(self, bandName, wavelength, flux):
"""
Calculate the flux density of the input spectrum filtered by
the specified band. The spectrum is considered at the rest frame.
Parameters
----------
bandName : str
The name of the band.
wavelength : float array
The wavelength of the input spectrum.
flux : float array
The flux of the input spectrum.
Returns
-------
A tuple of the band central wavelength and the flux density after filtered by
the bandpass.
Notes
-----
None.
"""
z = self.redshift
bandpass = self.__bandDict.get(bandName, None)
if bandpass is None:
raise AttributeError("The bandpass '{0}' is not found!".format(bandName))
bandwave = bandpass.get_bandCenter_rest()
bandflux = bandpass.filtering(wavelength, flux)
return (bandwave, bandflux)
def model_pht(self, wavelength, flux):
"""
Calculate the model flux density of the input spectrum at the wavelengths of
all the bands of the photometric data of the SED. The spectrum is considered
at the rest frame.
Parameters
----------
wavelength : float array
The wavelength of the input spectrum.
flux : float array
The flux of the input spectrum.
Returns
-------
fluxList : list
The model flux at all the wavelengths of the photometric SED.
Notes
-----
None.
"""
bandNameList = self.get_unitNameList()
fluxList = []
for bandName in bandNameList:
fluxList.append(self.filtering(bandName, wavelength, flux)[1])
return fluxList
def model_spc(self, fluxFunc, cSetName=None):
"""
Calculate the model flux density of the input spectrum at the wavelengths of
all the spectra. The input spectrum is considered at the rest frame.
Parameters
----------
wavelength : float array
The wavelength of the input spectrum.
fluxFunc : function
The the function to return the model fluxes.
cSetName : str or None by default
Specify the name of continual set to use.
Returns
-------
fluxList : list
The model flux at the wavelengths of the spectral SED.
Notes
-----
None.
"""
if cSetName is None:
cWaveList = self.get_csList('x')
else:
cSet = self.__continueSetDict.get(cSetName, None)
if cSet is None:
raise KeyError("The set name '{0}' is not found!".format(cSetName))
cWaveList = cSet.get_List('x')
if len(cWaveList) > 0:
fluxList = list( fluxFunc( np.array(cWaveList) ) )
else:
fluxList = []
return fluxList
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
def setSedData(targname, redshift, distance, dataDict, sedPck, silent=True):
"""
Setup sedData.
Parameters
----------
targname : string
The name of the target.
redshift : float
The redshift of the object.
distance : float
The distance of the object.
dataDict : dict
The information of the data.
phtName : the name of the photometric data.
spcName : the name of the spectroscopic data.
bandList_use : the list of bands to be used; use all bands if empty.
bandList_ignore : the list of bands to be ignored.
frame : the frame of the input data is in; "rest" or "obs".
sedPck : dict
The data package of the SED.
sed : concatenated SED.
pht : photometric data.
spc : spectroscopic data.
        All three items are tuples of ("wave", "flux", "sigma"), with "pht"
        having an extra component "band" at the end.
silent : (optional) bool
The toggle not to print some information if True.
Returns
-------
sedData : SEDClass object
The data set of SED.
"""
pht = sedPck["pht"]
spc = sedPck["spc"]
#->The upperlimit corresponds to how many sigmas
nSigma = dataDict.get("nSigma", 3.)
#->Settle into the rest frame
frame = dataDict.get("frame", "rest") #The coordinate frame of the SED; "rest"
#by default.
if frame == "obs":
pht = sedt.SED_to_restframe(pht, redshift)
spc = sedt.SED_to_restframe(spc, redshift)
if not silent:
print("[setSedData]: The input SED is in the observed frame!")
elif frame == "rest":
if not silent:
print("[setSedData]: The input SED is in the rest frame!")
else:
if not silent:
print("[setSedData]: The input SED frame ({0}) is not recognised!".format(frame))
#->Select bands
bandList_use = dataDict.get("bandList_use", []) #The list of bands to incorporate;
#use all the available bands if empty.
bandList_ignore = dataDict.get("bandList_ignore", []) #The list of bands to be
#ignored from the bands to use.
pht = sedt.SED_select_band(pht, bandList_use, bandList_ignore, silent)
phtwave = np.array(pht[0])
phtflux = np.array(pht[1])
phtsigma = np.array(pht[2])
phtband = np.array(pht[3])
spcwave = np.array(spc[0])
spcflux = np.array(spc[1])
spcsigma = np.array(spc[2])
if not silent:
print("[setSedData]: The incorporated bands are: {0}".format(phtband))
#->Check data. If there are nan, raise error.
chck_pht = np.sum(np.isnan(phtflux)) + np.sum(np.isnan(phtsigma))
chck_spc = np.sum(np.isnan(spcflux)) + np.sum(np.isnan(spcsigma))
if chck_pht:
raise ValueError("The photometry contains bad data!")
if chck_spc:
raise ValueError("The spectrum contains bad data!")
#->Put into the sedData
phtName = dataDict.get("phtName", None)
if not phtName is None:
fltr_uplim = phtsigma < 0 #Find the upperlimits.
phtsigma[fltr_uplim] = phtflux[fltr_uplim] / nSigma #Restore the uncertainties for the non-detections.
phtflag = np.zeros_like(phtwave) #Generate the flag for the upperlimits
phtflag[fltr_uplim] = 1 #Find the position of the non-detections and mark them.
phtDataType = ["name", "wavelength", "flux", "error", "flag"]
phtData = {phtName: bc.DiscreteSet(phtband, phtwave, phtflux, phtsigma, phtflag, phtDataType)}
else:
phtData = {}
spcName = dataDict.get("spcName", None)
if not spcName is None:
spcflag = np.zeros_like(spcwave)
spcDataType = ["wavelength", "flux", "error", "flag"]
spcData = {spcName: bc.ContinueSet(spcwave, spcflux, spcsigma, spcflag, spcDataType)}
else:
spcData = {}
sedData = SedClass(targname, redshift, distance, phtDict=phtData, spcDict=spcData)
sedData.set_bandpass(phtband, phtwave, silent)
return sedData
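#-> A minimal usage sketch of setSedData() with hypothetical values.  The SED is
#   assumed to be already in the rest frame and only photometric data are used;
#   the band names are assumed to be supported by sedfit.bandfunc and the filter
#   files.  Note that only the "pht" and "spc" items of sedPck are read here.
def _example_setSedData():
    dataDict = {
        "phtName": "Phot",
        "spcName": None,      # no spectrum in this sketch
        "bandList_use": [],   # empty list -> use all the available bands
        "bandList_ignore": [],
        "frame": "rest",
    }
    pht = (np.array([3.353, 22.088]),         # wavelength (micron)
           np.array([10., 50.]),              # flux (mJy)
           np.array([0.5, 2.]),               # uncertainty (mJy); -1 for upperlimits
           np.array(["WISE_w1", "WISE_w4"]))  # band names
    spc = (np.array([]), np.array([]), np.array([]))
    sedPck = {"pht": pht, "spc": spc}
    return setSedData("mock_target", 0.05, None, dataDict, sedPck, silent=True)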
| 17,142 | 35.787554 | 122 | py |
Fitter | Fitter-master/sedfit/model_functions.py | import os
import numpy as np
from collections import OrderedDict
import cPickle as pickle
from dir_list import root_path
__all__ = ["funcLib", "discreteFuncList"]
#-> Load the modelDict to select the modules to import
modelDictPath = "{0}temp_model.dict".format(root_path)
if os.path.isfile(modelDictPath):
fp = open(modelDictPath, "r")
modelDict = pickle.load(fp)
fp.close()
#--> No need to import all the modules
import_all = 0
else:
#--> Need to import all the modules
import_all = 1
print("Cannot find the temp_model.dict in {0}!".format(root_path))
#-> Load the modules
import_dict = {
"model_bc03": ["BC03", "BC03_PosPar"],
"model_bc03_refine": ["BC03_ref", "BC03_ref_PosPar"],
"model_dl07": ["DL07", "DL07_PosPar"],
"model_analyticals": ["Linear", "BlackBody", "Modified_BlackBody",
"Power_Law", "Synchrotron", "Line_Gaussian_L",
"Poly3"],
"model_xl": ["Torus_Emission", "Torus_Emission_PosPar"],
"model_torus_template": ["Torus_Template"],
"model_pah": ["pah"],
"model_clumpy": ["CLUMPY_intp"],
"model_cat3d_G": ["Cat3d_G", "Cat3d_G_PosPar"],
"model_cat3d_H": ["Cat3d_H", "Cat3d_H_PosPar"],
"model_cat3d_H_wind": ["Cat3d_H_wind", "Cat3d_H_wind_PosPar"],
"model_extinction": ["Calzetti00"],
"model_mir_extinction": ["Smith07"],
}
if import_all:
for mds in import_dict.keys():
funcList = import_dict[mds]
exec "from models.{0} import {1}".format(mds, ",".join(funcList))
else:
#-> Go through the functions in the modelDict
for fnm in modelDict.keys():
funcName = modelDict[fnm]["function"]
#--> Go through the import_dict and find the modules in use
for mds in import_dict.keys():
funcList = import_dict[mds]
if funcName in funcList:
exec "from models.{0} import {1}".format(mds, ",".join(funcList))
#-> Discrete functions
discreteFuncList = ["BC03", "BC03_ref", "Torus_Emission", "DL07", "Cat3d_G",
"Cat3d_H", "Cat3d_H_wind"]
#-> Dict of the supporting functions
funcLib = {
"Linear":{
"x_name": "x",
"param_fit": ["a", "b"],
"param_add": [],
"operation": ["+","*"]
},
"BC03":{
"x_name": "wave",
"param_fit": ["logMs", "age"],
"param_add": ["DL", "z", "frame", "t"],
},
"BC03_ref":{
"x_name": "wave",
"param_fit": ["logMs", "logAge", "sfh"],
"param_add": ["DL", "z", "frame", "t"],
},
"CLUMPY_intp": {
"x_name": "wave",
"param_fit": ["logL", "i", "tv", "q", "N0", "sigma", "Y"],
"param_add": ["DL", "z", "frame", "t"]
},
"Torus_Emission": {
"x_name": "wave",
"param_fit": ["typeSil", "size", "T1Sil", "T2Sil", "logM1Sil", "logM2Sil",
"typeGra", "T1Gra", "T2Gra", "R1G2S", "R2G2S"],
"param_add": ["DL", "z", "frame", "TemplateSil", "TemplateGra"]
},
"DL07": {
"x_name": "wave",
"param_fit": ["logumin", "logumax", "qpah", "loggamma", "logMd"],
"param_add": ["t", "DL", "z", "frame"]
},
"BlackBody": {
"x_name": "wave",
"param_fit": ["logOmega", "T"],
"param_add": []
},
"Modified_BlackBody": {
"x_name": "wave",
"param_fit": ["logM", "beta", "T"],
"param_add": ["DL", "z", "kappa0", "lambda0", "frame"]
},
"Power_Law": {
"x_name": "wave",
"param_fit": ["PL_alpha", "PL_logsf"],
"param_add": []
},
"Synchrotron": {
"x_name": "wave",
"param_fit": ["Sn_alpha", "Sn_logsf"],
"param_add": ["lognuc", "lognum"]
},
"Line_Gaussian_L": {
"x_name": "wavelength",
"param_fit": ["logLum", "lambda0", "FWHM"],
"param_add": ["DL"],
"operation": ["+", "*"]
},
"pah": {
"x_name": "wave",
"param_fit": ["logLpah"],
"param_add": ["t", "DL", "z", "frame", "waveLim"]
},
"Torus_Template": {
"x_name": "wave",
"param_fit": ["logLtorus"],
"param_add": ["DL", "z", "frame", "ttype", "waveLim"]
},
"Cat3d_G": {
"x_name": "wave",
"param_fit": ["a", "theta", "N0", "i", "logL"],
"param_add": ["DL", "z", "frame", "t"],
"operation": ["+"]
},
"Cat3d_H": {
"x_name": "wave",
"param_fit": ["a", "h", "N0", "i", "logL"],
"param_add": ["DL", "z", "frame", "t"],
"operation": ["+"]
},
"Cat3d_H_wind": {
"x_name": "wave",
"param_fit": ["a", "h", "N0", "i", 'fwd', 'aw', 'thetaw', 'thetasig', "logL"],
"param_add": ["DL", "z", "frame", "t"],
"operation": ["+"]
},
"Calzetti00": {
"x_name": "wave",
"param_fit": ["Av", "Rv"],
"param_add": ["waveLim", "QuietMode"],
"operation": ["*"]
},
"Smith07": {
"x_name": "wave",
"param_fit": ["logtau"],
"param_add": [],
"operation": ["*"]
},
"Poly3": {
"x_name": "x",
"param_fit": ["c0", "c1", "c2", "c3"],
"param_add": [],
"operation": ["+"]
}
}
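#-> Usage sketch: the entries above can be queried to see which parameters a
#   model function fits and which additional parameters it needs, e.g.,
#       funcLib["DL07"]["param_fit"] # ["logumin", "logumax", "qpah", "loggamma", "logMd"]
#       funcLib["DL07"]["param_add"] # ["t", "DL", "z", "frame"]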
| 5,200 | 31.304348 | 86 | py |
Fitter | Fitter-master/sedfit/dir_list.py | import os
__all__ = ["root_path", "filter_path", "template_path"]
#-> Obtain the current path
pathList = os.path.abspath(__file__).split("/")
#-> Create the path to the root path
root_path = "/".join(pathList[0:-2]) + "/"
#-> Create the path to the filters
pathList[-1] = "filters/"
filter_path = "/".join(pathList)
#-> Create the path to the templates
pathList[-2] = "template/"
template_path = "/".join(pathList[0:-1])
if __name__ == "__main__":
print root_path
| 470 | 26.705882 | 55 | py |
Fitter | Fitter-master/sedfit/sedmodel.py | ## The class of the models for the SED fitting.
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
from fitter import basicclass as bc
from SED_Toolkit import WaveFromMicron, WaveToMicron
__all__ = ["SedModel"]
ls_mic = 2.99792458e14 # micron/s
class SedModel(bc.ModelCombiner):
def __init__(self, input_model_dict, func_lib, x_list, par_add_dict_all={},
QuietMode=False, **kwargs):
"""
Generate the ModelClass object from the input model dict.
Parameters
----------
input_model_dict : dict (better to be ordered dict)
The dict of input model informations.
An example of the format of the dict elements:
"Slope": { # The name of the model is arbitrary.
"function": "Linear" # Necessary to be exactly the same as
# the name of the variable.
"a": { # The name of the first parameter.
"value": 3., # The value of the parameter.
"range": [-10., 10.], # The prior range of the parameter.
"type": "c", # The type (continuous/discrete) of
# the parameter. Currently, it does
# not matter...
"vary": True, # The toggle whether the parameter is
# fixed (if False).
"latex": "$a$", # The format for plotting.
}
"b": {...} # The same format as "a".
}
func_lib : dict
The dict of the information of the functions.
An example of the format of the dict elements:
"Linear":{ # The function name should be exactly
# the same as the name of the function
# variable it refers to.
"x_name": "x", # The active variable of the function.
"param_fit": ["a", "b"], # The name of the parameters that are
# involved in fitting.
"param_add": [], # The name of the additional parameters
# necessary for the function.
"operation": ["+"] # The operation expected for this
# function, for consistency check.
# "+": to add with other "+" components.
# "*": to multiply to other "+"
# components. One model can be both "+"
# and "*".
x_list : array like
The default active variable for the model.
par_add_dict_all : dict
The additional parameters for all the models in input_model_dict.
**kwargs : dict
Additional keywords for the ModelCombiner.
Returns
-------
sed_model : ModelCombiner object
The combined SED model.
Notes
-----
This is mainly from bc.Model_Generator.
"""
modelDict = OrderedDict()
modelNameList = input_model_dict.keys()
for modelName in modelNameList:
funcName = input_model_dict[modelName]["function"]
funcInfo = func_lib[funcName]
xName = funcInfo["x_name"]
#-> Build up the parameter dictionaries
parFitList = funcInfo["param_fit"]
parAddList = funcInfo["param_add"]
parFitDict = OrderedDict()
parAddDict = {}
for parName in parFitList:
parFitDict[parName] = input_model_dict[modelName][parName]
for parName in parAddList:
par_add_iterm = par_add_dict_all.get(parName, "No this parameter")
if par_add_iterm == "No this parameter":
pass
else:
parAddDict[parName] = par_add_iterm
#-> Check the consistency if the component is multiply
multiList = input_model_dict[modelName].get("multiply", None)
if not multiList is None:
#--> The "*" should be included in the operation list.
assert "*" in funcInfo["operation"]
if not QuietMode:
print "[Model_Generator]: {0} is multiplied to {1}!".format(modelName, multiList)
#--> Check further the target models are not multiplicative.
for tmn in multiList:
f_mlt = input_model_dict[tmn].get("multiply", None)
if not f_mlt is None:
raise ValueError("The multiList includes a multiplicative model ({0})!".format(tmn))
modelDict[modelName] = bc.ModelFunction(funcName, xName, parFitDict,
parAddDict, multiList)
bc.ModelCombiner.__init__(self, modelDict, x_list, **kwargs)
def plot(self, wave=None, colorList=None, FigAx=None, DisplayPars=False,
tKwargs=None, cKwargs={}, useLabel=True, xUnits="micron", yUnits="fnu"):
"""
Plot the SED model. The working wavelength units is micron and the
model output units is assumed mJy.
Parameters
----------
wave (optional): array_like
The array of the wavelength, default units: micron.
colorList (optional): list
The list of the colors for each model components.
FigAx (optional): tuple
The tuple of (fig and ax).
DisplayPars (optional): bool, default: False
Display the parameter values in the figure.
tKwargs (optional): dict or None by default
The keywords to display the total model results.
cKwargs (optional): dict
The keywords to display the component model results.
useLabel (optional): bool
            If True, add the label of the model into the legend. Default: True.
xUnits (optional): string
The units of the x-axis, default: micron. Currently supported units
are:
"cm", "mm", "micron", "angstrom", "Hz", "MHz", "GHz"
yUnits (optional): string
The form of the y-axis, default: fnu.
fnu -- mJy
nufnu -- erg s^-1 cm^-2
Returns
-------
FigAx : tuple
The tuple of (fig and ax).
Notes
-----
May further adopt more units.
"""
if wave is None:
wave = self.get_xList()
else:
wave = WaveToMicron(wave, xUnits) # Convert the units to micron
if FigAx is None:
fig = plt.figure()
ax = plt.gca()
FigAx = (fig, ax)
else:
fig, ax = FigAx
modelDict = self.get_modelDict()
modelList = self.get_modelAddList() #modelDict.keys()
TextIterm = lambda text, v1, v2: text.format(v1, v2)
textList = []
yTotal = self.combineResult(x=wave)
yCmpnt = self.componentResult(x=wave) #The best-fit components
if yUnits == "fnu": # Default settings, units: mJy
pass
elif yUnits == "nufnu": # Convert from mJy to erg s^-1 cm^-2
y_conv = ls_mic / wave * 1.e-26
yTotal *= y_conv
for modelName in modelList:
yCmpnt[modelName] *= y_conv
else:
raise ValueError("The yUnits ({0}) is not recognised!".format(yUnits))
if colorList is None:
colorList = ["orange", "green", "blue", "magenta", "yellow", "cyan"]
nColor = len(colorList)
x = WaveFromMicron(wave, xUnits) # Switch the wavelength units back to
#what assigned
counter = 0
for modelName in modelList:
textList.append( "<{0}>\n".format(modelName) )
mf = modelDict[modelName]
parFitDict = mf.parFitDict
for parName in parFitDict.keys():
textList.append( TextIterm("{0}: {1:.2f}\n", parName,
parFitDict[parName]["value"]) )
y = yCmpnt[modelName]
if useLabel:
cLabel = modelName
else:
cLabel = None
ax.plot(x, y, color=colorList[counter%nColor], label=cLabel, **cKwargs)
counter += 1
if useLabel:
tLabel = "Total"
else:
tLabel = None
if tKwargs is None:
ax.plot(x, yTotal, color="k", label=tLabel)
else:
ax.plot(x, yTotal, label=tLabel, **tKwargs)
text = "".join(textList)
if DisplayPars:
ax.text(1.02, 1.0, text, #bbox=dict(facecolor="white", alpha=0.75),
verticalalignment="top", horizontalalignment="left",
transform=ax.transAxes, fontsize=14)
return FigAx
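#-> Usage sketch: with a SedModel instance (e.g., the sedModel constructed in
#   the __main__ example below), the total and component models can be drawn
#   with the y-axis converted from mJy to erg s^-1 cm^-2,
#       fig, ax = sedModel.plot(xUnits="micron", yUnits="nufnu", DisplayPars=True)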
if __name__ == "__main__":
import sys
import importlib
from sedfit.dir_list import root_path
import sedfit.model_functions as sedmf
def configImporter(configfile):
"""
This function import the provided configure file.
Parameters
----------
configfile : string
The name of the configure file (with the path).
Returns
-------
config : module object
The imported module.
Notes
-----
None.
"""
pathList = configfile.split("/")
configPath = "/".join(pathList[0:-1])
sys.path.append(configPath)
configName = pathList[-1].split(".")[0]
config = importlib.import_module(configName)
return config
config = configImporter("/Users/shangguan/Work/Fitter/configs/config_goals_hon.py")
funcLib = sedmf.funcLib
waveModel = config.waveModel
modelDict = config.modelDict
try:
parAddDict_all = config.parAddDict_all
except:
parAddDict_all = {}
parAddDict_all["DL"] = config.distance
parAddDict_all["z"] = config.redshift
parAddDict_all["frame"] = "rest"
sedModel = SedModel(modelDict, funcLib, waveModel, parAddDict_all)
| 10,604 | 41.083333 | 108 | py |
Fitter | Fitter-master/sedfit/bandfunc.py | from __future__ import print_function
import numpy as np
from scipy.interpolate import interp1d, splrep, splev
import cPickle as pickle
ls_mic = 2.99792458e14 #micron/s
def BandAverage(datawave, dataflux, bandwave, bandrsr):
"""
    This function calculates the band average of the given spectrum with the
    given bandpass information.
Parameters
----------
datawave : float array
The spectral wavelength.
dataflux : float array
The spectral flux density.
bandwave : float array
The wavelength of the response curve.
bandrsr : float array
The relative spectral response curve.
Returns
-------
ff : float
The average flux density of the filter.
Notes
-----
None.
"""
if datawave[-1] <= datawave[0]:
raise ValueError("The data wavelength is incorrect in sequence!")
if bandwave[-1] <= bandwave[0]:
raise ValueError("The filter wavelength is incorrect in sequence!")
filtering = interp1d(bandwave, bandrsr)
fltr = (datawave > bandwave[0]) & (datawave < bandwave[-1])
dw = datawave[fltr] #Make the data wavelength not exceeding the provided band wavelength
df = dataflux[fltr]
br = filtering(dw) #Calculate the bandrsr at the same wavelength of the data
signal = np.trapz(br/dw*df, x=dw)
norm = np.trapz(br/dw, x=dw)
ff = signal/norm
return ff
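#-> A quick self-consistency sketch (toy numbers, hypothetical function name):
#   for a spectrum that is flat in fnu, the band average should recover the
#   same constant value whatever the response curve looks like.
def _example_BandAverage():
    datawave = np.linspace(1., 10., 500)                       # micron
    dataflux = np.full_like(datawave, 5.)                      # flat 5 mJy spectrum
    bandwave = np.linspace(3., 6., 100)
    bandrsr = np.exp(-0.5 * ((bandwave - 4.5) / 0.5)**2)       # toy Gaussian response
    return BandAverage(datawave, dataflux, bandwave, bandrsr)  # ~5 mJy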
def BandFunc_intp(kwargs):
"""
    This function interpolates the input spectrum to obtain the flux density
    processed by the bandpass.
Parameters
----------
kwargs : dict
The dict of all the necessary parameters.
Returns
-------
fluxFltr : float
The flux density obtained by the band.
Notes
-----
None.
"""
wavelength = kwargs['wavelength']
flux = kwargs['flux']
bandCenter = kwargs['band_center']
fluxFltr = interp1d(wavelength, flux)(bandCenter)
return fluxFltr
def BandFunc_Herschel(kwargs):
"""
    This function calculates the flux density that one of the Herschel bands obtains.
Reference: Section 5.2.4, SPIRE Handbook.
Parameters
----------
kwargs : dict
The dict of all the necessary parameters.
Returns
-------
fluxFltr : float
The flux density obtained by the band.
Notes
-----
None.
"""
K4pDict = { #The K4p parameter I calculated myself.
'PACS_70' : 0.994981,
'PACS_100' : 0.999526,
'PACS_160' : 1.004355,
'SPIRE_250': 1.010159,
'SPIRE_350': 1.009473,
'SPIRE_500': 1.005581,
}
wavelength = kwargs['wavelength']
rsrList = kwargs['rsr_list']
flux = kwargs['flux']
bandName = kwargs['band_name']
Sbar = np.trapz(rsrList*flux, wavelength) / np.trapz(rsrList, wavelength)
fluxFltr = K4pDict[bandName] * Sbar
return fluxFltr
def K_MonP(wave0, waveList, rsrList, alpha=-1):
'''
    This is the correction factor to convert the band-averaged flux density
    into the monochromatic flux density, assuming a power-law spectrum.
Parameters
----------
wave0 : float
The effective wavelength of the monochromatic flux density.
waveList: float
The wavelength of the relative spectral response curve.
rsrList : float array
The relative spectral response.
alpha : float
The power-law index of the assumed power-law spectrum; default: -1.
Returns
-------
kmonp : float
The monochromatic correction factor.
Notes
-----
None.
'''
freq = ls_mic / waveList
nu0 = ls_mic / wave0
spc = freq**alpha
k1 = nu0**alpha * np.trapz(rsrList, freq)
k2 = np.trapz(rsrList*spc, freq)
kmonp = k1 / k2
return kmonp
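#-> Illustration with a toy top-hat response (hypothetical function name): the
#   correction factor to convert the band-averaged flux density into the
#   monochromatic one at wave0, for the fiducial fnu ~ nu^-1 spectrum assumed
#   by default.
def _example_K_MonP():
    waveList = np.linspace(200., 300., 200)  # micron
    rsrList = np.ones_like(waveList)         # toy top-hat response
    return K_MonP(250., waveList, rsrList, alpha=-1)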
class BandPass(object):
"""
A class to represent one filter bandpass of a instrument.
It contains the bandpass information and can convert the spectra
into the band flux density. The user should understand how the
instrument works to obtain the measured flux density, in order to
correctly use the relative spectral response (rsr) curve and the
band wavelength.
Parameters
----------
waveList : float array
The wavelength of the relative system response curve. The provided
wavelength should be always in the observed frame.
rsrList : float array
The relative system response curve.
bandCenter : float
The quoted wavelength for the band in the observed frame.
bandType : string
The type of band functions.
bandName : string
The name of the band.
redshift : float
The redshift of the source. Since the SED model is in the rest frame, we
need to move the wavelength of the filter rsr curve into the rest frame.
silent : bool
Stop printing information if True, by default.
"""
def __init__(self, waveList=None, rsrList=None, bandCenter=None, bandType="mean", bandName='None', redshift=0, silent=True):
self.__bandType = bandType
self.redshift = redshift
if not bandType == "none":
assert not waveList is None
assert not rsrList is None
#->Move the wavelength of the bandpass to match the rest-frame SED.
waveList = np.array(waveList) / (1 + redshift)
waveMin = waveList[0]
waveMax = waveList[-1]
if waveMin >= waveMax: #-> The waveList should be from small to large.
raise ValueError("The waveList sequence is incorrect!")
if len(waveList) == len(rsrList):
self.__waveList = waveList
self.__rsrList = rsrList
self._bandName = bandName
else:
raise ValueError("The inputs are not matched!")
self.__filtertck = splrep(waveList, rsrList)
if bandCenter is None: #The bandCenter is not specified, the effective wavelength will
#be used (Eq. A21, Bessell&Murphy 2012), assuming fnu~const.
bandCenter = (1 + redshift) * np.trapz(rsrList, waveList)/np.trapz(rsrList/waveList, waveList)
if not silent:
print("Band {0} center wavelength ({1}) is calculated!".format(bandName, bandCenter))
self.__bandCenter = bandCenter
self.__bandCenter_rest = bandCenter / (1 + redshift)
if bandType == "mono":
self.k4p = K_MonP(self.__bandCenter_rest, waveList, rsrList, alpha=-1)
if not silent:
print("Band {0} calculates the monochromatic flux density!".format(bandName))
elif bandType == "mean":
if not silent:
print("Band {0} calculates the averaged flux density!".format(bandName))
else:
raise ValueError("The input bandType ({0}) is incorrect!".format(bandType))
else:
assert waveList is None
assert rsrList is None
assert not bandCenter is None
self.__waveList = None
self.__rsrList = None
self.__bandCenter = bandCenter
self.__bandCenter_rest = bandCenter / (1 + redshift)
            self._bandName = bandName  #Keep the attribute name consistent with the branch above.
if not silent:
print("Band {0} ({1}) does not have bandpass.".format(bandName, bandCenter))
def get_bandCenter(self):
return self.__bandCenter
def get_bandCenter_rest(self):
return self.__bandCenter_rest
def get_bandpass(self):
bandInfo = {
"wave_list": self.__waveList,
"rsr_list": self.__rsrList,
"band_name": self._bandName,
"band_type": self.__bandType,
"wave_center": self.__bandCenter,
"wave_center_rest": self.__bandCenter_rest,
"redshift": self.redshift
}
return bandInfo
def BandFunc_mono(self, wavelength, flux):
"""
Calculate the monochromatic flux density with the given data. The function
applies for the bolometers used by IR satellites.
To use this function, the relative spectral response should be for the
bolometer (energy/photon) instead of the CCD (electron/photon).
Reference: Section 5.2.4, SPIRE Handbook.
Parameters
----------
wavelength : float array
The wavelength of the spectrum and the relative spectral response.
flux : float array
The flux of the spectrum.
Returns
-------
fluxFltr : float
The monochromatic flux density calculated from the filter rsr.
Notes
-----
To convert the relative spectral response from electron/photon to
electron/energy is simply:
S(electron/energy) = S(electron/photon) / nu
where nu is the corresponding frequency (Bessell & Murphy 2012).
"""
waveMin = self.__waveList[0]
waveMax = self.__waveList[-1]
fltr = (wavelength > waveMin) & (wavelength < waveMax)
if np.sum(fltr) == 0:
raise ValueError("The wavelength is not overlapped with the filter!")
wavelength = wavelength[fltr]
freq = ls_mic / wavelength
flux = flux[fltr]
rsrList = splev(wavelength, self.__filtertck)
Sbar = np.trapz(rsrList*flux, freq) / np.trapz(rsrList, freq)
fluxFltr = self.k4p * Sbar
return fluxFltr
def BandFunc_mean(self, wavelength, flux):
"""
Calculate the band averaged flux density with the given data.
By default, the rsr is photon response and the band flux is defined as
eq. A12 (Bessell&Murphy 2012). The rsr is for the CCD detector instead
of bolometers.
Parameters
----------
wavelength : float array
The wavelength of the spectrum and the relative spectral response.
flux : float array
The flux of the spectrum.
Returns
-------
fluxFltr : float
The monochromatic flux density calculated from the filter rsr.
Notes
-----
None.
"""
waveMin = self.__waveList[0]
waveMax = self.__waveList[-1]
fltr = (wavelength > waveMin) & (wavelength < waveMax)
if np.sum(fltr) == 0:
raise ValueError("The wavelength is not overlapped with the filter!")
wavelength = wavelength[fltr]
flux = flux[fltr]
rsrList = splev(wavelength, self.__filtertck)
signal = np.trapz(rsrList/wavelength*flux, x=wavelength)
norm = np.trapz(rsrList/wavelength, x=wavelength)
fluxFltr = signal/norm
return fluxFltr
def BandFunc_none(self, wavelength, flux):
"""
This band function provides the flux without the bandpass. The flux density
of the model SED at the wavelength closest to the band center is returned.
Parameters
----------
wavelength : float array
The wavelength of the spectrum and the relative spectral response.
flux : float array
The flux of the spectrum.
Returns
-------
fluxFltr : float
The flux density at the wavelength closest to the band center.
Notes
-----
None.
"""
bandCenter = self.__bandCenter_rest
wave_fdev = np.abs((wavelength - bandCenter) / bandCenter)
idx = np.argmin(wave_fdev)
if wave_fdev[idx] > 0.05:
print("[BandPass warning]: The wavelength deviation at {0} ({1}) is large!".format(self._bandName, bandCenter))
fluxFltr = flux[idx]
return fluxFltr
def filtering(self, wavelength, flux):
"""
Calculate the flux density of the input spectrum filtered by the bandpass.
Parameters
----------
wavelength : float array
The wavelength of the input spectrum.
flux : float array
The flux of the input spectrum.
Returns
-------
fluxFltr : float
The flux density of the spectrum after filtered by the bandpass.
Notes
-----
None.
"""
bandCenter = self.__bandCenter_rest
wvMin = wavelength[0]
wvMax = wavelength[-1]
if( (bandCenter <= wvMin) or (bandCenter >= wvMax) ):
raise ValueError("The band center '{0}' is out of the wavelength range '[{1}, {2}]!".format(bandCenter, wvMin, wvMax))
bandType = self.__bandType
if bandType == "mean": #By default, the rsr is photon response and the band flux
#is defined as eq. A12 (Bessell&Murphy 2012).
fluxFltr = self.BandFunc_mean(wavelength, flux)
elif bandType == "mono": #Use the user specified function to get the filtered flux.
fluxFltr = self.BandFunc_mono(wavelength, flux)
else: #If there is no filter, direct calculate the flux density.
fluxFltr = self.BandFunc_none(wavelength, flux)
return fluxFltr
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
#Default filters
filterDict = {
"SDSS_u": 0.36069,
"SDSS_g": 0.46729,
"SDSS_r": 0.61415,
"SDSS_i": 0.74585,
"SDSS_z": 0.89247,
"UKIDSS_Y": 1.03050,
"UKIDSS_J": 1.24830,
"UKIDSS_H": 1.63130,
"UKIDSS_K": 2.20100,
"2MASS_J": 1.235,
"2MASS_H": 1.662,
"2MASS_Ks": 2.159,
"WISE_w1": 3.353,
"WISE_w2": 4.603,
"WISE_w3": 11.561,
"WISE_w4": 22.088,
"Herschel_PACS_70": 70.,
"Herschel_PACS_100": 100.,
"Herschel_PACS_160": 160.,
"Herschel_SPIRE_250": 250.,
"Herschel_SPIRE_350": 350.,
"Herschel_SPIRE_500": 500.,
"Spitzer_IRAC1": 3.550,
"Spitzer_IRAC2": 4.493,
"Spitzer_IRAC3": 5.731,
"Spitzer_IRAC4": 7.872,
"IRAS_12": 12.,
"IRAS_25": 25.,
"IRAS_60": 60.,
"IRAS_100": 100.,
"Spitzer_MIPS_24": 24.,
"Spitzer_MIPS_70": 70.,
"Spitzer_MIPS_160": 160.,
"JCMT_SCUBA1_450": 450.,
"JCMT_SCUBA1_850": 850.,
}
monoFilters = ["Herschel_PACS_70", "Herschel_PACS_100", "Herschel_PACS_160",
"Herschel_SPIRE_250", "Herschel_SPIRE_350", "Herschel_SPIRE_500",
"Herschel_SPIRE_250_e", "Herschel_SPIRE_350_e", "Herschel_SPIRE_500_e",
"Spitzer_IRAC1", "Spitzer_IRAC2", "Spitzer_IRAC3", "Spitzer_IRAC4",
"Spitzer_MIPS_24", "Spitzer_MIPS_70", "Spitzer_MIPS_160",
"IRAS_12", "IRAS_25", "IRAS_60", "IRAS_100"]
meanFilters = ["SDSS_u", "SDSS_g", "SDSS_r", "SDSS_i", "SDSS_z",
"2MASS_J", "2MASS_H", "2MASS_Ks",
"UKIDSS_Y", "UKIDSS_J", "UKIDSS_H", "UKIDSS_K",
"WISE_w1", "WISE_w2", "WISE_w3", "WISE_w4"]
if __name__ == "__main__":
from dir_list import filter_path as bandPath
import matplotlib.pyplot as plt
z = 1.5
bn = "Herschel_SPIRE_500"
bandFile = "{0}.dat".format(bn)
bandPck = np.genfromtxt(bandPath+bandFile)
bandWave = bandPck[:, 0]
bandRsr = bandPck[:, 1]
bandCenter = filterDict[bn]
bandType = "mono"
bp1 = BandPass(bandWave, bandRsr, bandCenter, bandType, bandName=bn, redshift=0, silent=False)
bp2 = BandPass(bandWave, bandRsr, bandCenter, bandType, bandName=bn, redshift=z, silent=False)
#bp2 = BandPass(bandCenter=bandCenter, bandType="none", bandName=bn, redshift=z, silent=False)
alpha = -1
f0 = 10.0
w0 = bandCenter
nu0 = ls_mic / w0
wave_0 = 10**np.linspace(0, 3, 1000)
freq = ls_mic / wave_0
flux_0 = f0*(freq / nu0)**alpha
wave_1 = wave_0 / (1 + z)
flux_1 = flux_0
#mF_0 = interp1d(wave_0, flux_0)
#mF_1 = interp1d(wave_1, flux_1)
#fb_0 = bp1.filtering(mF_0)
#fb_1 = bp2.filtering(mF_1)
fb_0 = bp1.filtering(wave_0, flux_0)
fb_1 = bp2.filtering(wave_1, flux_1)
print("w0={0}".format(w0))
print("f0={0}, fb1={1}".format(f0, fb_0))
print("f0={0}, fb2={1}".format(f0, fb_1))
plt.plot(wave_0, flux_0, ":r", label="z=0")
plt.plot(wave_1, flux_1, ":b", label="z={0}".format(z))
plt.plot(bp1.get_bandCenter_rest(), fb_0, linestyle="none", marker=".", color="r")
plt.plot(bp2.get_bandCenter_rest(), fb_1, linestyle="none", marker=".", color="b")
plt.xscale("log")
plt.yscale("log")
plt.legend()
plt.show()
| 16,436 | 33.97234 | 130 | py |
Fitter | Fitter-master/sedfit/__init__.py | 0 | 0 | 0 | py |
|
Fitter | Fitter-master/sedfit/fit_functions.py | import numpy as np
from scipy.special import erf
import george
from george import kernels
sqrt2 = np.sqrt(2)
PI2 = 2. * np.pi
#-->There are three ways to define the chi-square function to consider the
#upperlimits. The ChiSq functions are defined as (-2)*lnLikelihood.
#->The ChiSq_erf is preferred since it smoothly change the chis quare and recently
#many works use it.
def ChiSq_erf(data, model, unct=None, flag=None):
'''
This function calculate the Chi square of the observed data and
the model. The upper limits are properly deal with using the method
mentioned by Sawicki (2012).
Parameters
----------
data : float array
The observed data.
model : float array
The model.
unct : float array
The uncertainties.
Returns
-------
chsq : float
The Chi square
Notes
-----
None.
'''
if unct is None:
unct = np.ones_like(data)
if flag is None:
flag = np.zeros_like(data)
fltr_dtc = flag == 0
fltr_non = flag == 1
if np.sum(fltr_dtc)>0:
wrsd_dtc = (data[fltr_dtc] - model[fltr_dtc])/unct[fltr_dtc] #The weighted residual
chsq_dtc = np.sum(wrsd_dtc**2) + np.sum( np.log(PI2 * unct[fltr_dtc]**2) )
else:
chsq_dtc = 0.
if np.sum(fltr_non)>0:
wrsd_non = (data[fltr_non] - model[fltr_non])/(unct[fltr_non] * sqrt2)
chsq_non = -2.* np.sum( np.log( 0.5 * (1 + erf(wrsd_non)) ) )
else:
chsq_non = 0.
chsq = chsq_dtc + chsq_non
return chsq
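#-> Illustration with hypothetical numbers (hypothetical function name): two
#   detections and one upperlimit.  The upperlimit (flag=1) enters through the
#   error-function term instead of a quadratic residual, following Sawicki (2012).
def _example_ChiSq_erf():
    data = np.array([10., 20., 5.])   # the last value is an upperlimit
    model = np.array([11., 18., 3.])
    unct = np.array([1., 2., 5.])
    flag = np.array([0., 0., 1.])     # 0: detection, 1: upperlimit
    return ChiSq_erf(data, model, unct, flag)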
def ChiSq_hg(data, model, unct=None, flag=None):
'''
This is a generalized chi-square function that allows y to be upperlimits.
    A point contributes zero to the chi square when the model is below the
    upperlimit, while it contributes like a normal detected point when the
    model is above the upperlimit.
Parameters
----------
data : float array
The observed data and upperlimits.
model : float array
The model.
    unct : float array or None by default
The uncertainties.
flag : float array or None by default
The flag of upperlimits, 0 for detection and 1 for upperlimits.
Returns
-------
chsq : float
The Chi square
Notes
-----
    This chi-square form considers the x and y asymmetrically except for some
    special situations.
'''
if unct is None:
unct = np.ones_like(data)
if flag is None:
flag = np.zeros_like(data)
fltr_dtc = flag == 0
fltr_non = flag == 1
if np.sum(fltr_dtc)>0:
wrsd_dtc = (data[fltr_dtc] - model[fltr_dtc])/unct[fltr_dtc] #The weighted residual
chsq_dtc = np.sum(wrsd_dtc**2) + np.sum( np.log(PI2 * unct[fltr_dtc]**2) )
else:
chsq_dtc = 0.
if np.sum(fltr_non)>0:
data_non = data[fltr_non]
model_non = model[fltr_non]
unct_non = unct[fltr_non]
wrsd_non = np.zeros_like(data_non)
#Only the when the model is above the upperlimit, it contributes to the chi square.
fltr = model_non > data_non
wrsd_non[fltr] = (model_non[fltr] - data_non[fltr]) / unct_non[fltr]
chsq_non = np.sum(wrsd_non**2.) + np.sum( np.log(PI2 * unct_non[fltr]**2.) )
else:
chsq_non = 0.
chsq = chsq_dtc + chsq_non
return chsq
def ChiSq_af(data, model, unct=None, flag=None):
'''
This is a generalized chi-square function that allows y to be upperlimits.
It consider the upperlimits with the method by the AGN fitter (Calistro Rivera et al. 2016).
Parameters
----------
data : float array
The observed data and upperlimits.
model : float array
The model.
    unct : float array or None by default
The uncertainties.
flag : float array or None by default
The flag of upperlimits, 0 for detection and 1 for upperlimits.
Returns
-------
chsq : float
The Chi square
Notes
-----
    This chi-square form considers the x and y asymmetrically except for some
    special situations.
'''
if unct is None:
unct = np.ones_like(data)
if flag is None:
flag = np.zeros_like(data)
fltr_dtc = flag == 0
fltr_non = flag == 1
if np.sum(fltr_dtc)>0:
wrsd_dtc = (data[fltr_dtc] - model[fltr_dtc])/unct[fltr_dtc] #The weighted residual
chsq_dtc = np.sum(wrsd_dtc**2) + np.sum( np.log(PI2 * unct[fltr_dtc]**2) )
else:
chsq_dtc = 0.
if np.sum(fltr_non)>0:
data_non = 0.5 * data[fltr_non]
model_non = model[fltr_non]
unct_non = 0.5 * data[fltr_non]
#Only the when the model is above the upperlimit, it contributes to the chi square.
wrsd_non = (model_non - data_non) / unct_non
chsq_non = np.sum(wrsd_non**2.) + np.sum( np.log(PI2 * unct_non**2.) )
else:
chsq_non = 0.
chsq = chsq_dtc + chsq_non
return chsq
ChiSq = ChiSq_erf
#Model to data function#
def Model2Data(sedModel, sedData):
"""
Convert the continual model to the data-like model to directly
compare with the data.
Parameters
----------
sedModel : ModelCombiner object
The combined model.
sedData : SEDClass object
The data set of SED.
Returns
-------
fluxModel : list
The model flux list of the data.
Notes
-----
None.
"""
waveModel = sedModel.get_xList()
fluxModel = sedModel.combineResult()
fluxModelPht = sedData.model_pht(waveModel, fluxModel)
fluxModelSpc = sedData.model_spc(sedModel.combineResult)
fluxModel = fluxModelPht + fluxModelSpc
return fluxModel
#Model to data function using Gaussian process regression
def Model2Data_gp(sedModel, sedData):
"""
Convert the continual model to the data-like model to directly
compare with the data.
Parameters
----------
sedModel : ModelCombiner object
The combined model.
sedData : SEDClass object
The data set of SED.
Returns
-------
fluxModel : list
The model flux list of the data.
Notes
-----
None.
"""
waveModel = sedModel.get_xList()
fluxModel = sedModel.combineResult()
fluxModelPht = sedData.model_pht(waveModel, fluxModel)
fluxModelSpc = sedData.model_spc(sedModel.combineResult)
fluxDict = {
"pht": fluxModelPht,
"spc": fluxModelSpc
}
return fluxDict
#The log_likelihood function: for SED fitting
def logLFunc(params, data, model):
"""
Calculate the likelihood of data according to the model and its parameters.
Parameters
----------
params : list
The variable parameter list of the model.
data : DataSet
The data need to fit.
model : ModelCombiner
The model to fit the data.
Returns
-------
logL : float
The log likelihood.
Notes
-----
None.
"""
model.updateParList(params)
y = np.array(data.get_List('y'))
e = np.array(data.get_List('e'))
f = np.array(data.get_List('f'))
ym = np.array(Model2Data(model, data))
logL = -0.5 * ChiSq(y, ym, e, f)
return logL
#The log_likelihood function: for SED fitting using Gaussian process regression
def logLFunc_gp(params, data, model):
"""
Calculate the likelihood of data according to the model and its parameters.
Parameters
----------
params : list
The variable parameter list of the model.
data : DataSet
The data need to fit.
model : ModelCombiner
The model to fit the data.
Returns
-------
lnL : float
The ln likelihood.
Notes
-----
None.
"""
#Get the data and error
xSpc = np.array(data.get_csList("x"))
yPht = np.array(data.get_dsList("y"))
ySpc = np.array(data.get_csList("y"))
ePht = np.array(data.get_dsList("e"))
eSpc = np.array(data.get_csList("e"))
fPht = np.array(data.get_dsList("f"))
#Calculate the model
model.updateParList(params)
yDict = Model2Data_gp(model, data)
yPhtModel = np.array(yDict["pht"])
ySpcModel = np.array(yDict["spc"])
nParVary = len(model.get_parVaryList())
#lnlikelihood for photometric data
if len(yPhtModel):
f = np.exp(params[nParVary]) #The parameter to control the model incompleteness
nParVary += 1
sPht = np.sqrt(ePht**2 + (yPhtModel * f)**2)
lnlPht = -0.5 * ChiSq(yPht, yPhtModel, sPht, fPht)
else:
f = 0
lnlPht = 0
#lnlikelihood for spectral data using Gaussian process regression
if len(ySpcModel):
a, tau = np.exp(params[nParVary:]) #The covariance for spectral residual
a = a * data.spc_FluxMedian #Make "a" a relative value
tau = tau * data.spc_WaveLength #Make "tau" a relative value
gp = george.GP(a * kernels.Matern32Kernel(tau))
sSpc = np.sqrt(eSpc**2 + (ySpcModel * f)**2)
gp.compute(xSpc, sSpc)
lnlSpc = gp.lnlikelihood(ySpc - ySpcModel)
else:
lnlSpc = 0
lnL = lnlPht + lnlSpc
return lnL
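#-> Usage sketch (assuming sedData and sedModel were built elsewhere): the
#   parameter list expected by logLFunc_gp is the varying model parameters
#   followed by the nuisance parameters lnf, lna and lntau; the values below
#   are hypothetical.
#       pars = list(sedModel.get_parVaryList()) + [-5., -2., 0.]
#       lnL = logLFunc_gp(pars, sedData, sedModel)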
| 9,058 | 28.412338 | 96 | py |
Fitter | Fitter-master/sedfit/SED_Toolkit.py |
# coding: utf-8
# # This page is to release the functions to manipulate the SEDs and spectra
# * The prototype of this page is [SEDToolKit](http://localhost:8888/notebooks/SEDFitting/SEDToolKit.ipynb) in /Users/jinyi/Work/PG_QSO/SEDFitting/
# In[2]:
import numpy as np
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from astropy.table import Table
from scipy.interpolate import interp1d
__all__ = ["Plot_SED", "SpectraRebin", "SEDResample", "Filter_Pass",
"Herschel_Bands", "Load_SED", "SED_select_band", "SED_to_restframe",
"SED_to_obsframe", "WaveToMicron", "WaveFromMicron"]
ls_mic = 2.99792458e14 # micron/s
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Dec. 13, 2015 #
# Modified by SGJY, May. 19, 2016 #
#-------------------------------------#
#From: /PG_QSO/catalog/Data_SG/wrk_SED_Combine.ipynb
def Plot_SED(wave,
flux,
sigma,
flag,
ebDict=None,
TargetName=None,
RestFrame=False,
FigAx=None,
gridon=True,
Quiet=False):
'''
This function is to conveniently plot the SED. The parameters are:
    wave: the array of wavelengths.
    flux: the fluxes or upper limits at the corresponding wavelengths.
    sigma: the uncertainties for detections and -1 for non-detections at the corresponding wavelengths.
    flag: 1 for the points plotted as upper limits and 0 otherwise.
TargetName: the name of the target which will be shown in the figure.
RestFrame: if False, the xlabel would be lambda_obs, while if True, the xlabel would be lambda_rest.
FigAx: if not None, FigAx should be a tuple with (fig, ax) in it.
'''
wave = np.atleast_1d(wave)
flux = np.atleast_1d(flux)
sigma = np.atleast_1d(sigma)
flag = np.atleast_1d(flag)
    if (len(wave) != len(flux)) or (len(wave) != len(sigma)) or (len(wave) != len(flag)):
        raise Exception('Array lengths are unequal!')
if(len(wave) == 0):
if Quiet is False:
print 'There is no data in the SED!'
return FigAx
if FigAx == None:
fig = plt.figure(figsize=(5, 5))
ax = fig.gca()
else:
fig = FigAx[0]
ax = FigAx[1]
if ebDict is None:
ebDict = {
"linestyle": "none",
"ms": 6,
"mew": 1.5,
"elinewidth": 1.5,
"color": "black",
"fmt": "o",
"capsize": 0
}
ax.errorbar(wave, flux, yerr=sigma, uplims=flag, **ebDict)
if(RestFrame == True):
str_xlabel = r'$\lambda_\mathrm{rest} \, \mathrm{(\mu m)}$'
else:
str_xlabel = r'$\lambda_\mathrm{obs} \, \mathrm{(\mu m)}$'
ax.set_xlabel(str_xlabel, fontsize=24)
ax.set_ylabel(r'$f_\nu \, \mathrm{(mJy)}$', fontsize=24)
ax.set_xscale('log')
ax.set_yscale('log')
if gridon:
ax.grid(which='both')
if TargetName is not None:
ax.text(0.45, 0.9, TargetName,
verticalalignment='bottom', horizontalalignment='right',
transform=ax.transAxes, fontsize=24)
ax.tick_params(labelsize=18)
return fig, ax
#Func_end
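#-> Usage sketch (illustrative only): one way Plot_SED might be called on made-up
#   photometry. All numbers below are arbitrary placeholders, not real data; the
#   last point is drawn as an upper limit via the flag array.
def _example_Plot_SED():
    wave = np.array([3.6, 24.0, 70.0, 160.0]) # micron
    flux = np.array([5.0, 20.0, 150.0, 300.0]) # mJy
    sigma = np.array([0.5, 2.0, 15.0, 30.0]) # mJy
    flag = np.array([0, 0, 0, 1]) # 1 marks an upper limit
    return Plot_SED(wave, flux, sigma, flag, TargetName="Mock")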
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Jan. 26, 2016 #
# Modified by SGJY, Mar. 23, 2016 #
# Modified by SGJY, May. 4, 2016 #
#-------------------------------------#
def SpectraRebin(bin_rsl, spcwave, spcflux, spcsigma):
'''
    This function rebins the spectra according to the bin resolution provided in
    the parameters. The median value is used in each flux bin. The uncertainty of
    each bin is the quadrature sum of the individual uncertainties divided by the
    number of points in the bin.
Parameters
----------
bin_rsl : int
The resolution of each bin. How many data go into one bin.
spcwave : float array
The array of spectra wavelength.
spcflux : float array
The array of spectra flux.
spcsigma : float array
The array of spectra uncertainty.
Returns
-------
A tuple of three arrays (binwave, binflux, binsigma).
binwave : float array
The array of binned spectra wavelength.
binflux : float array
The array of binned spectra flux.
binsigma : float array
The array of binned spectra uncertainty.
Notes
-----
None.
'''
if bin_rsl <= 1.0:
return (spcwave, spcflux, spcsigma)
len_spc = int(len(spcwave)/bin_rsl)
binwave = np.zeros(len_spc)
binflux = np.zeros(len_spc)
binsigma = np.zeros(len_spc)
for loop_spc in range(len_spc):
indx_bng = bin_rsl * loop_spc
indx_end = bin_rsl * (loop_spc + 1)
select_wave = spcwave[indx_bng:indx_end]
select_flux = spcflux[indx_bng:indx_end]
select_sigma = spcsigma[indx_bng:indx_end]
fltr = np.logical_not(np.isnan(select_flux))
binwave[loop_spc] = np.mean(select_wave[fltr])
binflux[loop_spc] = np.median(select_flux[fltr])
        #Quadrature sum of the uncertainties divided by the number of points.
        binsigma[loop_spc] = np.sqrt(np.sum(select_sigma[fltr]**2.0))/len(select_sigma[fltr])
return (binwave, binflux, binsigma)
#Func_end
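#-> Usage sketch (illustrative only): rebinning a synthetic spectrum by a factor
#   of 10. The wavelengths, fluxes and uncertainties are arbitrary placeholders.
def _example_SpectraRebin():
    spcwave = np.linspace(5.0, 35.0, 300) # fake spectral wavelengths (micron)
    spcflux = 10.0 + 0.1 * spcwave # fake spectral fluxes (mJy)
    spcsigma = np.full_like(spcflux, 0.5) # fake uncertainties (mJy)
    binwave, binflux, binsigma = SpectraRebin(10, spcwave, spcflux, spcsigma)
    return binwave, binflux, binsigma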
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Jan. 24, 2016 #
#-------------------------------------#
def SEDResample(wave, flux, wave_resmp):
'''
This function is to resample the SED at given wavelengths.
Parameters
----------
wave : array like
The wavelengths of the template.
flux : array like
The fluxes of the template.
wave_resmp : array like
The wavelengths for resampled SED.
Returns
-------
flux_resmp : array like
The fluxes for resampled SED.
Notes
-----
None.
'''
fsed = interp1d(wave, flux, kind='linear')
flux_resmp = fsed(wave_resmp)
return flux_resmp
#Func_end
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Mar. 7, 2016 #
#-------------------------------------#
def Filter_Pass(wave, flux, fltr_wv, fltr_rs):
'''
    This function calculates the flux density of a model spectrum passing
    through the given filter. The flux density is calculated as the average
    of the model spectrum weighted by the filter response curve.
Parameters
----------
wave : float array
The model wavelength.
flux : float array
The model spectral flux.
fltr_wv : float array
The wavelength of the response curve.
fltr_rs : float array
The fractional flux density transmission.
Returns
-------
fd : float
The flux density.
Notes
-----
None.
'''
model_func = interp1d(wave, flux, kind='linear')
model_flux = model_func(fltr_wv)
fd = np.trapz(model_flux * fltr_rs, fltr_wv)/np.trapz(fltr_rs, fltr_wv)
return fd
#Func_end
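#-> Usage sketch (illustrative only): the band-averaged flux density of a fake
#   power-law spectrum through a made-up top-hat filter. All numbers below are
#   arbitrary placeholders, not a real response curve.
def _example_Filter_Pass():
    wave = np.logspace(0.0, 2.0, 500) # model wavelength grid (micron)
    flux = 10.0 * (wave / 10.0)**1.5 # fake model spectrum (mJy)
    fltr_wv = np.linspace(20.0, 28.0, 50) # fake filter wavelength coverage
    fltr_rs = np.ones_like(fltr_wv) # top-hat response curve
    return Filter_Pass(wave, flux, fltr_wv, fltr_rs) # band-averaged flux density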
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Mar. 7, 2016 #
#-------------------------------------#
def Herschel_Bands(wave, flux,
pacs_70=None,
pacs_100=None,
pacs_160=None,
spire_250=None,
spire_350=None,
spire_500=None):
'''
    This function calculates the photometric data points from the model spectrum.
Parameters
----------
wave : float array
The model wavelength.
flux : float array
The model flux.
pacs_70 : tuple, (wave, response), default: None
The wavelength and response fraction of the 70 micron filter.
pacs_100 : tuple, (wave, response), default: None
The wavelength and response fraction of the 100 micron filter.
pacs_160 : tuple, (wave, response), default: None
The wavelength and response fraction of the 160 micron filter.
spire_250 : tuple, (wave, response), default: None
The wavelength and response fraction of the 250 micron filter.
spire_350 : tuple, (wave, response), default: None
The wavelength and response fraction of the 350 micron filter.
spire_500 : tuple, (wave, response), default: None
The wavelength and response fraction of the 500 micron filter.
Returns
-------
(photo_wave, photo_flxd) : tuple
The wavelength of the photometric points and the corresponding
flux density.
Notes
-----
None.
'''
if pacs_70 is None:
fd_70 = np.nan
else:
pacs_70_wv = pacs_70[0]
pacs_70_rs = pacs_70[1]
fd_70 = Filter_Pass(wave, flux, pacs_70_wv, pacs_70_rs)
if pacs_100 is None:
fd_100 = np.nan
else:
pacs_100_wv = pacs_100[0]
pacs_100_rs = pacs_100[1]
fd_100 = Filter_Pass(wave, flux, pacs_100_wv, pacs_100_rs)
if pacs_160 is None:
fd_160 = np.nan
else:
pacs_160_wv = pacs_160[0]
pacs_160_rs = pacs_160[1]
fd_160 = Filter_Pass(wave, flux, pacs_160_wv, pacs_160_rs)
if spire_250 is None:
fd_250 = np.nan
else:
spire_250_wv = spire_250[0]
spire_250_rs = spire_250[1]
fd_250 = Filter_Pass(wave, flux, spire_250_wv, spire_250_rs)
if spire_350 is None:
fd_350 = np.nan
else:
spire_350_wv = spire_350[0]
spire_350_rs = spire_350[1]
fd_350 = Filter_Pass(wave, flux, spire_350_wv, spire_350_rs)
if spire_500 is None:
fd_500 = np.nan
else:
spire_500_wv = spire_500[0]
spire_500_rs = spire_500[1]
fd_500 = Filter_Pass(wave, flux, spire_500_wv, spire_500_rs)
photo_wave = np.array([70., 100., 160., 250., 350., 500.])
photo_flxd = np.array([fd_70, fd_100, fd_160, fd_250, fd_350, fd_500])
return (photo_wave, photo_flxd)
#Func_end
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Mar. 28, 2016 #
#-------------------------------------#
#From: wrk_Plot_Models.ipynb
def Load_SED_old(sedfile, sed_range=[7, 13], spc_range=[13, None], spc_binrsl=10.):
'''
This function is to load the SED data and compile it for use.
Parameters
----------
sedfile : str
The full path of sed file.
sed_range : list, default: [7, 13]
The min and max+1 index of the sed data points.
spc_range : list, default: [13, None]
The min and max+1 index of the spectra data points.
spc_binrsl : float
The resolution to rebin the spectra data.
Returns
-------
sed_package : dict
The dictionary storing:
sed_cb : combined sed
sed : photometric data
spc : spectra data
spc_raw : unbinned spectra data
Notes
-----
None.
'''
sedtb = Table.read(sedfile, format="ascii")
wave = sedtb["wavelength"].data
flux = sedtb["flux"].data
sigma = sedtb["sigma"].data
sedwave = wave[sed_range[0]:sed_range[1]]
sedflux = flux[sed_range[0]:sed_range[1]]
sedsigma = sigma[sed_range[0]:sed_range[1]]
spcwave_raw = wave[spc_range[0]:spc_range[1]]
spcflux_raw = flux[spc_range[0]:spc_range[1]]
spcsigma_raw = sigma[spc_range[0]:spc_range[1]]
spcbin = SpectraRebin(spc_binrsl, spcwave_raw, spcflux_raw, spcsigma_raw)
spcwave = spcbin[0]
spcflux = spcbin[1]
spcsigma = spcbin[2]
wave_cb = np.concatenate([sedwave, spcwave])
flux_cb = np.concatenate([sedflux, spcflux])
sigma_cb = np.concatenate([sedsigma, spcsigma])
sed_cb = (wave_cb, flux_cb, sigma_cb)
sed = (sedwave, sedflux, sedsigma)
spc = (spcwave, spcflux, spcsigma)
spc_raw = (spcwave_raw, spcflux_raw, spcsigma_raw)
sed_package = {
'sed_cb':sed_cb,
'sed':sed,
'spc':spc,
'spc_raw':spc_raw,
}
return sed_package
#Func_end
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Jan. 8, 2017 #
#-------------------------------------#
def Load_SED(sedfile):
'''
This function is to load the SED data and compile it for use.
Parameters
----------
sedfile : str
The full path of sed file.
Returns
-------
sed_package : dict
The dictionary storing:
sed_cb : combined sed; (wave, flux, sigma)
sed : photometric data; (wave, flux, sigma, band)
spc : spectra data; (wave, flux, sigma)
Notes
-----
The returned SED data are in the lists instead of the numpy.array.
'''
sedtb = Table.read(sedfile, format="ascii")
wave = sedtb["wavelength"].data
flux = sedtb["flux"].data
sigma = sedtb["sigma"].data
band = sedtb["band"].data.astype("str")
fltr_spc_use = band == "0" # Spectral data used
fltr_spc_drp = band == "-1" # Spectral data dropped
fltr_pht = np.logical_not(fltr_spc_use | fltr_spc_drp)
phtwave = wave[fltr_pht]
phtflux = flux[fltr_pht]
phtsigma = sigma[fltr_pht]
phtband = band[fltr_pht]
spcwave = wave[fltr_spc_use]
spcflux = flux[fltr_spc_use]
spcsigma = sigma[fltr_spc_use]
sedwave = np.concatenate([phtwave, spcwave])
sedflux = np.concatenate([phtflux, spcflux])
sedsigma = np.concatenate([phtsigma, spcsigma])
sed = (list(sedwave), list(sedflux), list(sedsigma))
pht = (list(phtwave), list(phtflux), list(phtsigma), list(phtband))
spc = (list(spcwave), list(spcflux), list(spcsigma))
sed_package = {
'sed':sed,
'pht':pht,
'spc':spc,
}
return sed_package
#Func_end
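#-> Usage sketch (illustrative only): the file name below is a hypothetical
#   placeholder. Any table readable by astropy with the columns "wavelength",
#   "flux", "sigma" and "band" works, where band "0" marks spectral points in
#   use and "-1" marks dropped spectral points.
def _example_Load_SED(sedfile="mock_target.sed"):
    pck = Load_SED(sedfile)
    sedwave, sedflux, sedsigma = pck["sed"] # photometry and spectrum combined
    phtwave, phtflux, phtsigma, phtband = pck["pht"] # photometric points only
    spcwave, spcflux, spcsigma = pck["spc"] # spectral points only
    return pck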
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Jan. 9, 2017 #
#-------------------------------------#
def SED_select_band(sed, bandList_use=[], bandList_ignore=[], silent=True):
"""
Select the SED from the input band list.
Parameters
----------
sed : tuple
The tuple of the photometric SED data; (wave, flux, sigma, band).
bandList_use : list
The list of the bands that are used.
bandList_ignore : list
The list of the bands that are ignored.
silent : bool
Stop printing out information if True, by default.
Returns
-------
sed_select : tuple
The selected SED data that are used.
Notes
-----
None.
"""
wave = []
flux = []
sigma = []
band = []
if not len(bandList_use):
bandList_use = list(sed[3])
for bn in bandList_ignore:
if bn in bandList_use:
bandList_use.remove(bn)
else:
if not silent:
print("Warning: Band {0} is not included in the SED.".format(bn))
for bn in bandList_use:
idx = sed[3].index(bn)
if np.isnan(sed[1][idx]*sed[2][idx]):
if not silent:
print("Warning: Band {0} contains bad data!".format(bn))
else:
wave.append(sed[0][idx])
flux.append(sed[1][idx])
sigma.append(sed[2][idx])
band.append(sed[3][idx])
if len(band) == 0:
print("Warning: There is no band in use!")
sed_select = (wave, flux, sigma, band)
return sed_select
#Func_end
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Feb. 11, 2017 #
#-------------------------------------#
def SED_to_restframe(sed, redshift):
"""
Transform the input SED into its rest frame according to the given redshift.
Parameters
----------
sed : tuple
        The input SED data in the observed frame. Assume it consists of (wave, flux,
sigma, ...).
redshift : float
The redshift of the SED.
Returns
-------
sed_rest : tuple
        The output SED data in the rest frame. The tuple consists of all the same
data as the input except the first three items.
Notes
-----
None.
"""
wave = np.array(sed[0])
flux = np.array(sed[1])
sigma = np.array(sed[2])
fltr = sigma == -1
sigma[fltr] = sigma[fltr] / (1 + redshift)
sed_rest = list(sed)
sed_rest[0] = list( wave / (1 + redshift) )
sed_rest[1] = list( flux * (1 + redshift) )
sed_rest[2] = list( sigma * (1 + redshift) )
sed_rest = tuple(sed_rest)
return sed_rest
#Func_end
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Feb. 11, 2017 #
#-------------------------------------#
def SED_to_obsframe(sed, redshift):
"""
Transform the input SED into the observed frame according to the given redshift.
Parameters
----------
sed : tuple
        The input SED data in the rest frame. Assume it consists of (wave, flux,
sigma, ...).
redshift : float
The redshift of the SED.
Returns
-------
sed_obs : tuple
        The output SED data in the observed frame. The tuple consists of all the same
data as the input except the first three items.
Notes
-----
None.
"""
wave = np.array(sed[0])
flux = np.array(sed[1])
sigma = np.array(sed[2])
fltr = sigma == -1
sigma[fltr] = sigma[fltr] * (1 + redshift)
sed_obs = list(sed)
sed_obs[0] = list( wave * (1 + redshift) )
sed_obs[1] = list( flux / (1 + redshift) )
sed_obs[2] = list( sigma / (1 + redshift) )
sed_obs= tuple(sed_obs)
return sed_obs
#Func_end
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Mar. 25, 2018 #
#-------------------------------------#
def WaveToMicron(wave, units):
"""
Convert the wavelength units to micron.
Parameters
----------
wave : float array
The array of wavelength with units consistent with the 2nd parameter.
units : string
The units of the input wavelength. Currently supported units are:
"cm", "mm", "micron", "angstrom", "Hz", "MHz", "GHz"
Returns
-------
wave : float array
The array of wavelength with units micron.
"""
wave = np.atleast_1d(wave)
waveUnits = ["cm", "mm", "micron", "angstrom"]
freqUnits = ["Hz", "MHz", "GHz"]
if units in waveUnits:
pass
elif units in freqUnits:
wave = 1. / wave
else:
raise ValueError("The units ({0}) is not recognised!".format(units))
coeffDict = {# The coefficient to convert the units to micron
"cm": 1.e4,
"mm": 1.e3,
"micron": 1.,
"angstrom": 1e-4,
"Hz": 2.99792458e14,
"MHz": 2.99792458e8,
"GHz": 2.99792458e5,
}
wave = coeffDict[units] * wave
return wave
def WaveFromMicron(wave, units):
"""
    Convert the wavelength units from micron to the assigned units.
Parameters
----------
wave : float array
The array of wavelength, units: micron.
units : string
The units of the output wavelength. Currently supported units are:
"cm", "mm", "micron", "angstrom", "Hz", "MHz", "GHz"
Returns
-------
wave : float array
The array of wavelength with units consistent with the 2nd parameter.
"""
wave = np.atleast_1d(wave)
waveUnits = ["cm", "mm", "micron", "angstrom"]
freqUnits = ["Hz", "MHz", "GHz"]
if units in waveUnits:
pass
elif units in freqUnits:
wave = 1. / wave
else:
raise ValueError("The units ({0}) is not recognised!".format(units))
    coeffDict = {# The coefficient to convert the units from micron to the assigned units
"cm": 1.e-4,
"mm": 1.e-3,
"micron": 1.,
"angstrom": 1e4,
"Hz": 2.99792458e14,
"MHz": 2.99792458e8,
"GHz": 2.99792458e5,
}
wave = coeffDict[units] * wave
return wave
#Func_end
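#-> Usage sketch (illustrative only): round-tripping a frequency through the two
#   converters above. The 230 GHz value is an arbitrary placeholder.
def _example_wave_conversion():
    w_mic = WaveToMicron(230.0, "GHz") # ~1303.4 micron
    freq = WaveFromMicron(w_mic, "GHz") # back to ~230 GHz
    return w_mic, freq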
if __name__ == "__main__":
wave = np.arange(8, dtype="float")
flux = np.arange(8, dtype="float")
sigma = np.arange(8, dtype="float")
band = (np.arange(8)).astype("S")
flux[4] = np.nan
print band
bandList_use = []
bandList_ignore = ["1", "6"]
sed = (list(wave), list(flux), list(sigma), list(band))
sed_select = SED_select_band(sed, bandList_use, bandList_ignore, False)
sed_rest = SED_to_restframe(sed_select, 1.0)
print sed
print sed_select
print sed_rest
| 19,951 | 29.507645 | 147 | py |
Fitter | Fitter-master/sedfit/mcmc/mcmc_multinest.py | import os
from sys import platform
import numpy as np
import pymultinest
import threading, subprocess
from .. import fit_functions as sedff
#The log_likelihood function
lnlike = sedff.logLFunc
#The log_likelihood function using Gaussian process regression
lnlike_gp = sedff.logLFunc_gp
#->Auxiliary functions
def show(filepath):
"""
Open the output (pdf) file for the user. Provided by PyMultiNest demo.
"""
if os.name == 'mac' or platform == 'darwin': subprocess.call(('open', filepath))
elif os.name == 'nt' or platform == 'win32': os.startfile(filepath)
elif platform.startswith('linux') : subprocess.call(('xdg-open', filepath))
#->The object to run PyMultiNest
class MultiNestModel(object):
"""
The MCMC model for MultiNest.
"""
def __init__(self, data, model, ModelUnct=False, unctDict=None):
self.__data = data
self.__model = model
self.__modelunct = ModelUnct
self.__unctDict = unctDict
if ModelUnct:
self.__dim = len(model.get_parVaryList()) + 3
#self.__dim = len(model.get_parVaryList()) + 2
print("[MultiNestModel]: ModelUnct is on!")
else:
self.__dim = len(model.get_parVaryList())
print "[MultiNestModel]: ModelUnct is off!"
def prior(self, cube, ndim, nparams):
"""
The prior of all the parameters are uniform.
"""
parDict = self.__model.get_modelParDict()
unctDict = self.__unctDict
counter = 0
for modelName in self.__model._modelList:
parFitDict = parDict[modelName]
for parName in parFitDict.keys():
if parFitDict[parName]["vary"]:
r1, r2 = parFitDict[parName]["range"]
cube[counter] = (r2 - r1) * cube[counter] + r1 #Uniform distribution
counter += 1
else:
pass
#If the model uncertainty is concerned.
if self.__modelunct:
if unctDict is None:
raise ValueError("No uncertainty model parameter range is provided!")
if self.__data.check_dsData():
rf1, rf2 = unctDict["lnf"]
cube[counter] = (rf2 - rf1) * cube[counter] + rf1
counter += 1
            #If there is continuous data, the residual correlation is considered.
if self.__data.check_csData():
ra1, ra2 = unctDict["lna"]
rt1, rt2 = unctDict["lntau"]
cube[counter] = (ra2 - ra1) * cube[counter] + ra1
counter += 1
cube[counter] = (rt2 - rt1) * cube[counter] + rt1
def loglike(self, cube, ndim, nparams):
params = []
for i in range(ndim):
params.append(cube[i])
#print("The cube is: {0}".format(params))
if self.__modelunct:
return lnlike_gp(params, self.__data, self.__model)
else:
return lnlike(params, self.__data, self.__model)
def run(self, **kwargs):
pymultinest.run(self.loglike, self.prior, self.__dim, self.__dim, **kwargs)
    def watch(self, interval, *args, **kwargs):
        return threading.Timer(interval, show, *args, **kwargs)
def ProgressPlotter(self, *args, **kwargs):
"""
        Generate the ProgressPlotter object, which is a subclass of ProgressWatcher
whose parameters are:
n_params,
interval_ms=200,
outputfiles_basename="chains/1-"
"""
return pymultinest.ProgressPlotter(self.__dim, *args, **kwargs)
def Analyzer(self, outputfiles_basename="chains/1-"):
"""
Generate the Analyser object.
"""
self.analyzer = pymultinest.Analyzer(self.__dim, outputfiles_basename)
return self.analyzer
def PlotMarginalModes(self):
"""
Generate the PlotMarginalModes object.
"""
return pymultinest.PlotMarginalModes(self.analyzer)
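#-> Usage sketch (illustrative only): one minimal way to drive the wrapper above.
#   sedData and sedModel are assumed to be a prepared DataSet and ModelCombiner;
#   the output basename and sampler settings are arbitrary placeholders (the
#   chains/ directory must already exist).
def _example_run_multinest(sedData, sedModel, basename="chains/mock-"):
    mnm = MultiNestModel(sedData, sedModel)
    mnm.run(outputfiles_basename=basename, n_live_points=400, resume=False, verbose=True)
    an = mnm.Analyzer(outputfiles_basename=basename)
    return an.get_best_fit() # best-fit parameters and their log-likelihood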
| 3,998 | 35.027027 | 88 | py |
Fitter | Fitter-master/sedfit/mcmc/mcmc_emcee.py | import acor
import emcee
import corner
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from scipy.stats import truncnorm
from time import time
from ..SED_Toolkit import WaveFromMicron, WaveToMicron
from .. import fit_functions as sedff
#from .. import fit_functions_erf as sedff
ls_mic = 2.99792458e14 # micron/s
#The log_likelihood function
lnlike = sedff.logLFunc
#The log_likelihood function using Gaussian process regression
lnlike_gp = sedff.logLFunc_gp
def lnprior(params, data, model, ModelUnct, unctDict=None):
"""
Calculate the ln prior probability.
"""
lnprior = 0.0
parIndex = 0
parDict = model.get_modelParDict()
for modelName in model._modelList:
parFitDict = parDict[modelName]
for parName in parFitDict.keys():
if parFitDict[parName]["vary"]:
parValue = params[parIndex]
parIndex += 1
pr1, pr2 = parFitDict[parName]["range"]
if (parValue < pr1) or (parValue > pr2):
lnprior -= np.inf
else:
pass
#If the model uncertainty is concerned.
if ModelUnct:
if data.check_dsData():
lnf = params[parIndex]
parIndex += 1
if (lnf < unctDict["lnf"][0]) or (lnf > unctDict["lnf"][1]):
lnprior -= np.inf
if data.check_csData():
lna, lntau = params[parIndex:]
if (lna < unctDict["lna"][0]) or (lna > unctDict["lna"][1]):
lnprior -= np.inf
if (lntau < unctDict["lntau"][0]) or (lntau > unctDict["lntau"][1]):
lnprior -= np.inf
return lnprior
def lnprob(params, data, model, ModelUnct, unctDict):
"""
Calculate the probability at the parameter spacial position.
"""
lp = lnprior(params, data, model, ModelUnct, unctDict)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(params, data, model)
def lnprob_gp(params, data, model, ModelUnct, unctDict):
"""
Calculate the probability at the parameter spacial position.
The likelihood function consider the Gaussian process regression.
"""
lp = lnprior(params, data, model, ModelUnct, unctDict)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike_gp(params, data, model)
class EmceeModel(object):
"""
The MCMC model for emcee.
"""
def __init__(self, data, model, ModelUnct=False, unctDict=None, sampler=None):
self.__data = data
self.__model = model
self.__modelunct = ModelUnct
self.__unctDict = unctDict
self.__sampler = sampler
print("[EmceeModel]: {0}".format(sampler))
edim = 0
if ModelUnct: #If the uncertainty is modeled, there are some extra parameters.
if data.check_dsData():
edim += 1 #If there is discrete data, the lnf is added.
if data.check_csData():
edim += 2 #If there is continuous data, the lna and lntau are added.
print("[EmceeModel]: ModelUnct is on!")
else: #Else, the number of dimensions is the number of model parameters.
print "[EmceeModel]: ModelUnct is off!"
self.__dim = len(model.get_parVaryList()) + edim
def from_prior(self):
"""
The prior of all the parameters are uniform.
"""
parList = []
parDict = self.__model.get_modelParDict()
unctDict = self.__unctDict
for modelName in self.__model._modelList:
parFitDict = parDict[modelName]
for parName in parFitDict.keys():
if parFitDict[parName]["vary"]:
parRange = parFitDict[parName]["range"]
parType = parFitDict[parName]["type"]
if parType == "c":
r1, r2 = parRange
p = (r2 - r1) * np.random.rand() + r1 #Uniform distribution
elif parType == "d":
p = np.random.choice(parRange, 1)[0]
else:
raise TypeError("The parameter type '{0}' is not recognised!".format(parType))
parList.append(p)
else:
pass
#If the uncertainty is modeled, there are extra parameters.
if self.__modelunct:
if unctDict is None:
raise ValueError("No uncertainty model parameter range is provided!")
if self.__data.check_dsData():
rf1, rf2 = unctDict["lnf"]
lnf = (rf2 - rf1) * np.random.rand() + rf1
parList.append(lnf)
            #If there is continuous data, the residual correlation is considered.
if self.__data.check_csData():
ra1, ra2 = unctDict["lna"]
rt1, rt2 = unctDict["lntau"]
lna = (ra2 - ra1) * np.random.rand() + ra1
lntau = (rt2 - rt1) * np.random.rand() + rt1
parList.append(lna)
parList.append(lntau)
parList = np.array(parList)
return parList
def EnsembleSampler(self, nwalkers, **kwargs):
if self.__modelunct:
self.__lnprob = lnprob_gp
else:
self.__lnprob = lnprob
self.sampler = emcee.EnsembleSampler(nwalkers, self.__dim, self.__lnprob,
args=[self.__data, self.__model, self.__modelunct, self.__unctDict],
**kwargs)
self.__nwalkers = nwalkers
self.__sampler = "EnsembleSampler"
return self.sampler
def PTSampler(self, ntemps, nwalkers, **kwargs):
if self.__modelunct:
self.__lnlike = lnlike_gp
else:
self.__lnlike = lnlike
self.sampler = emcee.PTSampler(ntemps, nwalkers, self.__dim,
logl=self.__lnlike, logp=lnprior,
loglargs=[self.__data, self.__model],
logpargs=[self.__data, self.__model,
self.__modelunct, self.__unctDict],
**kwargs)
self.__ntemps = ntemps
self.__nwalkers = nwalkers
self.__sampler = "PTSampler"
return self.sampler
def p_ball(self, p0, ratio=5e-2, nwalkers=None):
"""
Generate the positions of parameters around the input positions.
        The scipy.stats.truncnorm is used to generate the truncated normal distribution
        of the parameters within the prior ranges.
"""
ndim = self.__dim
if nwalkers is None:
nwalkers = self.__nwalkers
pRange = self.__model.get_parVaryRanges()
unctDict = self.__unctDict
#If the uncertainty is modeled, there are extra parameters.
if self.__modelunct:
if unctDict is None:
raise ValueError("No uncertainty model parameter range is provided!")
if self.__data.check_dsData():
pRange.append(unctDict["lnf"]) #For lnf
            #If there is continuous data, the residual correlation is considered.
if self.__data.check_csData():
pRange.append(unctDict["lna"]) #For lna
pRange.append(unctDict["lntau"]) #For lntau
pRange = np.array(pRange)
sampler = self.__sampler
if sampler == "EnsembleSampler":
p = np.zeros((nwalkers, ndim))
for d in range(ndim):
r0, r1 = pRange[d]
std = (r1 - r0) * ratio
loc = p0[d]
a = (r0 - loc) / std
b = (r1 - loc) /std
p[:, d] = truncnorm.rvs(a=a, b=b, loc=loc, scale=std, size=nwalkers)
if sampler == "PTSampler":
ntemps = self.__ntemps
p = np.zeros((ntemps, nwalkers, ndim))
for t in range(ntemps):
for d in range(ndim):
r0, r1 = pRange[d]
std = (r1 - r0) * ratio
loc = p0[d]
a = (r0 - loc) / std
b = (r1 - loc) /std
p[t, :, d] = truncnorm.rvs(a=a, b=b, loc=loc, scale=std, size=nwalkers)
return p
def p_prior(self):
"""
Generate the positions in the parameter space from the prior.
For EnsembleSampler, the result p0 shape is (nwalkers, dim).
For PTSampler, the result p0 shape is (ntemps, nwalker, dim).
"""
sampler = self.__sampler
nwalkers = self.__nwalkers
dim = self.__dim
if sampler == "EnsembleSampler":
p0 = [self.from_prior() for i in range(nwalkers)]
elif sampler == "PTSampler":
ntemps = self.__ntemps
p0 = np.zeros((ntemps, nwalkers, dim))
for loop_t in range(ntemps):
for loop_w in range(nwalkers):
p0[loop_t, loop_w, :] = self.from_prior()
else:
raise ValueError("The sampler '{0}' is unrecognised!".format(sampler))
return p0
def p_logl_max(self, chain=None, lnlike=None, QuietMode=True):
"""
        Find the position in the sampled parameter space where the likelihood is
        the highest.
"""
sampler = self.__sampler
if (not chain is None) and (not lnlike is None):
if not QuietMode:
print("The chain and lnlike are provided!")
else:
if sampler == "EnsembleSampler":
chain = self.sampler.chain
lnlike = self.sampler.lnprobability
else:
chain = self.sampler.chain[0, ...]
lnlike = self.sampler.lnlikelihood[0, ...]
idx = lnlike.ravel().argmax()
p = chain.reshape(-1, self.__dim)[idx]
return p
def p_logl_min(self):
"""
        Find the position in the sampled parameter space where the likelihood is
        the lowest.
"""
sampler = self.__sampler
if sampler == "EnsembleSampler":
chain = self.sampler.chain
lnlike = self.sampler.lnprobability
else:
chain = self.sampler.chain[0, ...]
lnlike = self.sampler.lnlikelihood[0, ...]
idx = lnlike.ravel().argmin()
p = chain.reshape(-1, self.__dim)[idx]
return p
def get_logl(self, p):
"""
Get the likelihood at the given position.
"""
sampler = self.__sampler
if sampler == "EnsembleSampler":
return self.__lnprob(p, self.__data, self.__model, self.__modelunct,
self.__unctDict)
elif sampler == "PTSampler":
return self.__lnlike(p, self.__data, self.__model)
else:
raise ValueError("'{0}' is not recognised!".format(sampler))
def run_mcmc(self, pos, iterations, printFrac=1, quiet=False, **kwargs):
"""
Run the MCMC chain.
This function just wraps up the sampler.sample() so that there is output
in the middle of the run.
"""
sampler = self.__sampler
if not quiet:
print("MCMC ({0}) is running...".format(sampler))
t0 = time()
#Notice that the third parameters yielded by EnsembleSampler and PTSampler are different.
for i, (pos, lnprob, logl) in enumerate(self.sampler.sample(pos, iterations=iterations, **kwargs)):
if not (i + 1) % int(printFrac * iterations):
if quiet:
pass
else:
progress = 100. * (i + 1) / iterations
if sampler == "EnsembleSampler":
lnlike = lnprob
pos0 = pos
elif sampler == "PTSampler":
#->Only choose the zero temperature chain
lnlike = logl[0, ...]
pos0 = pos[0, ...]
idx = lnlike.argmax()
lmax = lnlike[idx]
lmin = lnlike.min()
pmax = pos0.reshape((-1, self.__dim))[idx]
pname = self.__model.get_parVaryNames(latex=False)
print("-----------------------------")
print("[{0:<4.1f}%] lnL_max: {1:.3e}, lnL_min: {2:.3e}".format(progress, lmax, lmin))
for p, name in enumerate(pname):
print("{0:18s} {1:10.3e}".format(name, pmax[p]))
print( "**MCMC time elapsed: {0:.3f} min".format( (time()-t0)/60. ) )
if not quiet:
print("MCMC finishes!")
return pos, lnprob, logl
def integrated_time(self):
"""
Estimate the integrated autocorrelation time of a time series.
Since it seems there is something wrong with the sampler.integrated_time(),
        I have to calculate it myself using the acor package.
"""
sampler = self.__sampler
if sampler == "EnsembleSampler":
chain = self.sampler.chain
elif sampler == "PTSampler":
chain = self.sampler.chain[0, ...]
else:
raise ValueError("{0} is an unrecognised sampler!".format(sampler))
tauParList = []
for npar in range(self.__dim):
tauList = []
for nwal in range(self.__nwalkers):
pchain = chain[nwal, :, npar]
try:
tau, mean, sigma = acor.acor(pchain)
except:
tau = np.nan
tauList.append(tau)
tauParList.append(tauList)
return tauParList
def accfrac_mean(self):
"""
Return the mean acceptance fraction of the sampler.
"""
return np.mean(self.sampler.acceptance_fraction)
def posterior_sample(self, burnin=0, fraction=25):
"""
Return the samples merging from the chains of all the walkers.
"""
sampler = self.sampler
nwalkers = self.__nwalkers
if self.__sampler == "EnsembleSampler":
chain = sampler.chain
lnprob = sampler.lnprobability[:, -1]
elif self.__sampler == "PTSampler":
chain = np.squeeze(sampler.chain[0, ...])
lnprob = np.squeeze(sampler.lnprobability[0, :, -1])
if burnin > (chain.shape[1]/2.0):
raise ValueError("The burn-in length ({0}) is too long!".format(burnin))
if fraction>0:
lnpLim = np.percentile(lnprob, fraction)
fltr = lnprob >= lnpLim
print("ps: {0}/{1} walkers are selected.".format(np.sum(fltr), nwalkers))
samples = chain[fltr, burnin:, :].reshape((-1, self.__dim))
else:
samples = chain[:, burnin:, :].reshape((-1, self.__dim))
return samples
def p_median(self, ps=None, **kwargs):
"""
Return the median value of the parameters according to their posterior
samples.
"""
if ps is None:
ps = self.posterior_sample(**kwargs)
else:
pass
parMedian = np.median(ps, axis=0)
return parMedian
def p_uncertainty(self, low=1, center=50, high=99, burnin=50, ps=None, **kwargs):
"""
Return the uncertainty of each parameter according to its posterior sample.
"""
if ps is None:
ps = self.posterior_sample(burnin=burnin, **kwargs)
else:
pass
parRange = np.percentile(ps, [low, center, high], axis=0)
return parRange
def print_parameters(self, truths=None, low=1, center=50, high=99, **kwargs):
"""
Print the ranges of the parameters according to their posterior samples
and the values of the maximum a posterior (MAP).
"""
nameList = self.__model.get_parVaryNames(latex=False)
parRange = self.p_uncertainty(low, center, high, **kwargs)
if self.__modelunct:
if self.__data.check_dsData():
nameList.append("lnf")
if self.__data.check_csData():
nameList.append("lna")
nameList.append("lntau")
pMAP = self.p_logl_max()
ttList = ["Name", "L ({0}%)".format(low),
"C ({0}%)".format(center),
"H ({0}%)".format(high), "MAP"]
if not truths is None:
ttList.append("Truth")
tt = " ".join(["{0:12s}".format(i) for i in ttList])
print("{:-<74}".format(""))
print(tt)
for d in range(self.__dim):
plow = parRange[0, d]
pcen = parRange[1, d]
phgh = parRange[2, d]
pmax = pMAP[d]
name = nameList[d]
if (pmax < plow) or (pmax > phgh):
print("[MCMC Warning]: The best-fit '{0}' is not consistent with its posterior sample".format(name))
pl = [plow, pcen, phgh]
info = "{0:12s} {1[0]:<12.3e} {1[1]:<12.3e} {1[2]:<12.3e} {2:<12.3e}".format(name, pl, pmax)
if truths is None:
print(info)
else:
print(info+" {0:<12.3e}".format(truths[d]))
p_logl_max = self.p_logl_max()
print("lnL_max: {0:.3e}".format(self.get_logl(p_logl_max)))
def Save_Samples(self, filename, **kwargs):
"""
Save the posterior samples.
"""
samples = self.posterior_sample(**kwargs)
np.savetxt(filename, samples, delimiter=",")
def Save_BestFit(self, filename, low=1, center=50, high=99, **kwargs):
nameList = self.__model.get_parVaryNames(latex=False)
parRange = self.p_uncertainty(low, center, high, **kwargs)
if self.__modelunct:
if self.__data.check_dsData():
nameList.append("lnf")
if self.__data.check_csData():
nameList.append("lna")
nameList.append("lntau")
pMAP = self.p_logl_max()
ttList = ["Name", "L ({0}%)".format(low),
"C ({0}%)".format(center),
"H ({0}%)".format(high), "MAP"]
tt = " ".join(["{0:12s}".format(i) for i in ttList])
fp = open(filename, "w")
fp.write(tt+"\n")
for d in range(self.__dim):
plow = parRange[0, d]
pcen = parRange[1, d]
phgh = parRange[2, d]
pmax = pMAP[d]
name = nameList[d]
pl = [plow, pcen, phgh]
info = "{0:12s} {1[0]:<12.3e} {1[1]:<12.3e} {1[2]:<12.3e} {2:<12.3e}".format(name, pl, pmax)
fp.write(info+"\n")
p_logl_max = self.p_logl_max()
fp.write("#lnL_max: {0:.3e}".format(self.get_logl(p_logl_max)))
def plot_corner(self, filename=None, burnin=0, fraction=25, ps=None, nuisance=True, **kwargs):
"""
        Plot the corner diagram that illustrates the posterior probability distribution
of each parameter.
"""
if ps is None:
ps = self.posterior_sample(burnin, fraction)
parname = self.__model.get_parVaryNames()
if self.__modelunct:
if self.__data.check_dsData():
parname.append(r"$\mathrm{ln}f$")
if self.__data.check_csData():
parname.append(r"$\mathrm{ln}a$")
parname.append(r"$\mathrm{ln}\tau$")
nNui = 3 #The number of nuisance parameters
else:
nNui = 0
if nuisance:
dim = self.__dim
else:
dim = self.__dim - nNui
fig = corner.corner(ps[:, 0:dim], labels=parname[0:dim], **kwargs)
if filename is None:
return fig
else:
plt.savefig(filename)
plt.close()
def plot_fit(self, ps=None, filename=None, nSamples=100, truths=None, FigAx=None,
xlim=None, ylim=None, showLegend=True, cList=None, cLineKwargs={},
tLineKwargs={}, xUnits="micron", yUnits="fnu", **kwargs):
"""
Plot the best-fit model and the data.
"""
sedData = self.__data
sedModel = self.__model
#-->Plot the SED data
fig, ax = sedData.plot_sed(FigAx=FigAx, xUnits=xUnits, yUnits=yUnits)
#-->Plot the models
if cList is None:
cList = ["green", "magenta", "orange", "blue", "yellow", "cyan"]
ncolor = len(cList)
#->Plot the sampled model variability
if ps is None:
ps = self.posterior_sample(**kwargs)
cKwargs = { #The line properties of the model components.
"linestyle": cLineKwargs.get("ls_uc", "--"),
"alpha": cLineKwargs.get("alpha_uc", 0.1),
"linewidth": cLineKwargs.get("lw_uc", 0.5),
}
tKwargs = { #The line properties of the model total.
"linestyle": tLineKwargs.get("ls_uc", "-"),
"alpha": tLineKwargs.get("alpha_uc", 0.1),
"linewidth": tLineKwargs.get("lw_uc", 0.5),
"color": tLineKwargs.get("color", "red"),
}
for pars in ps[np.random.randint(len(ps), size=nSamples)]:
sedModel.updateParList(pars)
sedModel.plot(FigAx=(fig, ax), colorList=cList, DisplayPars=False,
cKwargs=cKwargs, tKwargs=tKwargs, useLabel=False,
xUnits=xUnits, yUnits=yUnits)
#->Plot the best-fit model
#Plot the data and model photometric points on the top of lines.
pcnt = self.p_median(ps, **kwargs)
waveModel = sedModel.get_xList()
sedModel.updateParList(pcnt)
ycnt = sedModel.combineResult() #The best-fit model
yPhtC = np.array( sedData.model_pht(waveModel, ycnt) ) #The best-fit band average flux density
cKwargs = { #The line properties of the model components.
"linestyle": cLineKwargs.get("ls_bf", "--"),
"alpha": cLineKwargs.get("alpha_bf", 1.0),
"linewidth": cLineKwargs.get("lw_bf", 1.0),
}
tKwargs = { #The line properties of the model total.
"linestyle": tLineKwargs.get("ls_bf", "-"),
"alpha": tLineKwargs.get("alpha_bf", 1.0),
"linewidth": tLineKwargs.get("lw_bf", 3.0),
"color": tLineKwargs.get("color", "red"),
}
sedModel.plot(FigAx=(fig, ax), colorList=cList, DisplayPars=False,
cKwargs=cKwargs, tKwargs=tKwargs,
xUnits=xUnits, yUnits=yUnits)
xPhtC = WaveFromMicron(sedData.get_dsList("x"), xUnits)
y_conv = ls_mic / WaveToMicron(xPhtC, xUnits) * 1.e-26
if yUnits == "fnu":
pass
elif yUnits == "nufnu":
yPhtC = y_conv * yPhtC # Convert to erg/s/cm^2
else:
raise ValueError("The yUnits ({0}) is not recognised!".format(yUnits))
ax.plot(xPhtC, yPhtC, marker="s", color="k", mfc="none",
mec="k", mew=1.5, alpha=0.8, linestyle="none", label="Model")
#->Plot the truth model if provided
if not truths is None:
sedModel.updateParList(truths)
sedModel.plot(FigAx=(fig, ax), colorList=cList, DisplayPars=False,
xUnits=xUnits, yUnits=yUnits)
#->Setup the figure
if not xlim is None:
ax.set_xlim(xlim)
else:
xmin = np.min(xPhtC) / 5.
xmax = np.max(xPhtC) * 5.
ax.set_xlim([xmin, xmax])
if not ylim is None:
ax.set_ylim(ylim)
else:
ymin = np.min(yPhtC) / 5.
ymax = np.max(yPhtC) * 5.
ax.set_ylim([ymin, ymax])
if filename is None:
return (fig, ax)
else:
plt.savefig(filename, bbox_inches="tight")
plt.close()
def plot_chain(self, filename=None, truths=None):
dim = self.__dim
sampler = self.sampler
nameList = self.__model.get_parVaryNames()
if self.__modelunct:
if self.__data.check_dsData():
nameList.append(r"$\mathrm{ln}f$")
if self.__data.check_csData():
nameList.append(r"$\mathrm{ln}a$")
nameList.append(r"$\mathrm{ln}\tau$")
if self.__sampler == "EnsembleSampler":
chain = sampler.chain
elif self.__sampler == "PTSampler":
chain = np.squeeze(sampler.chain[0, ...])
fig, axes = plt.subplots(dim, 1, sharex=True, figsize=(8, 3*dim))
for loop in range(dim):
axes[loop].plot(chain[:, :, loop].T, color="k", alpha=0.4)
axes[loop].yaxis.set_major_locator(MaxNLocator(5))
if not truths is None:
axes[loop].axhline(truths[loop], color="r", lw=2)
axes[loop].set_ylabel(nameList[loop], fontsize=24)
if filename is None:
return (fig, axes)
else:
plt.savefig(filename)
plt.close()
def plot_lnlike(self, filename=None, iterList=[0.5, 0.8, 1.0], **kwargs):
if self.__sampler == "EnsembleSampler":
lnprob = self.sampler.lnprobability
elif self.__sampler == "PTSampler":
lnprob = self.sampler.lnprobability[0, ...]
_, niter = lnprob.shape
iterList = np.around(niter * np.array(iterList)) - 1
fig = plt.figure()
for i in iterList:
l = lnprob[:, int(i)]
plt.hist(l[~np.isinf(l)], label="iter: {0}".format(i), **kwargs)
plt.legend(loc="upper left")
if filename is None:
ax = plt.gca()
return (fig, ax)
else:
plt.savefig(filename)
plt.close()
def reset(self):
"""
Reset the sampler, for completeness.
"""
self.sampler.reset()
def diagnose(self):
"""
Diagnose whether the MCMC run is reliable.
"""
nameList = self.__model.get_parVaryNames(latex=False)
if self.__modelunct:
if self.__data.check_dsData():
nameList.append("lnf")
if self.__data.check_csData():
nameList.append("lna")
nameList.append("lntau")
print("---------------------------------")
print("Mean acceptance fraction: {0:.3f}".format(self.accfrac_mean()))
print("PN : ACT (min-max)")
it = self.integrated_time()
for loop in range(self.__dim):
itPar = it[loop]
print("{0:9s}: {i[0]:.3f}-{i[1]:.3f}".format(nameList[loop], i=[min(itPar), max(itPar)]))
def sampler_type(self):
return self.__sampler
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
def __del__(self):
del self.__data
del self.__model
del self.__modelunct
del self.__unctDict
del self.__sampler
parList = self.__dict__.keys()
if "sampler" in parList:
del self.sampler
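#-> Usage sketch (illustrative only): one minimal way to drive the wrapper above.
#   sedData and sedModel are assumed to be a prepared DataSet and ModelCombiner;
#   the walker, iteration and burn-in numbers are arbitrary placeholders.
def _example_run_emcee(sedData, sedModel):
    em = EmceeModel(sedData, sedModel)
    em.EnsembleSampler(nwalkers=100)
    p0 = em.p_prior() # start the walkers from the prior
    em.run_mcmc(p0, iterations=500, printFrac=0.2)
    ps = em.posterior_sample(burnin=250) # merged posterior sample
    em.print_parameters(burnin=250)
    return em.p_median(ps)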
| 27,070 | 39.224368 | 116 | py |
Fitter | Fitter-master/sedfit/mcmc/__init__.py | 1 | 0 | 0 | py |
|
Fitter | Fitter-master/sedfit/mcmc/mcmc_dnest4.py | #This script is not ready...
#
import os
from sys import platform
import types
import numpy as np
import pymultinest
import threading, subprocess
#The two imports below are required by the draft code in this script: dnest4
#provides randh()/wrap() and rng is used for the random draws. Note that the
#DataSet and ModelCombiner classes referenced below still need to be imported
#from the package's fitter modules.
import dnest4
from numpy import random as rng
from .. import fit_functions as sedff
#The log_likelihood function
lnlike = sedff.logLFunc
#The log_likelihood function using Gaussian process regression
lnlike_gp = sedff.logLFunc_gp
#DNest4 model#
#------------#
#The class follow the format of DNest4
#The combination of a number of models
def Model2Data_Naive(model, data):
"""
The function gets the model values that can directly compare with the data.
"""
if not isinstance(data, DataSet):
raise ValueError("The data is incorrect!")
if not isinstance(model, ModelCombiner):
raise ValueError("The model is incorrect!")
x = np.array(data.get_List("x"))
y = model.combineResult(x)
return y
#The log_likelihood function: naive one
def logLFunc_naive(params, data, model):
"""
This is the simplest log likelihood function.
"""
model.updateParList(params)
nParVary = len(model.get_parVaryList())
y = np.array(data.get_List("y"))
e = np.array(data.get_List("e"))
ym = np.array(Model2Data_Naive(model, data))
if len(params) == nParVary:
s = e
elif len(params) == (nParVary+1):
f = np.exp(params[nParVary]) #The last par is lnf.
s = (e**2 + (ym * f)**2)**0.5
else:
raise ValueError("The length of params is incorrect!")
#Calculate the log_likelihood
logL = -0.5 * np.sum( (y - ym)**2 / s**2 + np.log(2 * np.pi * s**2) )
return logL
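#-> Illustration: the Gaussian log-likelihood used above written out on plain
#   arrays, so that logL = -0.5 * sum((y - ym)**2 / s**2 + ln(2*pi*s**2)) is
#   explicit. The numbers are arbitrary placeholders.
def _example_gaussian_logl():
    y = np.array([10.0, 12.0, 15.0]) # fake data
    ym = np.array([9.5, 12.5, 14.0]) # fake model values
    s = np.array([1.0, 1.0, 2.0]) # fake uncertainties
    return -0.5 * np.sum((y - ym)**2 / s**2 + np.log(2 * np.pi * s**2))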
#The DNest4 model class
class DNest4Model(object):
"""
Specify the model
"""
def __init__(self, data, model, logl=logLFunc_naive, ModelUnct=False):
if isinstance(data, DataSet):
self.__data = data
else:
raise TypeError("The data type should be DataSet!")
if isinstance(model, ModelCombiner):
self.__model = model
else:
raise TypeError("The model type should be ModelCombiner!")
if isinstance(logl, types.FunctionType):
self._logl = logl
else:
raise TypeError("The model type should be a function!")
if isinstance(ModelUnct, types.BooleanType):
self.__modelunct = ModelUnct
if ModelUnct:
print "[DNest4Model]: ModelUnct is on!"
else:
print "[DNest4Model]: ModelUnct is off!"
else:
raise TypeError("The ModelUnct type should be Boolean!")
def from_prior(self):
"""
The prior of all the parameters are uniform.
"""
parList = []
parDict = self.__model.get_modelParDict()
for modelName in self.__model._modelList:
parFitDict = parDict[modelName]
for parName in parFitDict.keys():
if parFitDict[parName]["vary"]:
parRange = parFitDict[parName]["range"]
parType = parFitDict[parName]["type"]
if parType == "c":
#print "[DN4M]: continual"
r1, r2 = parRange
p = (r2 - r1) * rng.rand() + r1 #Uniform distribution
elif parType == "d":
#print "[DN4M]: discrete"
p = np.random.choice(parRange, 1)[0]
else:
raise TypeError("The parameter type '{0}' is not recognised!".format(parType))
parList.append(p)
else:
pass
#If the model uncertainty is concerned.
if self.__modelunct:
lnf = 20.0 * rng.rand() - 10.0
parList.append(lnf)
parList = np.array(parList)
return parList
def perturb(self, params):
"""
        At each step we perturb all the parameters, which is more efficient from
        a computational point of view.
"""
logH = 0.0
parDict = self.__model.get_modelParDict()
pIndex = 0
for modelName in self.__model._modelList:
parFitDict = parDict[modelName]
for parName in parFitDict.keys():
if parFitDict[parName]["vary"]:
parRange = parFitDict[parName]["range"]
parType = parFitDict[parName]["type"]
if parType == "c":
#print "[DN4M]: continual"
r1, r2 = parRange
p0 = params[pIndex]
#p0 += (r2 - r1) * dnest4.randh() #Uniform distribution
p0 += (r2 - r1) * dnest4.randh() / 2.0 #Uniform distribution
params[pIndex] = dnest4.wrap(p0, r1, r2)
if (params[pIndex] < r1) or (params[pIndex] > r2):
logH -= np.inf
elif parType == "d":
#print "[DN4M]: discrete"
rangeLen = len(parRange)
iBng = parRange.index(params[pIndex])
#iPro = int( iBng + rangeLen * dnest4.randh() ) #Uniform distribution
iPro = int( iBng + rangeLen * dnest4.randh() / 2.0 ) #Uniform distribution
iPar = dnest4.wrap(iPro, 0, rangeLen)
params[pIndex] = parRange[iPar]
if not params[pIndex] in parRange:
logH -= np.inf
else:
raise TypeError("The parameter type '{0}' is not recognised!".format(parType))
parFitDict[parName]["value"] = params[pIndex]
pIndex += 1
else:
pass
if len(params) == (pIndex+1):
p0 = params[pIndex]
p0 += 20.0 * dnest4.randh()
params[pIndex] = dnest4.wrap(p0, -10.0, 10.0)
if (params[pIndex] < -10.0) or (params[pIndex] > 10.0):
logH -= np.inf
return logH
def log_likelihood(self, params):
"""
        Gaussian sampling distribution.
"""
logL = self._logl(params, self.__data, self.__model)
return logL
| 6,241 | 36.60241 | 102 | py |
Fitter | Fitter-master/sedfit/models/model_torus_template.py | import numpy as np
from astropy.table import Table
if __name__ == "__main__":
from sedfit.dir_list import template_path
else:
from ..dir_list import template_path
from scipy.interpolate import interp1d
Mpc = 3.08567758e24 #unit: cm
mJy = 1e26 #unit: erg/s/cm^2/Hz
pi = np.pi
torus_total_tb = Table.read(template_path+"torus_templates/torus_total_template.dat",
format="ascii.ipac")
torus_clumpy_tb = Table.read(template_path+"torus_templates/torus_clumpy_template.dat",
format="ascii.ipac")
wavelength = torus_total_tb["wavelength"].data
flux_total = torus_total_tb["flux"].data
flux_clumpy = torus_clumpy_tb["flux"].data
func_total = interp1d(wavelength, flux_total)
func_clumpy = interp1d(wavelength, flux_clumpy)
waveLim = [1e-1, 1e3]
def Torus_Template(logLtorus, DL, wave, z, frame="rest", ttype="total", waveLim=waveLim):
"""
Calculate the torus emission with the dust torus templates.
Parameters
----------
logLtorus : float
The log10 of the torus luminosity, unit: erg/s.
DL : float
The luminosity distance, unit: Mpc.
wave : float array
The wavelength at which we want to calculate the flux, unit: micron.
frame : string
"rest" for the rest frame SED and "obs" for the observed frame.
ttype : string
"total" for the CLUMPY+blackbody template and "clumpy" for CLUMPY template only.
waveLim : list
The min and max of the wavelength covered by the template.
Returns
-------
flux : array of float
The flux density (F_nu) from the model, unit: mJy.
Notes
-----
None.
"""
flux = np.zeros_like(wave)
fltr = (wave > waveLim[0]) & (wave < waveLim[1])
if np.sum(fltr) == 0:
return np.zeros_like(wave)
if ttype == "total":
fluxFunc = func_total
elif ttype == "clumpy":
fluxFunc = func_clumpy
else:
raise ValueError("The template type ({0}) is not recognised!".format(t))
flux[fltr] = fluxFunc(wave[fltr])
if frame == "rest":
idx = 2.0
elif frame == "obs":
idx = 1.0
else:
raise ValueError("The frame '{0}' is not recognised!".format(frame))
fnu = (1.0 + z)**idx * flux * 10**logLtorus / (4 * pi * (DL * Mpc)**2) * mJy
return fnu
if __name__ == "__main__":
import matplotlib.pyplot as plt
wave = wavelength
flux = Torus_Template(40, 200, wave, 0)
plt.plot(wave, flux)
plt.xscale("log")
plt.yscale("log")
plt.show()
| 2,547 | 30.45679 | 89 | py |
Fitter | Fitter-master/sedfit/models/model_extinction.py | import numpy as np
from extinction import calzetti00
waveLim = [0.12, 2.2] # units: Micron
def Calzetti00(Av, wave, Rv=3.1, waveLim=waveLim, QuietMode=True):
"""
Calculate the extinction that is directly applied to the flux:
10**(-0.4 * A_lambda).
For the input wavelength out of the effective range (waveLim), 1 will be
returned, since this is multiplied on the fluxes.
Parameters
----------
Av : float
The Av, V band extinction.
wave : array
The wavelength to calculate the extinction, units: micron.
Rv : float
Ratio of total to selective extinction, A_V / E(B-V).
Returns
-------
f0 : array
The ratio of the fluxes after and before the extinction.
Notes
-----
None.
"""
#-> Check the wavelength coverage.
fltr = (wave >= waveLim[0]) & (wave <= waveLim[1])
#-> If the wavelength is fully out of the effective range.
if np.sum(fltr) == 0:
if not QuietMode:
print("Warning (c2000): The input wavelength is out of the effective range!")
return np.ones_like(wave)
#-> Calculate the extinction within the effective regime.
wave_aa = wave[fltr] * 1e4
av = calzetti00(wave_aa, Av, Rv)
f0 = np.ones_like(wave)
f0[fltr] = 10**(-0.4 * av)
return f0
if __name__ == "__main__":
import extinction
import matplotlib.pyplot as plt
Rv = 3.1
wave = np.logspace(np.log10(910.), np.log10(30000.), 2000)
a_lambda = {
'ccm89': extinction.ccm89(wave, 1.0, Rv),
'odonnell94': extinction.odonnell94(wave, 1.0, Rv),
'fitzpatrick99': extinction.fitzpatrick99(wave, 1.0),
"c00": extinction.calzetti00(wave, 1.0, Rv),
'fm07': extinction.fm07(wave, 1.0)
}
for mn in a_lambda.keys():
ext = a_lambda[mn]
plt.plot(wave, ext, label=mn)
c00_2 = -2.5 * np.log10(Calzetti00(2., wave/1e4, Rv))
c00_3 = extinction.calzetti00(wave, 2.0, Rv)
plt.plot(wave, c00_2, color="turquoise")
plt.plot(wave, c00_3, color="tomato", linestyle=":")
plt.xscale("log")
plt.legend(loc="upper right")
plt.show()
| 2,148 | 31.074627 | 89 | py |
Fitter | Fitter-master/sedfit/models/model_mir_extinction.py | # Add extinction function
import numpy as np
from scipy import interpolate
from ..dir_list import template_path
f = np.loadtxt(template_path+'tau_lambda_kemper_new.txt')
xaxis = f[:, 0]
yaxis = f[:, 1]
k = interpolate.interp1d(xaxis,yaxis,kind='cubic')
def Smith07(logtau, wave):
"""
This function adopts the extinction curve from Smith et al. (2007), section
4.1.8, and extrapolated to 1000 micron by Mingyang Zhuang.
"""
tempy1 = []; tempy2 = []
extin_x = []
for each in wave:
if each < xaxis[0]:
tempy1.append(0)
elif each > xaxis[-1]:
tempy2.append(0)
else:
extin_x.append(each)
final_y = k(extin_x)
extinction_list = np.concatenate((tempy1,final_y))
extinction_list = np.concatenate((extinction_list, tempy2))
ratio = np.exp(-10**logtau*extinction_list)
return ratio
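#-> Usage sketch (illustrative only): the wavelength grid and optical depth below
#   are arbitrary placeholders. The concatenation above assumes that `wave` is
#   sorted in ascending order.
def _example_Smith07():
    wave = np.logspace(0.0, 3.0, 200) # micron, ascending
    ratio = Smith07(logtau=0.0, wave=wave) # attenuation factor exp(-tau_lambda)
    return ratio # multiply onto a model F_nu to apply the extinction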
| 883 | 27.516129 | 79 | py |
Fitter | Fitter-master/sedfit/models/model_dl07.py | import numpy as np
import cPickle as pickle
from ..fitter.template import Template
from scipy.interpolate import splev
from ..dir_list import template_path
Msun = 1.9891e33 #unit: gram
Mpc = 3.08567758e24 #unit: cm
m_H = 1.6726219e-24 #unit: gram
fp = open(template_path+"dl07_kdt_mw.tmplt")
tp_dl07 = pickle.load(fp)
fp.close()
tdl07 = Template(**tp_dl07)
modelInfo = tdl07.get_modelInfo()
qpahList = modelInfo["qpah"]
mdust2mh = modelInfo["mdmh"]
waveLim = [1.0, 1e4]
def DL07(logumin, logumax, qpah, loggamma, logMd, DL, z, wave, frame="rest", t=tdl07, waveLim=waveLim):
"""
This function generates the dust emission template from Draine & Li (2007).
Parameters
----------
logumin : float
The minimum radiation field intensity in log10.
logumax : float
The maximum radiation field intensity in log10.
qpah : float
The PAH fraction of the dust.
loggamma : float
The fraction of the dust in the PDR in log10.
logMd : float
The log10 of the dust mass in the unit of solar mass.
DL : float
The luminosity distance.
z : float
The redshift.
wave : float array
The wavelengths of the output flux.
frame : string
"rest" for the rest frame SED and "obs" for the observed frame.
t : Template object
The template of DL07 model provided by user.
waveLim : list
The min and max of the wavelength covered by the template.
Returns
-------
flux : float array
The flux density of the model.
Notes
-----
None.
"""
umin = 10**logumin
umax = 10**logumax
gamma = 10**loggamma
pmin = t.get_nearestParameters([umin, umin, qpah])
qpah_min = pmin[2]
ppl = [umin, umax, qpah_min] # In order to avoid inconsistency, the qpah of
# the pl component is matched to that of the
# min component.
fltr = (wave > waveLim[0]) & (wave < waveLim[1])
if np.sum(fltr) == 0:
return np.zeros_like(wave)
jnu_min = t(wave[fltr], pmin)
jnu_pl = t(wave[fltr], ppl)
mdmh = mdust2mh[qpahList.index(qpah_min)]
jnu = (1 - gamma) * jnu_min + gamma * jnu_pl
if frame == "rest":
idx = 2.0
elif frame == "obs":
idx = 1.0
else:
raise ValueError("The frame '{0}' is not recognised!".format(frame))
flux = np.zeros_like(wave)
flux[fltr] = (1 + z)**idx * 10**logMd * Msun/m_H * jnu/(DL * Mpc)**2 / mdmh * 1e3 #unit: mJy
return flux
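#-> Usage sketch (illustrative only): one call of the DL07 model on a made-up
#   parameter set. All values (radiation field, q_PAH, dust mass, distance and
#   redshift) are arbitrary placeholders; they are snapped to the nearest
#   template grid point internally.
def _example_DL07():
    wave = np.logspace(0.5, 3.0, 300) # rest-frame wavelength grid (micron)
    return DL07(logumin=0.0, logumax=6.0, qpah=2.5, loggamma=-2.0, logMd=8.0,
                DL=100.0, z=0.02, wave=wave) # flux density in mJy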
def DL07_PosPar(logumin, logumax, qpah, loggamma, logMd, t=tdl07):
"""
    Position the parameters onto the model parameter grid. The function returns
    the parameter grid point found nearest to the input parameters using the
    KDTree template.
Parameters
----------
logumin : float
The minimum radiation field intensity in log10.
logumax : float
The maximum radiation field intensity in log10.
qpah : float
The PAH fraction of the dust.
loggamma : float
The fraction of the dust in the PDR, in log10.
logMd : float
The log10 of the dust mass in the unit of solar mass.
tmpl_dl07 : numpy.ndarray
The template of DL07 model provided by user.
Returns
-------
param : list
The parameters of the template used for the input parameters.
Notes
-----
None.
"""
umin = 10**logumin
umax = 10**logumax
pmin = [umin, umin, qpah]
ppl = [umin, umax, qpah]
parMin = t.get_nearestParameters(pmin)
parPl = t.get_nearestParameters(ppl)
if parMin[2] != parPl[2]:
raise RuntimeError("The DL07 model is inconsistent!")
parDict = {
"logumin": np.log10(parPl[0]),
"logumax": np.log10(parPl[1]),
"qpah": parPl[2],
"loggamma": loggamma,
"logMd": logMd
}
return parDict
| 3,906 | 29.286822 | 103 | py |
Fitter | Fitter-master/sedfit/models/model_cat3d_H.py | import numpy as np
import cPickle as pickle
from ..fitter.template import Template
from scipy.interpolate import splev
from ..dir_list import template_path
Msun = 1.9891e33 #unit: gram
Mpc = 3.08567758e24 #unit: cm
m_H = 1.6726219e-24 #unit: gram
r0 = 1.1 # pc
fp = open(template_path+"Cat3d_H.tmplt")
tp_cat3d_H = pickle.load(fp)
fp.close()
tcat3d_H = Template(**tp_cat3d_H)
waveLim = [1.0, 1e4]
def Cat3d_H(a, h, N0, i, logL, DL, z, wave, frame="rest", t=tcat3d_H, waveLim=waveLim):
"""
This function generates the modified CLUMPY torus radiation from Garcia-Gonzalez et al. 2017.
Parameters
----------
a : float
The index of the radial dust cloud distribution power law.
N0 : float
The number of clouds along an equatorial line-of-sight.
h : float
Vertical Gaussian distribution dimensionless scale height.
i : degree
Inclination angle.
logL : float
UV luminosity erg/s in log.
DL : float
The luminosity distance.
z : float
The redshift.
wave : float array
The wavelengths of the output flux.
frame : string
"rest" for the rest frame SED and "obs" for the observed frame.
t : Template object
The template of DL07 model provided by user.
waveLim : list
The min and max of the wavelength covered by the template.
Returns
-------
flux : float array
The flux density of the model.
Notes
-----
None.
"""
fltr = (wave > waveLim[0]) & (wave < waveLim[1])
if np.sum(fltr) == 0:
return np.zeros_like(wave)
para = [a, h, N0, i]
if frame == "rest":
idx = 2.0
elif frame == "obs":
idx = 1.0
else:
raise ValueError("The frame '{0}' is not recognised!".format(frame))
f0 = (1 + z)**idx * 10**(logL - 46) * (r0 / DL * 1e-6)**2
flux = np.zeros_like(wave)
flux[fltr] = f0 * t(wave[fltr], para) * 1e29 # unit: mJy
return flux
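#-> Usage sketch (illustrative only): one call of the torus model on a made-up
#   parameter set. The values below are arbitrary placeholders and are matched
#   to the template parameter grid internally.
def _example_Cat3d_H():
    wave = np.logspace(0.3, 3.0, 300) # rest-frame wavelength grid (micron)
    return Cat3d_H(a=-2.0, h=0.3, N0=7.0, i=30.0, logL=45.0,
                   DL=100.0, z=0.02, wave=wave) # flux density in mJy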
def Cat3d_H_PosPar(a, h, N0, i, logL, t=tcat3d_H):
"""
    Position the parameters onto the model parameter grid. The function returns
    the parameter grid point found nearest to the input parameters using the
    KDTree template.
Parameters
----------
a : float
The index of the radial dust cloud distribution power law.
N0 : float
The number of clouds along an equatorial line-of-sight.
h : float
Vertical Gaussian distribution dimensionless scale height.
i : degree
Inclination angle.
logL : float
Torus luminosity erg/s in log.
t : Template object
The template of torus model provided by user.
Returns
-------
param : list
The parameters of the template used for the input parameters.
Notes
-----
None.
"""
par = [a, h, N0, i]
Par = t.get_nearestParameters(par)
Pardict = {
'a': Par[0],
'h': Par[1],
'N0': Par[2],
'i': Par[3],
'logL': logL
}
return Pardict
| 3,104 | 25.538462 | 97 | py |
Fitter | Fitter-master/sedfit/models/Radiation_Model_Toolkit.py |
# coding: utf-8
# # This page is to release the functions of radiation models
# * The prototype of this page is [SEDToolKit](http://localhost:8888/notebooks/SEDFitting/SEDToolKit.ipynb) in /Users/jinyi/Work/PG_QSO/SEDFitting/
# In[2]:
import numpy as np
# In[1]:
#Func_bgn:
#----------------------------------#
# by SGJY, Dec. 13, 2015 #
#----------------------------------#
def Single_Planck(nu, T=1e4):
'''
This is a single Planck function.
    The input parameters are frequency (nu) with unit Hz and temperature (T) with unit K.
The system of units is Gaussian units, thus the brightness has unit erg/s/cm^2/Hz/ster.
'''
h = 6.62606957e-27 #erg s
c = 29979245800.0 #cm/s
k = 1.3806488e-16 #erg/K
Bnu = 2.0*h*nu**3.0/c**2.0 / (np.exp(h*nu/k/T) - 1.0)
return Bnu
#Func_end
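#-> Illustration: evaluate the Planck function at a few frequencies and roughly
#   check Wien's displacement law for B_nu (nu_peak ~ 5.88e10 * T Hz). The
#   temperature and frequency grid are arbitrary placeholders.
def _example_Single_Planck():
    T = 40.0 # K, a cold-dust-like temperature
    nu = np.logspace(11, 13, 200) # Hz
    Bnu = Single_Planck(nu, T) # erg/s/cm^2/Hz/ster
    nu_peak = nu[np.argmax(Bnu)] # should be close to 5.88e10 * T ~ 2.4e12 Hz
    return nu_peak, Bnu.max()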
#Func_bgn:
#----------------------------------#
# by SGJY, Dec. 13, 2015 #
#----------------------------------#
def Power_Law(nu, alpha, sf):
'''
This is a power-law function.
nu is frequency.
alpha is the power index.
sf is the scaling factor.
The results are in the units erg/s/cm^2/Hz.
'''
return sf * nu**(-alpha)
#Func_end
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Apr. 3, 2016 #
#-------------------------------------#
def BlackBody(nu, logOmega, T=1e4):
'''
    This function calculates the blackbody emission for a given temperature T,
    scaled by the solid angle 10**logOmega.
Parameter:
----------
nu : float array
Frequency with unit Hz.
logOmega : float
The log10 of the solid angle subtended by the emitter.
T : float
The temperature.
Returns
-------
flux : float array
The flux density (F_nu) calculated from the model, unit: erg/s/cm^2/Hz.
Notes
-----
None.
'''
flux_sp = Single_Planck(nu, T)
flux = 10**logOmega * flux_sp
return flux
#Func_end
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Feb. 20, 2016 #
#-------------------------------------#
def Dust_Modified_BlackBody(nu, logM, DL, beta, T, z=0.0, frame="rest", kappa0=16.2, lambda0=140):
'''
    This function calculates the dust emission assuming it is a modified blackbody.
The calculation is in the cgs system.
Parameters
----------
nu : array
The frequency corresponding to the flux.
logM : float
The dust mass, in the unit solar mass in log10.
DL : float
The luminosity distance, in the unit Mpc.
beta : float
The dust emissivity.
T : float
The blackbody temperature.
z : float
The redshift, default: 0.0
frame : string
"rest" for the rest frame SED and "obs" for the observed frame.
kappa0 : float, default: 16.2
The opacity for reference, by default we choose the value at 140
micron (Li & Draine 2001).
lambda0 : float, default: 140
The wavelength at which we use the opacity as reference, by default
we choose 140 micron.
Returns
-------
flux : array
The flux at the corresponding frequences, in the unit mJy.
Notes
-----
None.
'''
ls_mic = 2.99792458e14 #micron/s
Msun = 1.9891e33 #solar mass in gram
Mpc = 3.08567758e24 #1 megaparsec in centimeter
mJy = 1e-26 #1 mJy in erg/s/cm^2/Hz
nu0 = ls_mic/lambda0
kappa = kappa0 * (nu/nu0)**beta
mbb = Single_Planck(nu, T)
Md = Msun * 10**logM
if frame == "rest":
idx = 2.0
elif frame == "obs":
idx = 1.0
else:
raise ValueError("The frame '{0}' is not recognised!".format(frame))
flux = (1 + z)**idx * Md * kappa * mbb / (DL*Mpc)**2 / mJy
return flux
#Func_end
#Func_bgn:
#-------------------------------------#
# Created by SGJY, Oct. 30, 2016 #
#-------------------------------------#
def Line_Profile_Gaussian(nu, flux, nu0, FWHM, units="v", norm="peak"):
"""
Calculate the flux density of the emission line with a Gaussian profile.
Parameters
----------
nu : float array
The frequency of the spectrum.
flux : float
The integrated flux of the emission line if the norm parameter is "integrate".
The peak flux density of the emission line if the norm parameter is "peak".
Unit: erg/s/cm^2
nu0 : float
The central frequency of the emission line.
FWHM : float
The full width half maximum (FWHM) of the emission line.
units : string, default: "v"
The unit of the FWHM.
"v": velocity (km/s);
"lambda": wavelength (micron);
"nu": frequency (Hz).
norm : string, default: "peak"
"peak": use the flux parameter as the peak flux density.
"integrate": use the flux parameter as the integrated flux of the line.
Returns
-------
fnu : float array
The flux density of the spectrum.
Notes
-----
None.
"""
ls_km = 2.99792458e5 #km/s
ls_mic = 2.99792458e14 #micron/s
#Convert the FWHM from velocity into the units of frequency.
if units == "v":
FWHM = FWHM / ls_km * nu0
elif units == "lambda":
FWHM = ls_mic / FWHM
elif units == "nu":
FWHM = FWHM
else:
raise ValueError("The units {0} is not recognised!".format(units))
    #Convert the FWHM into the sigma of the Gaussian function.
    sigma = FWHM / 2.355
    if norm == "peak":
        amplitude = flux
    elif norm == "integrate":
        amplitude = flux / (sigma * (2.0 * np.pi)**0.5) * 10**26 #unit: mJy
    else:
        raise ValueError("The norm '{0}' is not recognised!".format(norm))
    fnu = amplitude * np.exp(-0.5 * ((nu - nu0) / sigma)**2.0)
return fnu
#Func_end
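
if __name__ == "__main__":
    # A minimal usage sketch of the modified blackbody and the Gaussian line
    # profile. All numbers below are illustrative assumptions, not fitted values.
    ls_mic = 2.99792458e14 #micron/s
    wave = np.logspace(1, 3, 5) #unit: micron
    nu = ls_mic / wave
    fnu_mbb = Dust_Modified_BlackBody(nu, logM=8.0, DL=100.0, beta=2.0, T=35.0,
                                      z=0.05)
    fnu_line = Line_Profile_Gaussian(nu, flux=1e-14, nu0=ls_mic/88.0, FWHM=500.0,
                                     units="v", norm="integrate")
    print "Modified blackbody (mJy):", fnu_mbb
    print "Gaussian line (mJy):", fnu_line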
| 5,691 | 27.893401 | 147 | py |
Fitter | Fitter-master/sedfit/models/model_xl.py | import numpy as np
import cPickle as pickle
import Radiation_Model_Toolkit as rmt
from ..fitter.template import Template
from ..dir_list import template_path
ls_mic = 2.99792458e14 #unit: micron/s
Mpc = 3.08567758e24 #unit: cm
Msun = 1.9891e33 #unit: gram
def Dust_Emission(T, Md, kappa, wave, DL, z, frame="rest"):
"""
Calculate the dust emission using the dust temperature, mass, opacity and
the luminosity distance of the source.
Parameters
----------
T : float
Temperature, unit: Kelvin.
Md : float
The dust mass, unit: Msun.
kappa : float array
The opacity array, unit: cm^2/g.
wave : float array
        The wavelength array to calculate, unit: micron.
DL : float
The luminosity distance, unit: Mpc.
z : float
The redshift.
frame : string
"rest" for the rest frame SED and "obs" for the observed frame.
Returns
-------
de : float array
The dust emission SED, unit: mJy (check!!).
Notes
-----
None.
"""
nu = ls_mic / wave
bb = rmt.Single_Planck(nu, T)
if frame == "rest":
idx = 2.0
elif frame == "obs":
idx = 1.0
else:
raise ValueError("The frame '{0}' is not recognised!".format(frame))
de = (1 + z)**idx * (Md * Msun) * bb * kappa / (DL * Mpc)**2 * 1e26 #Unit: mJy
return de
fp = open(template_path+"dust_xl_kdt.tmplt", "r")
grainModel = pickle.load(fp)
fp.close()
#print grainModel["readMe"]
silDict = grainModel["Silicate"]
graDict = grainModel["Graphite"]
tSil = Template(**silDict)
tGra = Template(**graDict)
waveLim = [0.1, 1e3]
def Torus_Emission(typeSil, size, T1Sil, T2Sil, logM1Sil, logM2Sil,
typeGra, T1Gra, T2Gra, R1G2S, R2G2S,
wave, DL, z, frame="rest", TemplateSil=tSil, TemplateGra=tGra,
waveLim=waveLim):
"""
    Calculate the emission of the dust torus using the dust opacity and assuming
    the optically thin situation. In detail, the torus emission model assumes that
    the dust torus consists of silicate and graphite dust. Moreover, each type of
    dust has two average temperatures.
"""
#Calculate the opacity curve
parSil = [typeSil, size]
parGra = [typeGra, size]
fltr = (wave > waveLim[0]) & (wave < waveLim[1]) #Only choose the wavelength
#that covered by the template
if np.sum(fltr) == 0:
return np.zeros_like(wave)
kappaSil = TemplateSil(wave[fltr], parSil)
kappaGra = TemplateGra(wave[fltr], parGra)
#Calculate the dust emission SEDs
M1Sil = 10**logM1Sil
M2Sil = 10**logM2Sil
M1Gra = R1G2S * M1Sil
M2Gra = R2G2S * M2Sil
de1Sil = Dust_Emission(T1Sil, M1Sil, kappaSil, wave[fltr], DL, z, frame)
de2Sil = Dust_Emission(T2Sil, M2Sil, kappaSil, wave[fltr], DL, z, frame)
de1Gra = Dust_Emission(T1Gra, M1Gra, kappaGra, wave[fltr], DL, z, frame)
de2Gra = Dust_Emission(T2Gra, M2Gra, kappaGra, wave[fltr], DL, z, frame)
deTorus = np.zeros_like(wave)
deTorus[fltr] = de1Sil + de2Sil + de1Gra + de2Gra
return deTorus
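#A minimal usage sketch of Torus_Emission (illustrative values only; the
#discrete dust type/size parameters must fall inside the grid of
#"dust_xl_kdt.tmplt"):
#    wave = np.logspace(0, 3, 300) #unit: micron
#    flux = Torus_Emission(typeSil=0, size=0.1, T1Sil=800., T2Sil=300.,
#                          logM1Sil=3., logM2Sil=5., typeGra=0, T1Gra=500.,
#                          T2Gra=200., R1G2S=1.5, R2G2S=1.5, wave=wave,
#                          DL=500., z=0.1) #unit: mJy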
def Torus_Emission_PosPar(typeSil, size, T1Sil, T2Sil, logM1Sil, logM2Sil,
typeGra, T1Gra, T2Gra, R1G2S, R2G2S,
TemplateSil=tSil, TemplateGra=tGra):
"""
Position the parameters in the grid. Specifically, discretize the sizeSil and
sizeGra.
Parameters
----------
Same as the Torus_Emission() function.
Returns
-------
parDict : dict
A dict of parameters with discrete parameters positioned on the grid.
Notes
-----
None.
"""
parSil = [typeSil, size]
parGra = [typeGra, size]
nParSil = TemplateSil.get_nearestParameters(parSil)
nParGra = TemplateGra.get_nearestParameters(parGra)
parDict = {
"typeSil": nParSil[0],
"typeGra": nParGra[0],
"size": nParSil[1],
"T1Sil": T1Sil,
"T2Sil": T2Sil,
"logM1Sil": logM1Sil,
"logM2Sil": logM2Sil,
"T1Gra": T1Gra,
"T2Gra": T2Gra,
"R1G2S": R1G2S,
"R2G2S": R2G2S
}
return parDict
'''
def Torus_Emission_bak(typeSil, sizeSil, T1Sil, T2Sil, logM1Sil, logM2Sil,
typeGra, sizeGra, T1Gra, T2Gra, R1G2S, R2G2S,
wave, DL, TemplateSil=tSil, TemplateGra=tGra):
"""
Calculate the emission of the dust torus using the dust opacity and assuming
it is optical thin situation. In detail, the torus emission model assumes that
the dust torus consists silicate and graphite dust. Moreover, each type of
dusts have two average temperature.
"""
#Calculate the opacity curve
parSil = [typeSil, sizeSil]
parGra = [typeGra, sizeGra]
kappaSil = TemplateSil(wave, parSil)
kappaGra = TemplateGra(wave, parGra)
#Calculate the dust emission SEDs
M1Sil = 10**logM1Sil
M2Sil = 10**logM2Sil
M1Gra = R1G2S * M1Sil
M2Gra = R2G2S * M2Sil
de1Sil = Dust_Emission(T1Sil, M1Sil, kappaSil, wave, DL)
de2Sil = Dust_Emission(T2Sil, M2Sil, kappaSil, wave, DL)
de1Gra = Dust_Emission(T1Gra, M1Gra, kappaGra, wave, DL)
de2Gra = Dust_Emission(T2Gra, M2Gra, kappaGra, wave, DL)
deTorus = de1Sil + de2Sil + de1Gra + de2Gra
return deTorus
def Torus_Emission_PosPar_bak(typeSil, sizeSil, T1Sil, T2Sil, logM1Sil, logM2Sil,
typeGra, sizeGra, T1Gra, T2Gra, R1G2S, R2G2S,
TemplateSil=tSil, TemplateGra=tGra):
"""
Position the parameters in the grid. Specifically, discretize the sizeSil and
sizeGra.
Parameters
----------
Same as the Torus_Emission() function.
Returns
-------
parDict : dict
A dict of parameters with discrete parameters positioned on the grid.
Notes
-----
None.
"""
parSil = [typeSil, sizeSil]
parGra = [typeGra, sizeGra]
nParSil = TemplateSil.get_nearestParameters(parSil)
nParGra = TemplateGra.get_nearestParameters(parGra)
parDict = {
"typeSil": nParSil[0],
"sizeSil": nParSil[1],
"typeGra": nParGra[0],
"sizeGra": nParGra[1]
}
return parDict
'''
if __name__ == "__main__":
import matplotlib.pyplot as plt
typeSil = 0
typeGra = 0
sizeSil = 0.5
sizeGra = 0.5
T1Sil = 800.0
T2Sil = 300.0
T1Gra = 500.0
T2Gra = 200.0
logM1Sil = 3
logM2Sil = 5
R1G2S = 1.5
R2G2S = 1.5
M1Sil = 10**logM1Sil
M2Sil = 10**logM2Sil
M1Gra = R1G2S * M1Sil
M2Gra = R2G2S * M2Sil
"""
wave = 10**np.linspace(0, 3, 1000)
DL = 500.0
deTorus = Torus_Emission(typeSil, sizeSil, T1Sil, T2Sil, logM1Sil, logM2Sil,
typeGra, sizeGra, T1Gra, T2Gra, R1G2S, R2G2S,
wave, DL)
parSil = [typeSil, sizeSil]
parGra = [typeGra, sizeGra]
kappaSil = tSil(wave, parSil)
kappaGra = tGra(wave, parGra)
de1Sil = Dust_Emission(T1Sil, M1Sil, kappaSil, wave, DL)
de2Sil = Dust_Emission(T2Sil, M2Sil, kappaSil, wave, DL)
de1Gra = Dust_Emission(T1Gra, M1Gra, kappaGra, wave, DL)
de2Gra = Dust_Emission(T2Gra, M2Gra, kappaGra, wave, DL)
plt.plot(wave, deTorus, color="k", linewidth=1.5)
plt.plot(wave, de1Sil, color="r", linestyle="--")
plt.plot(wave, de2Sil, color="r", linestyle=":")
plt.plot(wave, de1Gra, color="b", linestyle="--")
plt.plot(wave, de2Gra, color="b", linestyle=":")
plt.xscale("log")
plt.yscale("log")
plt.ylim([1e2, 3e5])
plt.show()
"""
parKws = {
"typeSil": 0.50001,
"sizeSil": 1.22324,
"T1Sil": T1Sil,
"T2Sil": T2Sil,
"logM1Sil": logM1Sil,
"logM2Sil": logM2Sil,
"typeGra": 0.902934,
"sizeGra": 0.45325,
"T1Gra": T1Gra,
"T2Gra": T2Gra,
"R1G2S": R1G2S,
"R2G2S": R2G2S
}
parDict = Torus_Emission_PosPar(**parKws)
print parDict
| 8,046 | 30.806324 | 82 | py |
Fitter | Fitter-master/sedfit/models/model_bc03_refine.py | import numpy as np
import cPickle as pickle
from ..fitter.template import Template
from ..dir_list import template_path
Msun = 1.9891e33 #unit: gram
Mpc = 3.08567758e24 #unit: cm
mJy = 1e26 #unit: erg/s/cm^2/Hz
pi = np.pi
fp = open(template_path+"bc03_sps_cha_kdt.tmplt")
tp_bc03 = pickle.load(fp)
fp.close()
bc03 = Template(**tp_bc03)
waveLim = [1e-2, 1e3]
def BC03_ref(logMs, logAge, sfh, DL, wave, z, frame="rest", t=bc03, waveLim=waveLim):
"""
    This function calls the interpolated BC03 template to generate the stellar
emission SED with the given parameters.
Parameters
----------
logMs : float
The log10 of stellar mass with the unit solar mass.
logAge : float
The log10 of the age of the stellar population with the unit Gyr.
sfh : int
The code for the SFH, it ranges from 0-5 currently for the following
SFHs:
0 - bc03_ssp_z_0.02_chab.model,
1 - bc03_burst_0.1_z_0.02_chab.model,
2 - bc03_exp_0.1_z_0.02_chab.model,
3 - bc03_exp_1.0_z_0.02_chab.model,
4 - bc03_const_1.0_tV_0.2_z_0.02_chab.model,
5 - bc03_const_1.0_tV_5.0_z_0.02_chab.model.
        It is not suggested to let this parameter be free, since the relation
        between adjacent models is not continuous.
DL : float
The luminosity distance with the unit Mpc.
wave : float array
The wavelength of the SED.
z : float
The redshift.
frame : string
"rest" for the rest frame SED and "obs" for the observed frame.
t : Template class
The interpolated BC03 template.
waveLim : list
The min and max of the wavelength covered by the template.
Returns
-------
fnu : float array
The flux density of the calculated SED with the unit erg/s/cm^2/Hz.
Notes
-----
None.
"""
flux = np.zeros_like(wave)
fltr = (wave > waveLim[0]) & (wave < waveLim[1])
if np.sum(fltr) == 0:
return np.zeros_like(wave)
age = 10**logAge
flux[fltr] = t(wave[fltr], [age, sfh])
if frame == "rest":
idx = 2.0
elif frame == "obs":
idx = 1.0
else:
raise ValueError("The frame '{0}' is not recognised!".format(frame))
fnu = (1.0 + z)**idx * flux * 10**logMs / (4 * pi * (DL * Mpc)**2) * mJy
return fnu
def BC03_ref_PosPar(logMs, logAge, sfh, t=bc03):
"""
Find the position of the parameters on the discrete grid.
Parameters
----------
logMs : float
The log of the stellar mass, unit: Msun.
    logAge : float
        The log10 of the age of the stellar population, unit: Gyr.
sfh : int
The code for the SFH.
Returns
-------
parDict : dict
The dict of the parameters.
"""
age = 10**logAge
age_d = t.get_nearestParameters([age, sfh])
parDict = {
"logMs": logMs,
"age": age_d
}
return parDict
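
if __name__ == "__main__":
    # A minimal usage sketch. logMs, logAge, sfh, DL and z are illustrative
    # assumptions; logAge and sfh must be covered by the pickled template grid.
    wave = np.logspace(-1, 2, 300) #unit: micron
    flux = BC03_ref(logMs=10.5, logAge=0.5, sfh=2, DL=100.0, wave=wave, z=0.02)
    print BC03_ref_PosPar(logMs=10.5, logAge=0.5, sfh=2)
    print "Peak flux density (mJy):", flux.max()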
| 2,918 | 28.19 | 85 | py |
Fitter | Fitter-master/sedfit/models/ndiminterpolation.py | __author__ = "Robert Nikutta <[email protected]>"
__version__ = '20150416'
import numpy as N
import warnings
from scipy import interpolate, ndimage
# Convert RuntimeWarnings, e.g. division by zero in some array elements, to Exceptions
warnings.simplefilter('error', RuntimeWarning)
class NdimInterpolation:
"""N-dimensional interpolation on data hypercubes.
Operates on image(index) coordinates. Multi-linear or cubic-spline
(default).
"""
def __init__(self,data,theta,order=1,mode='log'):
"""Initialize an interpolator object.
Parameters
----------
data : n-dim array or 1-d array
Model database to be interpolated. Sampled on a
rectilinear grid (it need not be regular!). 'data' is
either an n-dimensional array (hypercube), or a
1-dimensional array. If hypercube, each axis corresponds
to one of the model parameters, and the index location
along each axis grows with the parameter value. The last
axis is the 'wavelength' axis. If 'data' is a 1-d array of
values, it will be converted into the hypercube
format. This means that the order of entries in the 1-d
array must be as if constructed via looping over all axes,
i.e.
counter = 0
for j0 in theta[0]:
for j1 in theta[1]:
for j2 in theta[2]:
...
hypercube[j0,j1,j2,...] = onedarray[counter]
counter += 1
theta : list
List of 1-d arrays, each holding in ascending order the
unique values for one of the model parameters. The last
1-d array in theta is the wavelength array. Example: for
the CLUMPY models of AGN tori (Nenkova et al. 2008)
theta = [{i}, {tv}, {q}, {N0}, {sig}, {Y}, {wave}]
where the {.} are 1-d arrays of unique model parameter
values, e.g.
{i} = array([0,10,20,30,40,50,60,70,80,90]) (degrees).
order : int
Order of interpolation spline to be used. order=1
(default) is multi-linear interpolation, order=3 is
cubic-spline (quite a bit slower, and not necessarily
better, especially for complicated n-dim functions. order=1
is recommended.
mode : str
'log' is default, and will take log10(data) first, which
severely improves the interpolation accuracy if the data
span many orders of magnitude. This is of course only
applicable if all entries in 'data' are greater than
0. Any string other that 'log' will keep 'data' as-is.
Returns
-------
NdimInterpolation instance.
Example
-------
General way to use ndiminterpolation
ipython --pylab
In[0]: import ndiminterpolation as nd
In[1]: ip, datacube, theta, mywave = nd.example()
See also example.py in the same repository.
"""
self.theta = theta # list of lists of parameter values, unique, in correct order
shape_ = [len(t) for t in self.theta]
# determine if data is hypercube or list of 1d arrays
if shape_ == data.shape:
self.input = 'hypercube'
self.data_hypercube = data
else:
self.input = 'linear'
self.data_hypercube = data.reshape(shape_,order='F')
# interpolation orders
assert (order in (1,3)), "Interpolation spline order not supported! Must be 1 (linear) or 3 (cubic)."
self.order = order
# interpolate in log10 space?
self.mode = mode
if self.mode == 'log':
try:
self.data_hypercube = N.log10(self.data_hypercube)
except RuntimeWarning:
raise Exception, "For mode='log' all entries in 'data' must be > 0."
# set up n 1-d linear interpolators for all n parameters in theta
self.ips = [] # list of 1-d interpolator objects
for t in self.theta:
self.ips.append(interpolate.interp1d(t,N.linspace(0.,float(t.size-1.),t.size)))
if self.order == 3:
print "Evaluating cubic spline coefficients for subsequent use, please wait..."
self.coeffs = ndimage.spline_filter(self.data_hypercube,order=3)
print "Done."
def get_coords(self,vector,pivots=None):
"""Take real-world vector of parameter values, return image coordinates.
Parameters
----------
vector : 1-d array
Values of every parameter at which the interpolation
should be performed. This is on the input parameter
hypercube.
pivots : 1-d array
Pivot points on the output, on which to deliver the
interpolation results.
Examples
--------
vector = [0., 100.4, 2.4]
pivots = [1.,3.] # can be e.g. microns, or the running number of a photometric band, etc.
Then, get_coords(vector,pivots) returns the image coordinates at
parameter 1/2/3 = [0., 100.4, 2.4] and at pivot points = [1.,3.]
"""
len_vector = len(vector)
if pivots is None:
pivots = self.theta[-1] # pivots are theta without the wave/bands
coords = N.zeros((len_vector+1,pivots.size)) # 'x-prime'
for i,ip in enumerate(self.ips):
if i < len_vector:
# unfortunately we have to repeat most of the coordinate array,
# because we want the interpolated output on multiple pivot points
coords[i,:] = N.repeat( ip(vector[i]), pivots.size )
else:
coords[i,:] = ip(pivots)
return coords
def __call__(self,vector,pivots):
"""Interpolate in N dimensions, using mapping to image coordinates."""
if self.order == 1:
aux = ndimage.map_coordinates(self.data_hypercube,self.get_coords(vector,pivots=pivots),order=1)
elif self.order == 3:
aux = ndimage.map_coordinates(self.coeffs,self.get_coords(vector,pivots=pivots),order=3,prefilter=False)
if self.mode == 'log':
aux = 10.**aux
return aux
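
if __name__ == "__main__":
    # A minimal self-test on a synthetic 2-parameter hypercube. The grids and
    # the probe point are made-up illustrative values.
    par1 = N.linspace(0., 1., 5)
    par2 = N.linspace(10., 20., 4)
    wave = N.logspace(0., 2., 11)
    cube = N.zeros((par1.size, par2.size, wave.size))
    for j1 in range(par1.size):
        for j2 in range(par2.size):
            # strictly positive values, so the default mode='log' is valid
            cube[j1,j2,:] = (1. + par1[j1]) * par2[j2] * wave
    ip = NdimInterpolation(cube, [par1, par2, wave])
    # interpolate at (par1, par2) = (0.25, 12.5) on the first three wavelengths
    print ip(N.array([0.25, 12.5]), wave[:3])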
| 6,426 | 34.313187 | 116 | py |
Fitter | Fitter-master/sedfit/models/model_analyticals.py | import numpy as np
import Radiation_Model_Toolkit as rmt
__all__ = ["BlackBody", "Modified_BlackBody", "Power_Law", "Synchrotron",
"Linear", "Line_Gaussian_L", "Poly3"]
ls_mic = 2.99792458e14 #unit: micron/s
Mpc = 3.08567758e24 #unit: cm
mJy = 1e-26 #1 mJy in erg/s/cm^2/Hz
def BlackBody(logOmega, T, wave):
"""
Calculate the flux density of a blackbody emitter.
Parameters
----------
logOmega : float
The log10 of the solid angle subtended by the emitter.
T : float
The temperature of the emitter, unit: Kelvin.
wave : float array
The wavelength to be calculated.
Returns
-------
flux : float array
The flux density of corresponding to the input wavelength, unit: mJy.
Notes
-----
None.
"""
nu = ls_mic / wave
flux = rmt.BlackBody(nu, logOmega=logOmega, T=T) / mJy #unit: mJy
return flux
def Modified_BlackBody(logM, T, beta, wave, DL, z, kappa0=16.2, lambda0=140, frame="rest"):
"""
This function is a wrapper to calculate the modified blackbody model.
Parameters
----------
logM : float
The dust mass in the unit solar mass.
T : float
The temperature of the dust.
beta : float
The dust emissivity which should be around 2.
wave : float array
The wavelengths of the calculated fluxes.
DL : float
The luminosity distance in the unit Mpc.
z : float
The redshift of the source.
frame : string
"rest" for the rest frame SED and "obs" for the observed frame.
kappa0 : float, default: 16.2
The normalisation opacity.
lambda0 : float, default: 140
The normalisation wavelength.
Returns
-------
flux : float array
The flux at the given wavelengths to calculate.
Notes
-----
None.
"""
nu = ls_mic / wave
flux = rmt.Dust_Modified_BlackBody(nu, logM, DL, beta, T, z, frame, kappa0, lambda0)
return flux
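#A minimal usage sketch of Modified_BlackBody with purely illustrative values:
#    wave = np.logspace(1, 3, 100) #unit: micron
#    flux = Modified_BlackBody(logM=8., T=35., beta=2., wave=wave, DL=100.,
#                              z=0.05) #unit: mJy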
def Power_Law(PL_alpha, PL_logsf, wave):
"""
This function is a wrapper to calculate the power law model.
Parameters
----------
PL_alpha : float
The power-law index.
PL_logsf : float
The log of the scaling factor.
wave : float array
The wavelength.
Returns
-------
flux : float array
The flux at the given wavelengths to calculate.
Notes
-----
None.
"""
nu = ls_mic / wave
flux = rmt.Power_Law(nu, PL_alpha, 10**PL_logsf)
return flux
def Synchrotron(Sn_alpha, Sn_logsf, wave, lognuc=13, lognum=14):
"""
Calculate the model of synchrotron emission,
fnu ~ nu^-alpha (nu < nu0)
~ nu^-(alpha+0.5) (nu > nu0),
which comes from Figure 1 of Pe'er, Space Sci Rev (2014) 183:371.
Parameters
----------
Sn_alpha : float
The power-law index.
Sn_logsf : float
The log of the scaling factor.
wave : float array
The wavelength.
lognuc : float
The log of the cooling frequency (unit: Hz).
lognum : float
The maximum frequency above which there is no synchrotron emission
(unit: Hz).
Returns
-------
flux : float array
The flux at the given wavelengths to calculate.
Notes
-----
None.
"""
num = 10**(lognum - lognuc)
nu = np.atleast_1d(ls_mic / wave) / 10**lognuc
fltr_m = nu < num
fltr_h = (nu > 1) & fltr_m
fltr_l = (nu <= 1) & fltr_m
flux = np.zeros_like(nu)
sf = 10**Sn_logsf
flux[fltr_h] = sf * nu[fltr_h]**(-Sn_alpha-0.5)
flux[fltr_l] = sf * nu[fltr_l]**(-Sn_alpha)
return flux
def Linear(a, b, x):
return a * np.atleast_1d(x) + b
def Line_Gaussian_L(wavelength, logLum, lambda0, FWHM, DL):
"""
The wrapper of the function Line_Profile_Gaussian() to use wavelength and
luminosity as the parameters.
Calculate the flux density of the emission line with a Gaussian profile.
Parameters
----------
wavelength : float array
The wavelength of the spectrum.
logLum : float
The log of luminosity of the line, unit: erg/s.
lambda0 : float
The central wavelength of the emission line.
FWHM : float
The full width half maximum (FWHM) of the emission line.
DL : float
The luminosity distance, unit: Mpc.
Returns
-------
fnu : float array
The flux density of the spectrum, units: mJy.
Notes
-----
None.
"""
flux = 10**logLum / (4 * np.pi * (DL * Mpc)**2.0)
nu = ls_mic / wavelength
nu0 = ls_mic / lambda0
fnu = rmt.Line_Profile_Gaussian(nu, flux, nu0, FWHM, norm="integrate")
return fnu
from numpy.polynomial.polynomial import polyval
def Poly3(x, c0, c1, c2, c3):
"""
This is a log 3rd order polynomial function. It calls polyval from numpy.
y = 10^(c0 + c1 * x + c2 * x^2 + c3 * x^3)
Parameters
----------
x : array like
The input active variable.
c0 -- c3 : floats
The coefficiants for 0th to 3rd order.
Returns
-------
y : array like
The function result.
"""
x = np.atleast_1d(x)
y = 10**polyval(x, [c0, c1, c2, c3])
return y
if __name__ == "__main__":
import matplotlib.pyplot as plt
wave = np.linspace(-5, 5, 20)
flux = Poly3(wave, 5, -8., 2, 1)
#flux = Synchrotron(0.8, 5, wave, lognuc=13, lognum=14)
plt.plot(wave, flux)
#plt.axvline(x=ls_mic/1e13, color="r", linestyle=":")
#plt.axvline(x=ls_mic/1e14, color="r", linestyle=":")
#plt.xscale('log')
plt.yscale('log')
plt.show()
| 5,625 | 25.28972 | 91 | py |
Fitter | Fitter-master/sedfit/models/model_bc03.py | import numpy as np
import cPickle as pickle
from ..fitter.template import Template
from ..dir_list import template_path
Msun = 1.9891e33 #unit: gram
Mpc = 3.08567758e24 #unit: cm
mJy = 1e26 #unit: erg/s/cm^2/Hz
pi = np.pi
fp = open(template_path+"bc03_kdt.tmplt")
tp_bc03 = pickle.load(fp)
fp.close()
bc03 = Template(**tp_bc03)
waveLim = [1e-2, 1e3]
def BC03(logMs, age, DL, wave, z, frame="rest", t=bc03, waveLim=waveLim):
"""
    This function calls the interpolated BC03 template to generate the stellar
emission SED with the given parameters.
Parameters
----------
logMs : float
The log10 of stellar mass with the unit solar mass.
age : float
The age of the stellar population with the unit Gyr.
DL : float
The luminosity distance with the unit Mpc.
wave : float array
The wavelength of the SED.
z : float
The redshift.
frame : string
"rest" for the rest frame SED and "obs" for the observed frame.
t : Template class
The interpolated BC03 template.
waveLim : list
The min and max of the wavelength covered by the template.
Returns
-------
fnu : float array
The flux density of the calculated SED with the unit erg/s/cm^2/Hz.
Notes
-----
None.
"""
flux = np.zeros_like(wave)
fltr = (wave > waveLim[0]) & (wave < waveLim[1])
if np.sum(fltr) == 0:
return np.zeros_like(wave)
flux[fltr] = t(wave[fltr], [age])
if frame == "rest":
idx = 2.0
elif frame == "obs":
idx = 1.0
else:
raise ValueError("The frame '{0}' is not recognised!".format(frame))
fnu = (1.0 + z)**idx * flux * 10**logMs / (4 * pi * (DL * Mpc)**2) * mJy
return fnu
def BC03_PosPar(logMs, age, t=bc03):
"""
Find the position of the parameters on the discrete grid.
Parameters
----------
logMs : float
The log of the stellar mass, unit: Msun.
age : float
The age of the stellar population, unit: Gyr.
Returns
-------
parDict : dict
The dict of the parameters.
"""
age_d = t.get_nearestParameters([age])
parDict = {
"logMs": logMs,
"age": age_d
}
return parDict
#Func_bgn:
#-------------------------------------#
# Created by SGJY, May. 3, 2016 #
#-------------------------------------#
#From: dev_CLUMPY_intp.ipynb
def Stellar_SED(logMs, age, zs, wave, band="h", zf_guess=1.0, spsmodel="bc03_ssp_z_0.02_chab.model"):
"""
    This function obtains the galaxy stellar SED given the stellar mass, age and redshift. The
default model is Bruzual & Charlot (2003) with solar metallicity and Chabrier IMF. The stellar
synthesis models are organised by the module EzGal (http://www.baryons.org/ezgal/).
Parameters
----------
logMs : float
The stellar mass in log10 of solar unit.
age : float
The age of the galaxy, in the unit of Gyr.
zs : float
The redshift of the source.
wave : array
The sed wavelength corresponding to the sedflux. In units micron.
band : str, default: "h"
The reference band used to calculate the mass-to-light ratio.
zf_guess : float. zf_guess=1.0 by default.
The initial guess to solve the zf that allowing the age between
zs and zf is as required.
spsmodel : string. spsmodel="bc03_ssp_z_0.02_chab.model" by default.
The stellar population synthesis model that is used.
Returns
-------
flux : array
The sed flux of the bulge. In units mJy.
Notes
-----
None.
"""
import ezgal #Import the package for stellar synthesis.
    from scipy.optimize import fsolve
    from scipy.interpolate import interp1d #Needed below to resample the SED.
ls_mic = 2.99792458e14 #micron/s
model = ezgal.model(spsmodel) #Choose a stellar population synthesis model.
model.set_cosmology(Om=0.308, Ol=0.692, h=0.678)
func_age = lambda zf, zs, age: age - model.get_age(zf, zs) #To solve the formation redshift given the
#redshift of the source and the stellar age.
func_MF = lambda Msun, Mstar, m2l: Msun - 2.5*np.log10(Mstar/m2l) #Calculate the absolute magnitude of
#the galaxy. Msun is the absolute mag
#of the sun. Mstar is the mass of the
#star. m2l is the mass to light ratio.
func_flux = lambda f0, MF, mu: f0 * 10**(-0.4*(MF + mu)) #Calculate the flux density of the galaxy. f0
#is the zero point. MF is the absolute magnitude
#of the galaxy at certain band. mu is the distance
#module.
Ms = 10**logMs #Calculate the stellar mass.
age_up = model.get_age(1500., zs)
if age > age_up:
raise ValueError("The age is too large!")
zf = fsolve(func_age, zf_guess, args=(zs, age)) #Given the source redshift and the age, calculate the redshift
#for the star formation.
Msun_H = model.get_solar_rest_mags(nzs=1, filters=band, ab=True) #The absolute magnitude of the Sun in given band.
m2l = model.get_rest_ml_ratios(zf, band, zs) #Calculate the mass-to-light ratio.
M_H = func_MF(Msun_H, Ms, m2l) #The absolute magnitude of the galaxy in given band.
#Calculate the flux at given band for comparison.
f0 = 3.631e6 #Zero point of AB magnitude, in unit of mJy.
mu = model.get_distance_moduli(zs) #The distance module
flux_H = func_flux(f0, M_H, mu)
wave_H = 1.6448 #Pivot wavelength of given band, in unit of micron.
#Obtain the SED
wave_rst = model.ls / 1e4 #In unit micron.
flux_rst = model.get_sed(age, age_units="gyrs", units="Fv") * 1e26 #In unit mJy.
wave_ext = np.linspace(200, 1000, 30)
flux_ext = np.zeros(30)
wave_extd = np.concatenate([wave_rst, wave_ext])
flux_extd = np.concatenate([flux_rst, flux_ext])
#Normalize the SED at the given band.
#The normalization provided by EzGal is not well understood, so I do not use it.
f_int = interp1d(wave_extd, flux_extd)
f_H = f_int(wave_H)
flux = flux_extd * flux_H/f_H
sedflux = f_int(wave) * flux_H/f_H
#return sedflux, wave_extd, flux_extd, wave_H, flux_H #For debug
return sedflux
#Func_end
#Func_bgn:
#-------------------------------------#
# Created by SGJY, May. 3, 2016 #
#-------------------------------------#
#From: dev_CLUMPY_intp.ipynb
def Stellar_SED_scale(logMs, flux_star_1Msun, wave):
"""
This function scales the stellar SED to obtain the best-fit stellar mass.
The input SED flux should be normalised to 1 solar mass.
Parameters
----------
logMs : float
The log stellar mass in unit solar mass
flux_star_1Msun : float array
The flux of stellar SED model. It is normalized to one solar mass.
Returns
-------
flux : float array
Notes
----
None.
"""
Ms = 10**logMs
flux = Ms*flux_star_1Msun
if len(wave) != len(flux):
raise ValueError("The input wavelength is incorrect!")
return flux
#Func_end
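
if __name__ == "__main__":
    # A minimal usage sketch of the interpolated BC03 stellar SED. The stellar
    # mass, age, distance and redshift are illustrative assumptions; the age
    # must be covered by the grid of "bc03_kdt.tmplt".
    wave = np.logspace(-1, 2, 300) #unit: micron
    flux = BC03(logMs=10.5, age=3.0, DL=100.0, wave=wave, z=0.02)
    print BC03_PosPar(logMs=10.5, age=3.0)
    print "Peak flux density (mJy):", flux.max()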
| 7,374 | 35.509901 | 118 | py |
Fitter | Fitter-master/sedfit/models/model_clumpy.py | import h5py
import numpy as np
import cPickle as pickle
import ndiminterpolation as ndip
from ..fitter.template import Template
from ..dir_list import template_path
pi = np.pi
Mpc = 3.08567758e24 #unit: cm
#Func_bgn:
#-------------------------------------#
# Created by SGJY, May. 3, 2016 #
#-------------------------------------#
#From: dev_CLUMPY_intp.ipynb
### CLUMPY template
try:
#clumpyFile = template_path+"clumpy_models_201410_tvavg.hdf5"
clumpyFile = template_path+"clumpy_fnu_norm.hdf5"
h = h5py.File(clumpyFile,"r")
theta = [np.unique(h[par][:]) for par in ("i","tv","q","N0","sig","Y","wave")]
data = h["flux_tor"].value
wave_tmpl = h["wave"].value
ip = ndip.NdimInterpolation(data,theta)
h.close()
except:
print("[model_functions]: Fail to import the CLUMPY template from: {0}".format(clumpyFile))
ip = None
waveLim = [1e-2, 1e3]
def CLUMPY_intp(logL, i, tv, q, N0, sigma, Y, wave, DL, z, frame="rest", t=ip, waveLim=waveLim):
"""
    This function provides the dust torus MIR flux with the CLUMPY model.
Parameters
----------
logL : float
The log of the torus luminosity, unit erg/s.
i : float
The inclination angle of the torus to the observer.
tv : float
The visual optical depth of individual clumps.
q : float
The radial distribution power law exponent of the dust clumps.
N0 : float
The total number of clumps along the radial equatorial ray.
sigma : float
        The angular distribution width of the torus.
Y : float
The radial torus relative thickness, Y=Ro/Rd.
wave : float array
The wavelength at which we want to calculate the flux.
DL : float
The luminosity distance, unit: Mpc.
z : float
The redshift.
frame : string
"rest" for the rest frame SED and "obs" for the observed frame.
t : NdimInterpolation class
The NdimInterpolation class obtained from Nikutta"s interpolation code.
waveLim : list
The min and max of the wavelength covered by the template.
Returns
-------
flux : array of float
The flux density (F_nu) from the model, unit: mJy.
Notes
-----
None.
"""
vector = np.array([i, tv, q, N0, sigma, Y])
if frame == "rest":
idx = 2.0
elif frame == "obs":
idx = 1.0
else:
raise ValueError("The frame '{0}' is not recognised!".format(frame))
f0 = (1 + z)**idx * 10**(logL+26) / (4 * pi * (DL * Mpc)**2.) #Convert to mJy unit
fltr = (wave > waveLim[0]) & (wave < waveLim[1])
if np.sum(fltr) == 0:
return np.zeros_like(wave)
flux = np.zeros_like(wave)
flux[fltr] = f0 * t(vector, wave[fltr])
return flux
#Func_end
'''
fp = open("/Users/jinyi/Work/mcmc/Fitter/template/clumpy_kdt.tmplt")
tp_clumpy = pickle.load(fp)
fp.close()
tclumpy = Template(**tp_clumpy)
def Clumpy(logL, i, tv, q, N0, sigma, Y, wave, DL, t=tclumpy):
"""
The CLUMPY model generating the emission from the clumpy torus.
Parameters
----------
logL : float
The log of the torus luminosity, unit erg/s.
i : float
The inclination angle of the torus to the observer.
tv : float
The visual optical depth of individual clumps.
q : float
The radial distribution power law exponent of the dust clumps.
N0 : float
The total number of clumps along the radial equatorial ray.
sig : float
The angular distribution with of the torus.
Y : float
The radial torus relative thickness, Y=Ro/Rd.
wave : float array
The wavelength at which we want to calculate the flux.
DL : float
The luminosity distance
t : NdimInterpolation class
The NdimInterpolation class obtained from Nikutta"s interpolation code.
Returns
-------
flux : array of float
The flux density (F_nu) from the model.
Notes
-----
None.
"""
par = [i, tv, q, N0, sigma, Y]
f0 = 10**(logL+26) / (4 * np.pi * (DL * Mpc)**2.) #Convert to mJy unit
flux = f0 * t(wave, par)
return flux
'''
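
if __name__ == "__main__":
    # A minimal usage sketch of CLUMPY_intp. The torus parameters are
    # illustrative assumptions and must lie inside the CLUMPY grid; the call is
    # skipped if the template failed to load above.
    if ip is None:
        print "The CLUMPY template is not available."
    else:
        wave = np.logspace(0, 3, 200) #unit: micron
        flux = CLUMPY_intp(logL=45.0, i=30.0, tv=60.0, q=1.5, N0=5.0,
                           sigma=30.0, Y=30.0, wave=wave, DL=100.0, z=0.05)
        print "Peak flux density (mJy):", flux.max()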
| 4,132 | 29.843284 | 96 | py |
Fitter | Fitter-master/sedfit/models/__init__.py | 0 | 0 | 0 | py |
|
Fitter | Fitter-master/sedfit/models/model_cat3d_G.py | import numpy as np
import cPickle as pickle
from ..fitter.template import Template
from scipy.interpolate import splev
from ..dir_list import template_path
Msun = 1.9891e33 #unit: gram
Mpc = 3.08567758e24 #unit: cm
m_H = 1.6726219e-24 #unit: gram
r0 = 1.1 # pc
fp = open(template_path+"Cat3d_G.tmplt")
tp_cat3d_G = pickle.load(fp)
fp.close()
tcat3d_G = Template(**tp_cat3d_G)
waveLim = [1.0, 1e4]
def Cat3d_G(a, theta, N0, i, logL, DL, z, wave, frame="rest", t=tcat3d_G, waveLim=waveLim):
"""
This function generates the modified CLUMPY torus radiation from Garcia-Gonzalez et al. 2017.
Parameters
----------
a : float
The index of the radial dust cloud distribution power law.
N0 : float
The number of clouds along an equatorial line-of-sight.
theta : degree
The half-opening angle.
i : degree
Inclination angle.
logL : float
Torus luminosity erg/s in log.
DL : float
The luminosity distance.
z : float
The redshift.
wave : float array
The wavelengths of the output flux.
frame : string
"rest" for the rest frame SED and "obs" for the observed frame.
    t : Template object
        The template of the torus model provided by the user.
waveLim : list
The min and max of the wavelength covered by the template.
Returns
-------
flux : float array
The flux density of the model.
Notes
-----
None.
"""
fltr = (wave > waveLim[0]) & (wave < waveLim[1])
if np.sum(fltr) == 0:
return np.zeros_like(wave)
para = [a, theta, N0, i]
if frame == "rest":
idx = 2.0
elif frame == "obs":
idx = 1.0
else:
raise ValueError("The frame '{0}' is not recognised!".format(frame))
f0 = (1 + z)**idx * 10**(logL - 46) * (r0 / DL * 1e-6)**2
flux = np.zeros_like(wave)
flux[fltr] = f0 * t(wave[fltr], para) * 1e29 # unit: mJy
return flux
def Cat3d_G_PosPar(a, theta, N0, i, logL, t=tcat3d_G):
"""
    Position the parameters on the parameter grid. The function returns the
    grid point nearest to the input parameters, found with the KDTree of the
    template.
Parameters
----------
a : float
The index of the radial dust cloud distribution power law.
N0 : float
The number of clouds along an equatorial line-of-sight.
theta : degree
The half-opening angle.
i : degree
Inclination angle.
logL : float
Torus luminosity erg/s in log.
    t : Template object
        The template of the torus model provided by the user.
Returns
-------
param : list
The parameters of the template used for the input parameters.
Notes
-----
None.
"""
par = [a, theta, N0, i]
Par = t.get_nearestParameters(par)
Pardict = {
'a': Par[0],
'theta': Par[1],
'N0': Par[2],
'i': Par[3],
'logL': logL
}
return Pardict
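
if __name__ == "__main__":
    # A minimal usage sketch of Cat3d_G. The parameter values are illustrative
    # assumptions and must lie within the grid of "Cat3d_G.tmplt".
    wave = np.logspace(0, 3, 200) #unit: micron
    flux = Cat3d_G(a=-2.0, theta=30.0, N0=7.0, i=30.0, logL=45.0, DL=100.0,
                   z=0.02, wave=wave)
    print Cat3d_G_PosPar(a=-2.0, theta=30.0, N0=7.0, i=30.0, logL=45.0)
    print "Peak flux density (mJy):", flux.max()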
| 2,993 | 25.034783 | 97 | py |
Fitter | Fitter-master/sedfit/models/model_cat3d_H_wind.py | import numpy as np
import cPickle as pickle
from ..fitter.template import Template
from scipy.interpolate import splev
from ..dir_list import template_path
Msun = 1.9891e33 #unit: gram
Mpc = 3.08567758e24 #unit: cm
m_H = 1.6726219e-24 #unit: gram
r0 = 1.1 # pc
fp = open(template_path+"Cat3d_H_wind.tmplt")
tp_cat3d_H = pickle.load(fp)
fp.close()
tcat3d_H_wind = Template(**tp_cat3d_H)
waveLim = [0.1, 1e4]
def Cat3d_H_wind(a, h, N0, i, fwd, aw, thetaw, thetasig, logL, DL, z, wave, frame="rest", t=tcat3d_H_wind, waveLim=waveLim):
"""
This function generates the modified CLUMPY torus radiation from Garcia-Gonzalez et al. 2017.
Parameters
----------
a : float
The index of the radial dust cloud distribution power law.
N0 : float
The number of clouds along an equatorial line-of-sight.
h : float
Vertical Gaussian distribution dimensionless scale height.
i : degree
Inclination angle.
logL : float
UV luminosity erg/s in log.
DL : float
The luminosity distance.
z : float
The redshift.
wave : float array
The wavelengths of the output flux.
frame : string
"rest" for the rest frame SED and "obs" for the observed frame.
    t : Template object
        The template of the torus model provided by the user.
waveLim : list
The min and max of the wavelength covered by the template.
Returns
-------
flux : float array
The flux density of the model.
Notes
-----
None.
"""
fltr = (wave > waveLim[0]) & (wave < waveLim[1])
if np.sum(fltr) == 0:
return np.zeros_like(wave)
para = [a, h, N0, i, fwd, aw, thetaw, thetasig]
if frame == "rest":
idx = 2.0
elif frame == "obs":
idx = 1.0
else:
raise ValueError("The frame '{0}' is not recognised!".format(frame))
f0 = (1 + z)**idx * 10**(logL - 46) * (r0 / DL * 1e-6)**2
flux = np.zeros_like(wave)
flux[fltr] = f0 * t(wave[fltr], para) * 1e29 # unit: mJy
return flux
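#A minimal usage sketch of Cat3d_H_wind with purely illustrative values (they
#must fall inside the grid of "Cat3d_H_wind.tmplt"):
#    wave = np.logspace(0, 3, 200) #unit: micron
#    flux = Cat3d_H_wind(a=-2.0, h=0.3, N0=7., i=30., fwd=0.5, aw=-1.0,
#                        thetaw=30., thetasig=10., logL=45., DL=100., z=0.02,
#                        wave=wave) #unit: mJy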
def Cat3d_H_wind_PosPar(a, h, N0, i, fwd, aw, thetaw, thetasig, logL, t=tcat3d_H_wind):
"""
    Position the parameters on the parameter grid. The function returns the
    grid point nearest to the input parameters, found with the KDTree of the
    template.
Parameters
----------
a : float
The index of the radial dust cloud distribution power law.
N0 : float
The number of clouds along an equatorial line-of-sight.
h : float
Vertical Gaussian distribution dimensionless scale height.
i : degree
Inclination angle.
logL : float
Torus luminosity erg/s in log.
    t : Template object
        The template of the torus model provided by the user.
Returns
-------
param : list
The parameters of the template used for the input parameters.
Notes
-----
None.
"""
par = [a, h, N0, i, fwd, aw, thetaw, thetasig]
Par = t.get_nearestParameters(par)
Pardict = {
'a': Par[0],
'h': Par[1],
'N0': Par[2],
'i': Par[3],
'fwd': Par[4],
'aw': Par[5],
'thetaw': Par[6],
'thetasig': Par[7],
'logL': logL
}
return Pardict | 3,316 | 26.641667 | 124 | py |
Fitter | Fitter-master/sedfit/models/model_pah.py | import numpy as np
from scipy.interpolate import interp1d
from ..dir_list import template_path
Mpc = 3.08567758e24 #unit: cm
pi = np.pi
tb = np.genfromtxt(template_path+"PAH.template_HLC.dat")
twave = tb[:, 0]
tflux_temp = tb[:, 1]
norm = np.trapz(tflux_temp, twave)
tflux = tflux_temp / norm
tPAH = interp1d(twave, tflux)
waveLim = [np.min(twave), np.max(twave)]
def pah(logLpah, wave, DL, z, frame="rest", t=tPAH, waveLim=waveLim):
"""
Calculate the model flux density of the PAH template.
Parameters
----------
logLpah : float
The log luminosity of the PAH template, unit: erg/s.
wave : float array
        The wavelength array of the model SED; the unit should match the template.
DL : float
The luminosity density, unit: Mpc.
z : float
The redshift.
frame : string
"rest" for the rest frame SED and "obs" for the observed frame.
t : function
The interpolated PAH template, which should have been normalised.
waveLim : list
The min and max of the wavelength covered by the template.
Returns
-------
flux : float array
The model flux density, unit: mJy.
Notes
-----
None.
"""
if frame == "rest":
idx = 2.0
elif frame == "obs":
idx = 1.0
else:
raise ValueError("The frame '{0}' is not recognised!".format(frame))
flux = np.zeros_like(wave)
fltr = (wave > waveLim[0]) & (wave < waveLim[1])
if np.sum(fltr) == 0:
return np.zeros_like(wave)
f0 = (1 + z)**idx * 10**(logLpah+26) / (4 * pi * (DL * Mpc)**2.) #Convert to mJy unit
flux[fltr] = f0 * t(wave[fltr])
return flux
if __name__ == "__main__":
import matplotlib.pyplot as plt
print waveLim
wave = 10**np.linspace(0, 3, 1000)
flux = pah(40, wave, DL=30, z=0)
plt.plot(wave, flux)
plt.xscale("log")
plt.yscale("log")
plt.show()
print np.trapz(flux, wave) * (4 * pi * (30 * Mpc)**2.) / 1e26
| 1,971 | 26.774648 | 89 | py |
Fitter | Fitter-master/sedfit/fitter/basicclass.py | #The code comes from Composite_Model_Fit/dl07/dev_DataClass.ipynb
import types
import numpy as np
from collections import OrderedDict
import matplotlib.pyplot as plt
from .. import model_functions as sedmf
#Data class#
#----------#
#The basic class of data unit
class DataUnit(object):
def __init__(self, x, y, e, f=1):
self.__x = x #Data x
self.__y = y #Data value
self.__e = e #Error
if (f==1) | (f==0):
self.__f = int(f) #Flag to use the data or not
else:
raise ValueError("The flag should be 0 or 1!")
def get_x(self):
return self.__x
def get_y(self):
return self.__y
def get_e(self):
return self.__e
def get_f(self):
return self.__f
def gdu(self):
"""
Get the data unit.
"""
return (self.__x, self.__y, self.__e, self.__f)
def set_flag(self, f):
"""
Change the flag of the data.
"""
if (f==1) | (f==0):
self.__f = int(f)
else:
raise ValueError("The flag should be 0 or 1!")
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
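#A minimal usage sketch of DataUnit with made-up numbers: one photometric point
#at 3.6 micron with flux 10.0 mJy, uncertainty 1.0 mJy, used in the fit (f=1):
#    du = DataUnit(x=3.6, y=10.0, e=1.0, f=1)
#    x, y, e, f = du.gdu()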
#The discrete data set unit
class DiscreteSet(object):
def __init__(self, nameList, xList, yList, eList, fList, dataType=None):
self.__nameList = nameList
self.__defaultType = ["name", "x", "y", "e", "f"]
if dataType is None:
self.__userType = []
else:
if len(dataType) != 5:
raise ValueError("The dataType should contain 5 strings!")
self.__userType = dataType
self.__dataMap = {}
for loop in range(5):
#Build a map from the new dataType to the defaultType
self.__dataMap[dataType[loop]] = self.__defaultType[loop]
args = [xList, yList, eList, fList]
self.__unitNumber = len(nameList)
match = True
for arg in args:
match = match&(self.__unitNumber==len(arg))
if not match:
raise ValueError("The inputs do not match in length!")
#Generate the dict of discrete unit
self.__dataUnitDict = {}
for loop in range(self.__unitNumber):
name = nameList[loop]
x = xList[loop]
y = yList[loop]
e = eList[loop]
f = fList[loop]
self.__dataUnitDict[name] = DataUnit(x, y, e, f)
self.__xList = []
self.__yList = []
self.__eList = []
self.__fList = []
for name in nameList:
x, y, e, f = self.__dataUnitDict[name].gdu()
self.__xList.append(x)
self.__yList.append(y)
self.__eList.append(e)
self.__fList.append(f)
def __getitem__(self, i):
"""
Get the data of one unit or one array of data for all the units.
"""
nameList = self.__nameList
if i in nameList:
item = self.__dataUnitDict[i].gdu()
elif i in self.__userType:
ncol = self.__userType.index(i) - 1
item = []
for name in nameList:
if ncol == -1:
item.append(name)
else:
item.append( self.__dataUnitDict[name].gdu()[ncol] )
elif i in self.__defaultType:
ncol = self.__defaultType.index(i) - 1
item = []
for name in nameList:
if ncol == -1:
item.append(name)
else:
item.append( self.__dataUnitDict[name].gdu()[ncol] )
else:
raise ValueError("The item is not recognised!")
return item
def get_dataUnitDict(self):
return self.__dataUnitDict
def get_dataDict(self):
"""
Generate the data dict.
"""
nameList = self.__nameList
dataDict = {}
for name in nameList:
dataDict[name] = self.__dataUnitDict[name].gdu()
return dataDict
def get_nameList(self):
return self.__nameList
def get_xList(self):
return self.__xList
def get_yList(self):
return self.__yList
def get_eList(self):
return self.__eList
def get_fList(self):
return self.__fList
def get_List(self, typeName):
if typeName in self.__userType:
exec "requiredList = self.get_{0}List()".format(self.__dataMap[typeName])
elif typeName in self.__defaultType:
exec "requiredList = self.get_{0}List()".format(typeName)
else:
raise KeyError("The key '{0}' is not found!".format(typeName))
return requiredList
def set_fList(self, fList):
nameList = self.__nameList
if len(fList) != self.__unitNumber:
raise ValueError("The fList size is incorrect!")
for loop in range(self.__unitNumber):
name = nameList[loop]
self.__dataUnitDict[name].set_flag(fList[loop])
self.__fList[loop] = int(fList[loop])
return 1
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
#The continual data set unit
class ContinueSet(object):
def __init__(self, xList, yList, eList, fList, dataType=None):
self.__defaultType = ["x", "y", "e", "f"]
if dataType is None:
self.__userType = []
else:
if len(dataType) != 4:
raise ValueError("The dataType should contain 4 strings!")
self.__userType = dataType
self.__dataMap = {}
for loop in range(4):
#Build a map from the new dataType to the defaultType
self.__dataMap[dataType[loop]] = self.__defaultType[loop]
args = [yList, eList, fList]
self.__unitNumber = len(xList)
match = True
for arg in args:
match = match&(self.__unitNumber==len(arg))
if not match:
raise ValueError("The inputs do not match in length!")
#Generate the dict of discrete unit
self.__dataUnitList = []
for loop in range(self.__unitNumber):
x = xList[loop]
y = yList[loop]
e = eList[loop]
f = fList[loop]
self.__dataUnitList.append( DataUnit(x, y, e, f) )
self.__xList = []
self.__yList = []
self.__eList = []
self.__fList = []
for dunit in self.__dataUnitList:
x, y, e, f = dunit.gdu()
self.__xList.append(x)
self.__yList.append(y)
self.__eList.append(e)
self.__fList.append(f)
def __getitem__(self, i):
"""
Get the data of one unit or one array of data for all the units.
"""
if i in self.__userType:
ncol = self.__userType.index(i)
item = []
for loop in range(self.__unitNumber):
item.append( self.__dataUnitList[loop].gdu()[ncol] )
        elif i in self.__defaultType:
            ncol = self.__defaultType.index(i)
            item = []
            for loop in range(self.__unitNumber):
                item.append( self.__dataUnitList[loop].gdu()[ncol] )
        elif (type(i)==types.IntType) & (i >= 0) & (i < self.__unitNumber):
item = self.__dataUnitList[i].gdu()
else:
raise ValueError("The item is not recognised!")
return item
def get_dataUnitList(self):
return self.__dataUnitList
def get_dataDict(self):
dataDict = {}
if len(self.__userType) > 0:
dtList = self.__userType
else:
dtList = self.__defaultType
for loop_dt in range(len(dtList)):
dt = dtList[loop_dt]
dataDict[dt] = []
for loop_un in range(self.__unitNumber):
dataDict[dt].append(self.__dataUnitList[loop_un].gdu()[loop_dt])
return dataDict
def get_xList(self):
return self.__xList
def get_yList(self):
return self.__yList
def get_eList(self):
return self.__eList
def get_fList(self):
return self.__fList
def get_List(self, typeName):
if typeName in self.__userType:
exec "requiredList = self.get_{0}List()".format(self.__dataMap[typeName])
elif typeName in self.__defaultType:
exec "requiredList = self.get_{0}List()".format(typeName)
else:
raise KeyError("The key '{0}' is not found!".format(typeName))
return requiredList
def set_fList(self, fList):
if len(fList) != self.__unitNumber:
raise ValueError("The fList size is incorrect!")
for loop in range(self.__unitNumber):
self.__dataUnitList[loop].set_flag(fList[loop])
self.__fList[loop] = int(fList[loop])
return 1
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
#The total data set that contains a number of "DiscreteSet"s and "ContinueSet"s
class DataSet(object):
"""
The data set that contains discrete and continual data.
Parameters
----------
dSetDict : dict
The dict of DiscreteSet.
cSetDict : dict
The dict of ContinueSet.
"""
def __init__(self, dSetDict={}, cSetDict={}):
#Check the data format
self.__dataType = ["x", "y", "e", "f"]
self.__discreteSetDict = {}
for dSetName in dSetDict.keys():
dSet = dSetDict[dSetName]
if isinstance(dSet, DiscreteSet):
self.__discreteSetDict[dSetName] = dSet
else:
raise ValueError("The {0} discrete set is incorrect!".format(dSetName))
self.__continueSetDict = {}
for cSetName in cSetDict.keys():
cSet = cSetDict[cSetName]
if isinstance(cSet, ContinueSet):
self.__continueSetDict[cSetName] = cSet
def add_DiscreteSet(self, dSetDict):
for dSetName in dSetDict.keys():
dSet = dSetDict[dSetName]
if isinstance(dSet, DiscreteSet):
self.__discreteSetDict[dSetName] = dSet
else:
raise ValueError("The {0} discrete set is incorrect!".format(dSetName))
def add_ContinueSet(self, cSetDict):
for cSetName in cSetDict.keys():
cSet = cSetDict[cSetName]
if isinstance(cSet, ContinueSet):
self.__continueSetDict[cSetName] = cSet
def get_DiscreteSetDict(self):
return self.__discreteSetDict
def get_ContinueSetDict(self):
return self.__continueSetDict
def get_dsDict(self):
dsDict = {}
for dSetName in self.__discreteSetDict.keys():
dSet = self.__discreteSetDict[dSetName]
dsDict[dSetName] = dSet.get_dataDict()
return dsDict
def get_csDict(self):
csDict = {}
for cSetName in self.__continueSetDict.keys():
cSet = self.__continueSetDict[cSetName]
csDict[cSetName] = cSet.get_dataDict()
return csDict
def get_dsArrays(self):
dsaDict = {}
for dSetName in self.__discreteSetDict.keys():
dSet = self.__discreteSetDict[dSetName]
dsArray = []
for d in self.__dataType:
exec "dsArray.append(dSet.get_{0}List())".format(d)
dsaDict[dSetName] = dsArray
return dsaDict
def get_csArrays(self):
csaDict = {}
for cSetName in self.__continueSetDict.keys():
cSet = self.__continueSetDict[cSetName]
csArray = []
for d in self.__dataType:
exec "csArray.append(cSet.get_{0}List())".format(d)
csaDict[cSetName] = csArray
return csaDict
def get_unitNameList(self):
unlList = []
for dSetName in self.__discreteSetDict.keys():
dSet = self.__discreteSetDict[dSetName]
unlList.append(dSet.get_nameList())
unList = []
[unList.extend(unl) for unl in unlList]
return unList
def get_dsList(self, typeName):
if not typeName in self.__dataType:
raise KeyError("The key '{0}' is not a data type!".format(typeName))
dslList = []
for dSetName in self.__discreteSetDict.keys():
dSet = self.__discreteSetDict[dSetName]
dslList.append(dSet.get_List(typeName))
dsList = []
[dsList.extend(dsl) for dsl in dslList]
return dsList
def get_csList(self, typeName):
if not typeName in self.__dataType:
raise KeyError("The key '{0}' is not a data type!".format(typeName))
cslList = []
for cSetName in self.__continueSetDict.keys():
cSet = self.__continueSetDict[cSetName]
cslList.append(cSet.get_List(typeName))
csList = []
[csList.extend(csl) for csl in cslList]
return csList
def get_List(self, typeName):
if not typeName in self.__dataType:
raise KeyError("The key '{0}' is not a data type!".format(typeName))
dsList = self.get_dsList(typeName)
csList = self.get_csList(typeName)
return dsList + csList
def check_dsData(self):
"""
Return the number of discrete data sets.
"""
return len(self.__discreteSetDict.keys())
def check_csData(self):
"""
Return the number of continual data sets.
"""
return len(self.__continueSetDict.keys())
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
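#A minimal usage sketch of assembling a DataSet (all values are made-up): a
#photometric DiscreteSet is wrapped into a DataSet, from which flattened lists
#can be pulled for the fit.
#    phot = DiscreteSet(nameList=["j", "h", "ks"],
#                       xList=[1.25, 1.65, 2.17], yList=[5.0, 6.0, 7.0],
#                       eList=[0.5, 0.6, 0.7], fList=[1, 1, 1],
#                       dataType=["name", "x", "y", "e", "f"])
#    ds = DataSet(dSetDict={"phot": phot})
#    print ds.get_List("y")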
#Model class#
#-----------#
#The basic class of model
class ModelFunction(object):
"""
The functions of each model component. The model is multiplicative if
multiList is used.
Parameters
----------
function : string
The variable name of the function.
xName : string
The name of the active variable.
parFitDict : dict
The name of the fitting variables.
parAddDict : dict
The name of the additional variables for this model.
multiList (optional): list
If provided, the model is multiplicative. The model will be
multiplied to the models in the list. Otherwise, the model will be
added with other models that are not multiplicative.
Notes
----
Revised by SGJY at Jan. 5, 2018 in KIAA-PKU.
"""
def __init__(self, function, xName, parFitDict={}, parAddDict={}, multiList=None):
self.__function = function
self.xName = xName
self.parFitDict = parFitDict
self.parAddDict = parAddDict
self.multiList = multiList
def __call__(self, x):
kwargs = {}
#Add in the parameters for fit
kwargs[self.xName] = x
for parName in self.parFitDict.keys():
kwargs[parName] = self.parFitDict[parName]["value"]
for parName in self.parAddDict.keys():
kwargs[parName] = self.parAddDict[parName]
exec "y = sedmf.{0}(**kwargs)".format(self.__function)
return y
def if_Add(self):
"""
Check whether the function is to add or multiply.
"""
if self.multiList is None:
return True
else:
return False
def get_function_name(self):
"""
Get the function name.
"""
return self.__function
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
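#A minimal construction sketch (the function name and numbers are made-up; the
#named function is assumed to exist in sedfit.model_functions). Each fitted
#parameter carries a dict with "value", "range", "vary" and optionally "latex",
#as expected by ModelCombiner below:
#    mf = ModelFunction(function="BlackBody", xName="wave",
#                       parFitDict=OrderedDict([
#                           ("logOmega", {"value": -20.0, "range": [-30.0, -10.0],
#                                         "vary": True, "latex": r"$\log \Omega$"}),
#                           ("T", {"value": 1500.0, "range": [500.0, 2000.0],
#                                  "vary": True, "latex": r"$T$"})]),
#                       parAddDict={})
#    flux = mf(np.logspace(0, 2, 10))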
class ModelCombiner(object):
"""
The object to combine all the model components.
Parameters
----------
modelDict : dict (better to be ordered dict)
The dict containing all the model functions.
xList : array like
The array of the default active variable.
QuietMode : bool
Use verbose mode if True.
Notes
-----
None.
"""
def __init__(self, modelDict, xList, QuietMode=True):
self.__modelDict = modelDict
self._modelList = modelDict.keys()
self.__x = xList
self._mltList = [] # The list of models to multiply to other models
self._addList = [] # The list of models to add together
for modelName in self._modelList:
mf = modelDict[modelName]
if mf.if_Add():
self._addList.append(modelName)
else:
self._mltList.append(modelName)
if not QuietMode:
print "Add: {0}".format(self._addList)
print "Multiply: {0}".format(self._mltList)
def get_xList(self):
"""
Get the array of the default active variable.
Parameters
----------
None.
Returns
-------
The array of the default active variable.
"""
return self.__x
def set_xList(self, xList):
"""
Reset the default active variable array.
Parameters
----------
xList : array like
The new array of the active variable.
Returns
-------
None.
"""
self.__x = xList
def combineResult(self, x=None):
"""
Return the model result combining all the components.
Parameters
----------
x (optional) : array like
The active variable of the models.
Returns
-------
result : array like
The result of the models all combined.
Notes
-----
None.
"""
if x is None:
x = self.__x
#-> Calculate the add model components
addCmpDict = {}
for modelName in self._addList:
mf = self.__modelDict[modelName]
addCmpDict[modelName] = mf(x)
#-> Manipulate the model components
for modelName in self._mltList:
mf = self.__modelDict[modelName]
my = mf(x) # multiplied y component
#--> Multiply the current component to the target models
for tmn in mf.multiList:
addCmpDict[tmn] *= my
#-> Add up all the add models
result = np.zeros_like(x)
for modelName in self._addList:
result += addCmpDict[modelName]
return result
def componentResult(self, x=None):
"""
Return the results of all the add components multiplied by the
multiplicative models correspondingly.
Parameters
----------
x (optional) : array like
The active variable of the models.
Returns
-------
result : ordered dict
The result of the model components.
Notes
-----
None.
"""
if x is None:
x = self.__x
#-> Calculate the add model components
result = OrderedDict()
for modelName in self._addList:
mf = self.__modelDict[modelName]
result[modelName] = mf(x)
#-> Manipulate the model components
for modelName in self._mltList:
mf = self.__modelDict[modelName]
my = mf(x) # multiplied y component
#--> Multiply the current component to the target models
for tmn in mf.multiList:
result[tmn] *= my
return result
def componentAddResult(self, x=None):
"""
Return the original results of add models without multiplied other models.
Parameters
----------
x (optional) : array like
The active variable of the models.
Returns
-------
result : ordered dict
The result of the model components.
Notes
-----
None.
"""
if x is None:
x = self.__x
result = OrderedDict()
for modelName in self._addList:
result[modelName] = self.__modelDict[modelName](x)
return result
def componentMltResult(self, x=None):
"""
Return the original results of multiplicative models.
Parameters
----------
x (optional) : array like
The active variable of the models.
Returns
-------
result : ordered dict
The result of the model components.
Notes
-----
None.
"""
if x is None:
x = self.__x
result = OrderedDict()
for modelName in self._mltList:
result[modelName] = self.__modelDict[modelName](x)
return result
def get_modelDict(self):
"""
Get the dict of all the models.
"""
return self.__modelDict
def get_modelAddList(self):
"""
Get the name list of the add models.
"""
return self._addList
def get_modelMltList(self):
"""
Get the name list of the multiply models.
"""
return self._mltList
def get_modelParDict(self):
modelParDict = OrderedDict()
for modelName in self._modelList:
model = self.__modelDict[modelName]
modelParDict[modelName] = model.parFitDict
return modelParDict
def get_parList(self):
"""
        Return the list of the values of all the fit parameters.
"""
parList = []
for modelName in self._modelList:
model = self.__modelDict[modelName]
modelParDict = model.parFitDict
for parName in modelParDict.keys():
parList.append(modelParDict[parName]["value"])
return parList
def get_parVaryList(self):
"""
        Return the list of the values of the fit parameters that are allowed to vary.
"""
parList = []
for modelName in self._modelList:
model = self.__modelDict[modelName]
modelParDict = model.parFitDict
for parName in modelParDict.keys():
if modelParDict[parName]["vary"]:
parList.append(modelParDict[parName]["value"])
else:
pass
return parList
def get_parVaryRanges(self):
"""
Return a list of ranges for all the variable parameters.
"""
parRList = []
for modelName in self._modelList:
model = self.__modelDict[modelName]
modelParDict = model.parFitDict
for parName in modelParDict.keys():
if modelParDict[parName]["vary"]:
parRList.append(modelParDict[parName]["range"])
else:
pass
return parRList
def get_parVaryNames(self, latex=True):
"""
Return a list of names for all the variable parameters. The latex format
is preferred. If the latex format is not found, the variable name is used.
"""
parNList = []
for modelName in self._modelList:
model = self.__modelDict[modelName]
modelParDict = model.parFitDict
for parName in modelParDict.keys():
if modelParDict[parName]["vary"]:
if latex:
name = modelParDict[parName].get("latex", parName)
else:
name = parName
parNList.append(name)
else:
pass
return parNList
def updateParFit(self, modelName, parName, parValue, QuietMode=True):
model = self.__modelDict[modelName]
if not QuietMode:
            orgValue = model.parFitDict[parName]["value"]
print "[{0}][{1}] {2}->{3}".format(modelName, parName, orgValue, parValue)
if model.parFitDict[parName]["vary"]:
model.parFitDict[parName]["value"] = parValue
else:
raise RuntimeError("[ModelCombiner]: {0}-{1} is fixed!".format(modelName, parName))
def updateParList(self, parList):
"""
        Update the fit parameters from a list.
"""
counter = 0
for modelName in self._modelList:
model = self.__modelDict[modelName]
modelParDict = model.parFitDict
for parName in modelParDict.keys():
if modelParDict[parName]["vary"]:
modelParDict[parName]["value"] = parList[counter]
counter += 1
else:
pass
def updateParAdd(self, modelName, parName, parValue, QuietMode=True):
model = self.__modelDict[modelName]
if not QuietMode:
orgValue = model.parAddDict[parName]
print "[{0}][{1}] {2}->{3}".format(modelName, parName, orgValue, parValue)
model.parAddDict[parName] = parValue
def plot(self, x=None, colorList=None, FigAx=None, DisplayPars=False,
tKwargs=None, cKwargs={}, useLabel=True):
if x is None:
x = self.__x
if FigAx is None:
fig = plt.figure()
ax = plt.gca()
FigAx = (fig, ax)
else:
fig, ax = FigAx
modelDict = self.__modelDict
modelList = self.get_modelAddList() #modelDict.keys()
TextIterm = lambda text, v1, v2: text.format(v1, v2)
textList = []
#yTotal = np.zeros_like(x)
yTotal = self.combineResult(x=x)
yCmpnt = self.componentResult(x=x) #The best-fit components
if colorList is None:
colorList = ["orange", "green", "blue", "magenta", "yellow", "cyan"]
nColor = len(colorList)
counter = 0
for modelName in modelList:
textList.append( "<{0}>\n".format(modelName) )
mf = modelDict[modelName]
parFitDict = mf.parFitDict
for parName in parFitDict.keys():
textList.append( TextIterm("{0}: {1:.2f}\n", parName,
parFitDict[parName]["value"]) )
y = yCmpnt[modelName]
if useLabel:
cLabel = modelName
else:
cLabel = None
ax.plot(x, y, color=colorList[counter%nColor], label=cLabel, **cKwargs)
counter += 1
if useLabel:
tLabel = "Total"
else:
tLabel = None
if tKwargs is None:
ax.plot(x, yTotal, color="k", label=tLabel)
else:
ax.plot(x, yTotal, label=tLabel, **tKwargs)
#fig.set_size_inches(8, 6)
text = "".join(textList)
if DisplayPars:
ax.text(1.02, 1.0, text, #bbox=dict(facecolor="white", alpha=0.75),
verticalalignment="top", horizontalalignment="left",
transform=ax.transAxes, fontsize=14)
return FigAx
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
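#A minimal usage sketch of ModelCombiner (kept as comments, not executed). It
#assumes "modelDict" maps component names to ModelFunction objects, "xList" is
#the default wavelength array and "newParList" is a list of free-parameter values:
#    sed_model = ModelCombiner(modelDict, xList)
#    y_total = sed_model.combineResult()   # total model evaluated on xList
#    y_cmpnt = sed_model.componentResult() # OrderedDict of the "+" components
#    sed_model.updateParList(newParList)   # push a new set of free-parameter values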
#The function generates the ModelCombiner from the input model dict
def Model_Generator(input_model_dict, func_lib, x_list, par_add_dict_all={},
QuietMode=False, **kwargs):
"""
Generate the ModelCombiner object from the input model dict.
Parameters
----------
    input_model_dict : dict (preferably an ordered dict)
        The dict of the input model information.
An example of the format of the dict elements:
"Slope": { # The name of the model is arbitrary.
"function": "Linear" # Necessary to be exactly the same as
# the name of the variable.
"a": { # The name of the first parameter.
"value": 3., # The value of the parameter.
"range": [-10., 10.], # The prior range of the parameter.
"type": "c", # The type (continuous/discrete) of
# the parameter. Currently, it does
# not matter...
"vary": True, # The toggle whether the parameter is
# fixed (if False).
"latex": "$a$", # The format for plotting.
}
"b": {...} # The same format as "a".
}
func_lib : dict
The dict of the information of the functions.
An example of the format of the dict elements:
"Linear":{ # The function name should be exactly
# the same as the name of the function
# variable it refers to.
"x_name": "x", # The active variable of the function.
"param_fit": ["a", "b"], # The name of the parameters that are
# involved in fitting.
"param_add": [], # The name of the additional parameters
# necessary for the function.
"operation": ["+"] # The operation expected for this
# function, for consistency check.
# "+": to add with other "+" components.
# "*": to multiply to other "+"
# components. One model can be both "+"
# and "*".
x_list : array like
The default active variable for the model.
par_add_dict_all : dict
The additional parameters for all the models in input_model_dict.
**kwargs : dict
Additional keywords for the ModelCombiner.
Returns
-------
sed_model : ModelCombiner object
The combined SED model.
Notes
-----
None.
"""
modelDict = OrderedDict()
modelNameList = input_model_dict.keys()
for modelName in modelNameList:
funcName = input_model_dict[modelName]["function"]
funcInfo = func_lib[funcName]
xName = funcInfo["x_name"]
#-> Build up the parameter dictionaries
parFitList = funcInfo["param_fit"]
parAddList = funcInfo["param_add"]
parFitDict = OrderedDict()
parAddDict = {}
for parName in parFitList:
parFitDict[parName] = input_model_dict[modelName][parName]
        for parName in parAddList:
            if parName in par_add_dict_all:
                parAddDict[parName] = par_add_dict_all[parName]
        #-> Check the consistency if the component is multiplicative
multiList = input_model_dict[modelName].get("multiply", None)
if not multiList is None:
#--> The "*" should be included in the operation list.
assert "*" in funcInfo["operation"]
if not QuietMode:
print "[Model_Generator]: {0} is multiplied to {1}!".format(modelName, multiList)
#--> Check further the target models are not multiplicative.
for tmn in multiList:
f_mlt = input_model_dict[tmn].get("multiply", None)
if not f_mlt is None:
raise ValueError("The multiList includes a multiplicative model ({0})!".format(tmn))
modelDict[modelName] = ModelFunction(funcName, xName, parFitDict,
parAddDict, multiList)
sed_model = ModelCombiner(modelDict, x_list, **kwargs)
return sed_model
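#A minimal usage sketch of Model_Generator (kept as comments, not executed).
#The "Slope"/"Linear" entries follow the formats documented in the docstring
#above and are purely illustrative; func_lib is assumed to contain the
#corresponding "Linear" information:
#    input_model_dict = OrderedDict([
#        ("Slope", {
#            "function": "Linear",
#            "a": {"value": 3., "range": [-10., 10.], "type": "c", "vary": True, "latex": "$a$"},
#            "b": {"value": 1., "range": [-10., 10.], "type": "c", "vary": True, "latex": "$b$"},
#        }),
#    ])
#    sed_model = Model_Generator(input_model_dict, func_lib, x_list)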
| 31,802 | 32.266736 | 104 | py |
Fitter | Fitter-master/sedfit/fitter/template.py | import numpy as np
from sklearn.neighbors import KDTree
from sklearn.decomposition import PCA
from scipy.interpolate import splev
class Template(object):
"""
This is the object of a model template.
"""
def __init__(self, tckList, kdTree, parList, modelInfo={}, parFormat=[], readMe=""):
self.__tckList = tckList
self.__kdTree = kdTree
self.__parList = parList
self.__modelInfo = modelInfo
self.__parFormat = parFormat
self._readMe = readMe
def __call__(self, x, pars):
"""
Return the interpolation result of the template nearest the input
parameters.
"""
x = np.array(x)
ind = np.squeeze(self.__kdTree.query(np.atleast_2d(pars), return_distance=False))
tck = self.__tckList[ind]
return splev(x, tck)
def get_nearestParameters(self, pars):
"""
Return the nearest template parameters to the input parameters.
"""
ind = np.squeeze(self.__kdTree.query(np.atleast_2d(pars), return_distance=False))
return self.__parList[ind]
def __getstate__(self):
return self.__dict__.copy()
def __setstate__(self, dict):
self.__dict__ = dict
def get_parList(self):
return self.__parList
def get_modelInfo(self):
return self.__modelInfo
def get_parFormat(self):
return self.__parFormat
def readme(self):
return self._readMe
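#A minimal usage sketch of Template (kept as comments, not executed), assuming
#a grid of model spectra has already been spline-fitted with splrep:
#    tckList = [splrep(wave, flux) for flux in fluxGrid] # one tck per grid point
#    kdt = KDTree(parGrid)                               # parGrid: list of parameter vectors
#    t = Template(tckList, kdt, parGrid)
#    y = t(wave, pars) # spline evaluation of the grid point nearest to pars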
def PCA_decompose(X, n_components, **kwargs):
"""
Use PCA to decompose the input templates.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input templates to decompose.
n_components : float
The number of components kept.
**kwargs : dict
The arguments of PCA function.
Returns
-------
results : dict
X_t : array-like, shape (n_samples, n_components)
The decomposed array of the input array.
cmp : array-like, shape (n_components, n_features)
The components decomposed by PCA.
Notes
-----
None.
"""
pca = PCA(n_components=n_components, **kwargs)
X_t = pca.fit_transform(X)
cmp = pca.components_
evr = pca.explained_variance_ratio_
results = {
"X_t": X_t,
"components": cmp,
"evr": evr
}
return results
def PCA_recover(idx, encoder, decoder):
"""
Recover the PCA decomposed array.
Parameters
----------
idx : float
The index of the template to be recovered.
encoder : HDF5 dataset
The decomposed results of the original data.
decoder : HDF5 dataset
The principle components of the original data.
Returns
-------
results : float array
The recovered data array.
Notes
-----
None.
"""
nSamples = encoder.attrs["nSamples"]
nComponents = encoder.attrs["nComponents"]
nFeatures = decoder.attrs["nFeatures"]
weight = encoder.value[idx, :]
components = decoder.value
result = np.zeros(nFeatures)
for loop in range(nComponents):
result += weight[loop] * components[loop, :]
return result
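#Usage note (the HDF5 layout below is an assumption for illustration): the
#encoder dataset is expected to carry the attrs "nSamples" and "nComponents",
#and the decoder dataset the attr "nFeatures", e.g.
#    flux = PCA_recover(idx, h["encoder"], h["decoder"])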
if __name__ == "__main__":
X = np.array([[-1, -1, -1], [-2, -1, 1], [-1, -2, 0], [1, 1, 2], [2, 1, 0], [1, 2, -1]])
results = PCA_decompose(X, 3)
X_t = results["X_t"]
cmp = results["components"]
print X_t
print cmp
| 3,426 | 25.565891 | 92 | py |
Fitter | Fitter-master/sedfit/fitter/__init__.py | 0 | 0 | 0 | py |
|
Fitter | Fitter-master/examples/config_example_pht.py | ################################################################################
## This config is an example of photometric SED fitting.
## The data used is IRSA13120-5453, a luminous infrared galaxy.
## The adopted models are:
##     BC03 -- Stellar emission
##     Smith07 -- MIR extinction
##     Cat3d_H -- Dust torus emission
## DL07 -- Cold dust emission
##
## The example is created by SGJY at Feb-17-2018 in KIAA-PKU.
################################################################################
import numpy as np
from collections import OrderedDict
################################################################################
# Data #
################################################################################
targname = "IRSA13120-5453" # Target name
redshift = 0.03076 # Redshift
distance = 143.6 # Luminosity distance (Mpc). If it is not provided, the
                     # luminosity distance will be calculated with the redshift
# assuming Planck (2015) cosmology, FlatLambdaCDM(H0=67.8,
# Om0=0.308).
sedFile = "examples/{0}_obs.csed".format(targname) # The path to the SED data.
dataDict = {
"phtName": "Phot", # The name of the photometric data. Use "None" if no
# photometric data is used.
"spcName": None, # The name of the spectral data. Use "None" if no
# spectral data is used.
"bandList_use": [], # The list of the band name to be used. The name should
# be consistent what are provided in data file (forth
# column). If empty, all of the bands in the data are
# used.
"bandList_ignore":[], # The list of the band name to be ignored. The name
# should be consistent what are provided in data file
# (4th column). If empty, none of the bands in the
# data are ignored.
"frame": "obs", # Note the frame of the data. The observed frame ("obs") is
# extensively used.
}
################################################################################
# Model #
################################################################################
waveModel = 10**np.linspace(-0.5, 3.0, 1000) # The wavelength array to calculate
# model SED.
#parAddDict_all = {}
modelDict = OrderedDict(
( # Each element of the dict is one model component. Remove the element to
# remove the component.
("BC03", { # Bruzual & Charlot (2003) SSP model.
"function": "BC03", # The name of functions from sedfit.models.
"logMs":{ # The log10 of stellar mass.
"value": 11.45, # Model parameter, not used in the fit.
"range": [6., 14.], # Model prior range, used to generate
# the uniform prior.
"type": "c", # Indicate the parameter to be continuum, not
# used in the fit.
"vary": True, # Indicate whether this parameter is free
# (True) or fixed (False).
"latex": r"$\mathrm{log}\,M_\mathrm{s}$", # For the
# auxiliary plot
},
"age":{
"value": 5,
"range": [0.3, 10.],
"type": "c",
"vary": False, #True, #
"latex": r"$Age$",
},
}
),
("Extinction", {
"function": "Smith07",
"logtau": {
"value": -3.0,
"range": [-4.0, 1.5], # [-4.0, 1.5],
"type": "c",
"vary": True, # False, #
"latex": r"$\mathrm{log}\,\tau_\mathrm{ext}$",
},
"multiply": ["Torus", "DL07"]
}
),
("Torus", {
"function": "Cat3d_H",
"logL": {
"value": 44.,
"range": [38., 48.],
"type": "c",
"vary": True, # False, #
"latex": r"$\mathrm{log}\,L$",
},
"i": {
"value": 30,
"range": [0, 90], # [47.0, 48.0], #
"type": "c",
"vary": True, # False, #
"latex": r"$i$",
},
"N0": {
"value": 7,
"range": [4., 11], # [6.42, 6.44], #
"type": "c",
"vary": True, # False, #
"latex": r"$N_0$",
},
"h": {
"value": 0,
"range": [0, 1.75],
"type": "c",
"vary": True, # False, #
"latex": r"$h$",
},
"a": {
"value": -0.50,
"range": [-2.75, 0.0],
"type": "c",
"vary": True, # False, #
"latex": r"$a$",
}
}
),
("DL07", {
"function": "DL07",
"logumin": {
"value": 0.0,
"range": [-1.0, 1.4], #log[0.1, 25]
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,U_\mathrm{min}$",
},
"logumax": {
"value": 6,
"range": [3, 6], #log[1e3, 1e6]
"type": "c",
"vary": False, #True, #
"latex": r"$\mathrm{log}\,U_\mathrm{max}$",
},
"qpah": {
"value": 0.47, #10**0.504,
"range": [0.3, 4.8],#10**[-1.0, 0.661],
"type": "c",
"vary": False, #True, #
"latex": r"$q_\mathrm{PAH}$",
},
"loggamma": {
"value": -1.5,
"range": [-5.0, 0.0], #[0.01, 0.03],
"type": "c",
"vary": False, #True, #
"latex": r"$\mathrm{log}\,\gamma$",
},
"logMd": {
"value": 8.70,
"range": [5.0, 11.0], #[9.0, 10.0],
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,M_\mathrm{d}$",
}
}
),
)
)
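#Note: the "multiply" key of the "Extinction" component asks the model
#generator (Model_Generator in the sedfit package) to multiply the extinction
#curve onto the "Torus" and "DL07" components instead of adding it. A sketch
#of how this config is consumed (function and variable names assumed from the
#fitting scripts):
#    sed_model = Model_Generator(modelDict, func_lib, waveModel)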
parTruth = None # Whether to provide the truth of the model.
modelUnct = True # Whether to consider the model uncertainty in the fitting
unctDict = OrderedDict( # Range of the parameters to model the uncertainties.
# These parameters will be marginalized in the end.
(
("lnf" , [-10, 2]), # The prior range of the ln of the ratio between
# the uncertainty over the flux.
("lna" , [-10, 5]), # The prior range of the ln of the amplitude of the
# covariance.
("lntau", [-5, 2.5]), # The prior range of the ln of the correlation
# length of the covariance.
)
)
################################################################################
# emcee #
################################################################################
#emcee options#
#-------------#
burnin = OrderedDict( # The setup for the burnin run using emcee.
(
("sampler" , "EnsembleSampler"), # The name of the sampler. Better to
# just use "EnsembleSampler".
("nwalkers" , 128), # The number of walkers.
("iteration", [600, 600]), # The iteration of burn-in run. Reinitiallize
# the walkers around the likelihood peak.
("thin" , 1), # To thin the recorded sample. Not used unless need to
# run very long chain.
("ball-r" , 0.1), # The radius of the ball as the fraction of the full
# prior range to reinitiallize the walkers.
)
)
final = OrderedDict( # Same as burnin but a different setup.
(
("sampler" , "EnsembleSampler"),
("ntemps" , 4),
("nwalkers" , 128),
("iteration", [1000]), #[1000, 600]),
("thin" , 1),
("ball-r" , 0.01),
)
)
setup = OrderedDict( # The setup for emcee run.
(
("threads" , 4), # Number of threads used for emcee sampler.
# Not used if MPI is using.
("printfrac", 0.1), # The fraction of the lowest likelihood sample to
# be thrown away when print the status. On-the-fly
# print. Not critial.
("pslow" , 16), # The lower error bar of the posterior. On-the-fly
# print. Not critial.
("pscenter" , 50), # The median of the posterior. On-the-fly print. Not
# critial.
("pshigh" , 84), # The high error bar of the posterior. On-the-fly
# print. Not critial.
)
)
emceeDict = OrderedDict( # Emcee run strategy.
(
("BurnIn", burnin), # Use the "burnin" first.
("Final", final), # Use the "final" in the end.
("Setup", setup), # Provide more information for emcee.
)
)
#Postprocess#
#-----------#
ppDict = {
"burn-in" : 500, # The number of sampled points to throw away.
"low" : 16, # The lower error bar of the posterior.
"center" : 50, # The median of the posterior.
"high" : 84, # The high error bar of the posterior.
"nuisance": True, # Do not show the nuisance parameters if True.
"fraction": 0, # The fraction of walkers with the lowest likelihood to be
# dropped.
"savepath": "examples/results/" # The save path.
}
| 10,477 | 41.942623 | 80 | py |
Fitter | Fitter-master/examples/config_example_spc.py | ################################################################################
## This config is an example of full SED fitting.
## The data used is IRSA13120-5453, a luminous infrared galaxy.
## The adopted models are:
##     BC03 -- Stellar emission
##     Smith07 -- MIR extinction (two components, applied to the latter two)
##     Cat3d_H_wind -- Dust torus emission
## DL07 -- Cold dust emission
##
## The example is created by SGJY at Feb-17-2018 in KIAA-PKU.
################################################################################
import numpy as np
from collections import OrderedDict
################################################################################
# Data #
################################################################################
targname = "IRSA13120-5453"
redshift = 0.03076
distance = 143.6 #Luminosity distance
sedFile = "examples/{0}_obs.csed".format(targname)
dataDict = {
"phtName": "Phot",
"spcName": "IRS",
"bandList_use": [],
"bandList_ignore":["WISE_w3", "WISE_w4"],
"frame": "obs",
}
################################################################################
# Model #
################################################################################
waveModel = 10**np.linspace(-0.1, 7.0, 1000)
#parAddDict_all = {}
modelDict = OrderedDict(
(
("BC03", {
"function": "BC03",
"logMs":{
"value": 11.45,
"range": [6., 14.],
"type": "c",
"vary": True,
"latex": r"$\mathrm{log}\,M_\mathrm{s}$",
},
"age":{
"value": 5,
"range": [0.3, 10.],
"type": "c",
"vary": False, #True, #
"latex": r"$Age$",
},
}
),
("Ext_torus", {
"function": "Smith07",
"logtau": {
"value": -3.0,
"range": [-4.0, 1.5], # [-4.0, 1.5],
"type": "c",
"vary": True, # False, #
"latex": r"$\mathrm{log}\,\tau_\mathrm{torus}$",
},
"multiply": ["Torus"]
}
),
("Ext_dl07", {
"function": "Smith07",
"logtau": {
"value": -3.0,
"range": [-4.0, 1.5], # [-4.0, 1.5],
"type": "c",
"vary": True, # False, #
"latex": r"$\mathrm{log}\,\tau_\mathrm{DL07}$",
},
"multiply": ["DL07"]
}
),
("Torus", {
"function": "Cat3d_H_wind",
"a": {
"value": -2.00,
"range": [-3.25, 0.0],
"type": "c",
"vary": True, # False, #
"latex": r"a",
},
"h": {
"value": 0.3,
"range": [0., 0.6],
"type": "c",
"vary": True, # False, #
"latex": r"$h$",
},
"N0": {
"value": 7,
"range": [4., 11], # [6.42, 6.44], #
"type": "c",
"vary": True, # False, #
"latex": r"$N_0$",
},
"i": {
"value": 30,
"range": [0, 90], # [47.0, 48.0], #
"type": "c",
"vary": True, # False, #
"latex": r"$i$",
},
"fwd": {
"value": 0.45,
"range": [0., 1.], # [47.0, 48.0], #
"type": "c",
"vary": True, # False, #
"latex": r"$f_{wd}$",
},
"aw": {
"value": -1.5,
"range": [-2.75, -0.25], # [47.0, 48.0], #
"type": "c",
"vary": True, # False, #
"latex": r"$a_{w}$",
},
"thetaw": {
"value": 30,
"range": [25, 50], # [47.0, 48.0], #
"type": "c",
"vary": True, # False, #
"latex": r"$\theta_{w}$",
},
"thetasig": {
"value": 10,
"range": [7., 16.], # [47.0, 48.0], #
"type": "c",
"vary": True, # False, #
"latex": r"$\theta_{\sigma}$",
},
"logL": {
"value": 44.,
"range": [38., 48.],
"type": "c",
"vary": True, # False, #
"latex": r"$\mathrm{log}\,L$",
}
}
),
("DL07", {
"function": "DL07",
"logumin": {
"value": 0.,
"range": [-1.0, 1.6], #log[0.1, 25]
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,U_\mathrm{min}$",
},
"logumax": {
"value": 6,
"range": [3, 6], #log[1e3, 1e6]
"type": "c",
"vary": False, #True, #
"latex": r"$\mathrm{log}\,U_\mathrm{max}$",
},
"qpah": {
"value": 0.47, #10**0.504,
"range": [0.3, 5.0],#10**[-1.0, 0.661],
"type": "c",
"vary": True, #False, #
"latex": r"$q_\mathrm{PAH}$",
},
"loggamma": {
"value": -2.0,
"range": [-5.0, 0.0], #[0.01, 0.03],
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,\gamma$",
},
"logMd": {
"value": 8.9,
"range": [6.0, 11.0], #[9.0, 10.0],
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,M_\mathrm{d}$",
}
}
),
)
)
parTruth = None #Whether to provide the truth of the model
#modelUnct = True #Whether to consider the model uncertainty in the fitting
unctDict = OrderedDict(
(
("lnf" , [-10., 2]),
("lna" , [-10., 5]),
("lntau", [-10., 0.]),
)
)
################################################################################
# emcee #
################################################################################
#emcee options#
#-------------#
burninDict = OrderedDict(
(
("sampler" , "EnsembleSampler"),
("ntemps" , 16), #The number of temperature ladders only for PTSampler.
("nwalkers" , 128), #The number of walkers.
("iteration", [600, 600]), #The iteration of burn-in run.
("thin" , 1), #To thin the recorded sample.
("ball-r" , 0.1), #The radius of the ball as the fraction of the full range.
)
)
finalDict = OrderedDict(
(
("sampler" , "EnsembleSampler"),
("ntemps" , 4),
("nwalkers" , 128),
("iteration", [1000]),
("thin" , 5),
("ball-r" , 0.01),
)
)
setupDict = OrderedDict(
(
("threads" , 4),
("printfrac", 0.1),
("pslow" , 16),
("pscenter" , 50),
("pshigh" , 84),
)
)
emceeDict = OrderedDict(
(
("Burnin", burninDict),
("Final", finalDict),
("Setup", setupDict),
)
)
#Postprocess#
#-----------#
ppDict = {
"low": 16,
"center": 50,
"high": 84,
"nuisance": True, #False, #
"fraction": 0, #The fraction of walkers to be dropped.
"burn-in": 500,
"savepath": "examples/results/"
}
| 8,441 | 32.5 | 86 | py |
Fitter | Fitter-master/gauss/dnest_discrete.py | import numpy as np
import os
import dnest4
from dnest4.utils import rng
import fitter.basicclass as bc
from gaussian_model import MultiGaussian, GaussianModelDiscrete
import matplotlib.pyplot as plt
import cPickle as pickle
import types
def logLFunc_simple(params, data, model):
parDict = model.get_modelParDict()
pIndex = 0
for modelName in model._modelList:
parFitDict = parDict[modelName]
for parName in parFitDict.keys():
parFitDict[parName]["value"] = params[pIndex]
pIndex += 1
x = np.array(data.get_List('x'))
y = np.array(data.get_List('y'))
e = np.array(data.get_List('e'))
ym = np.array(model.combineResult(x))
#Calculate the log_likelihood
s2 = e**2.
logL = -0.5 * np.sum( (y - ym)**2 / s2 + np.log(2 * np.pi * s2) )
return logL
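#The expression above is the standard Gaussian log-likelihood,
#    lnL = -0.5 * sum_i[ (y_i - ym_i)**2 / s_i**2 + ln(2*pi*s_i**2) ],
#evaluated after writing the sampled parameters back into the model.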
#Load the data#
#-------------#
Nmodel = 5
dataName = "gauss{0}".format(Nmodel)
try:
fp = open("{0}.dict".format(dataName), "r")
except IOError:
raise IOError("{0} does not exist!".format(dataName))
model = pickle.load(fp)
fp.close()
xData = model["x"]
Ndata = len(xData)
fAdd = model["f_add"]
yTrue = model['y_true']
yObsr = model['y_obsr']
yErr = model['y_err']
pValue = model['parameters']
rangeList = model['ranges']
cmpList = model['compnents']
print( "#------Start fitting------#" )
print( "# Ndata: {0}".format(Ndata) )
print( "# Nmodel: {0}".format(Nmodel) )
print( "# f_add: {0}".format(fAdd) )
print( "#-------------------------#" )
#Generate the gaussian model#
#---------------------------#
fp = open("gt.dict", "r")
gt = pickle.load(fp)
fp.close()
pRangeDiscrete = {
"a": list( np.arange(5.0, 20.0, 1.0) ),
"b": list( np.arange(20.0, 580.0, 20.0) ),
"c": list( np.arange(10.0, 100.0, 5.0) )
}
gaussModel = GaussianModelDiscrete(Nmodel, pRangeDiscrete, gt)
modelDict = gaussModel.get_modelParDict()
for modelName in gaussModel._modelList:
model = modelDict[modelName]
for parName in model.keys():
parDict = model[parName]
print("{0}: {1} ({2})".format(parName, parDict["value"], parDict["range"]))
#Construct the DNest4Model#
#-------------------------#
nameList = ["p{0}".format(i) for i in range(Ndata)]
flagList = np.ones_like(xData)
dd = bc.DiscreteSet(nameList, xData, yObsr, yErr, flagList)
ddSet = {"gauss": dd}
ds = bc.DataSet(ddSet)
dn4m = bc.DNest4Model(ds, gaussModel, logLFunc_simple)
#print dn4m.log_likelihood([10., 320., 43.])
# Create a model object and a sampler
sampler = dnest4.DNest4Sampler(dn4m,
backend=dnest4.backends.CSVBackend(".",
sep=" "))
# Set up the sampler. The first argument is max_num_levels
gen = sampler.sample(max_num_levels=30, num_steps=1000, new_level_interval=10000,
num_per_step=10000, thread_steps=100,
num_particles=5, lam=10, beta=100, seed=1234)
# Do the sampling (one iteration here = one particle save)
for i, sample in enumerate(gen):
print("# Saved {k} particles.".format(k=(i+1)))
# Run the postprocessing
dnest4.postprocess()
#Rename the posterior sample file name#
#-------------------------------------#
os.rename("posterior_sample.txt", "{0}_d_posterior.txt")
| 3,245 | 30.211538 | 83 | py |
Fitter | Fitter-master/gauss/dnest_continual.py | import os
import numpy as np
import dnest4
from dnest4.utils import rng
import fitter.basicclass as bc
from gaussian_model import MultiGaussian, GaussianModelSet, GaussFunc
import matplotlib.pyplot as plt
import cPickle as pickle
import types
#DNest4 model#
#------------#
class DNest4Model(object):
"""
Specify the model
"""
def __init__(self, data, model, logl):
if isinstance(data, bc.DataSet):
self.__data = data
if isinstance(model, bc.ModelCombiner):
self.__model = model
if isinstance(logl, types.FunctionType):
self._logl = logl
def from_prior(self):
"""
        The priors of all the parameters are uniform.
"""
parList = []
parDict = self.__model.get_modelParDict()
for modelName in self.__model._modelList:
parFitDict = parDict[modelName]
for parName in parFitDict.keys():
parRange = parFitDict[parName]["range"]
parType = parFitDict[parName]["type"]
if parType == "c":
#print "[DN4M]: continual"
r1, r2 = parRange
p = (r2 - r1) * rng.rand() + r1 #Uniform distribution
elif parType == "d":
#print "[DN4M]: discrete"
p = np.random.choice(parRange, 1)[0]
else:
raise TypeError("The parameter type '{0}' is not recognised!".format(parType))
parList.append(p)
parList = np.array(parList)
return parList
def perturb(self, params):
"""
        In each step we perturb all the parameters, which is more efficient
        from the computational point of view.
"""
logH = 0.0
parDict = self.__model.get_modelParDict()
pIndex = 0
for modelName in self.__model._modelList:
parFitDict = parDict[modelName]
for parName in parFitDict.keys():
parRange = parFitDict[parName]["range"]
parType = parFitDict[parName]["type"]
if parType == "c":
#print "[DN4M]: continual"
r1, r2 = parRange
params[pIndex] += (r2 - r1) * dnest4.randh() #Uniform distribution
if (params[pIndex] < r1) or (params[pIndex] > r2):
#print "[DNest4Model]: perturb out boundary!"
logH -= np.inf
elif parType == "d":
#print "[DN4M]: discrete"
rangeLen = len(parRange)
iBng = -1 * parRange.index(params[pIndex])
iPar = iBng + rng.randint(rangeLen)
params[pIndex] = parRange[iPar]
if not params[pIndex] in parRange:
#print "[DNest4Model]: perturb out boundary!"
logH -= np.inf
else:
raise TypeError("The parameter type '{0}' is not recognised!".format(parType))
parFitDict[parName]["value"] = params[pIndex]
pIndex += 1
return logH
def log_likelihood(self, params):
"""
        Gaussian sampling distribution.
"""
logL = self._logl(params, self.__data, self.__model)
return logL
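#The three methods above (from_prior, perturb and log_likelihood) form the
#interface required by dnest4.DNest4Sampler; the sampler constructed later in
#this script relies on them.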
def logLFunc_simple(params, data, model):
parDict = model.get_modelParDict()
pIndex = 0
for modelName in model._modelList:
parFitDict = parDict[modelName]
for parName in parFitDict.keys():
parFitDict[parName]["value"] = params[pIndex]
pIndex += 1
x = np.array(data.get_List('x'))
y = np.array(data.get_List('y'))
e = np.array(data.get_List('e'))
ym = np.array(model.combineResult(x))
#Calculate the log_likelihood
s2 = e**2.
logL = -0.5 * np.sum( (y - ym)**2 / s2 + np.log(2 * np.pi * s2) )
return logL
#Load the data#
#-------------#
Nmodel = 5
dataName = "gauss{0}".format(Nmodel)
try:
fp = open("{0}.dict".format(dataName), "r")
except IOError:
raise IOError("{0} does not exist!".format(dataName))
data = pickle.load(fp)
fp.close()
xData = data["x"]
Ndata = len(xData)
fAdd = data["f_add"]
yTrue = data['y_true']
yObsr = data['y_obsr']
yErr = data['y_err']
pValue = data['parameters']
rangeList = data['ranges']
cmpList = data['compnents']
print( "#------Start fitting------#" )
print( "# Ndata: {0}".format(Ndata) )
print( "# Nmodel: {0}".format(Nmodel) )
print( "# f_add: {0}".format(fAdd) )
print( "#-------------------------#" )
#Generate the gaussian model#
#---------------------------#
gaussModel = GaussianModelSet(pValue, rangeList)
modelDict = gaussModel.get_modelParDict()
for modelName in gaussModel._modelList:
model = modelDict[modelName]
for parName in model.keys():
parDict = model[parName]
print("{0}: {1} ({2[0]}, {2[1]})".format(parName, parDict["value"], parDict["range"]))
#Construct the DNest4Model#
#-------------------------#
nameList = ["p{0}".format(i) for i in range(Ndata)]
flagList = np.ones_like(xData)
dd = bc.DiscreteSet(nameList, xData, yObsr, yErr, flagList)
ddSet = {"gauss": dd}
ds = bc.DataSet(ddSet)
dn4m = DNest4Model(ds, gaussModel, logLFunc_simple)
#print dn4m.log_likelihood([10., 320., 43.])
# Create a model object and a sampler
sampler = dnest4.DNest4Sampler(dn4m,
backend=dnest4.backends.CSVBackend(".",
sep=" "))
# Set up the sampler. The first argument is max_num_levels
gen = sampler.sample(max_num_levels=30, num_steps=1000, new_level_interval=10000,
num_per_step=10000, thread_steps=100,
num_particles=5, lam=10, beta=100, seed=1234)
# Do the sampling (one iteration here = one particle save)
for i, sample in enumerate(gen):
print("# Saved {k} particles.".format(k=(i+1)))
# Run the postprocessing
dnest4.postprocess()
#Rename the posterior sample file name#
#-------------------------------------#
os.rename("posterior_sample.txt", "{0}_c_posterior.txt".format(dataName))
| 6,165 | 34.034091 | 98 | py |
Fitter | Fitter-master/gauss/mock_data_gauss.py | import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
from gaussian_model import MultiGaussian, GaussianModelDiscrete
#Generate the mock data#
#-----------------#
Ndata = 50
xMax = 800.0
Nmodel = 10
fAdd = None #0.1
pRange = [
[5.0, 20.0], #The range of a
[20.0, 580.0], #The range of b
[50.0, 200.0], #The range of c
]
print( "#------Start fitting------#" )
print( "# Ndata: {0}".format(Ndata) )
print( "# Nmodel: {0}".format(Nmodel) )
print( "# f_add: {0}".format(fAdd) )
print( "#-------------------------#" )
xData = np.linspace(1.0, xMax, Ndata)
model = MultiGaussian(xData, pRange, Nmodel, fAdd)
yTrue = model['y_true']
yObsr = model['y_obsr']
yErr = model['y_err']
pValue = model['parameters']
rangeList = model['ranges']
cmpList = model['compnents']
model['x'] = xData
for p in pValue:
print("a: {0[0]}, b: {0[1]}, c:{0[2]}".format(p))
fileName = "gauss{0}.dict".format(Nmodel)
fp = open(fileName, "w")
pickle.dump(model, fp)
fp.close()
print("{0} is saved!".format(fileName))
fig = plt.figure()
plt.errorbar(xData, yObsr, yerr=yErr, fmt=".k")
plt.plot(xData, yTrue, linewidth=1.5, color="k")
for y in cmpList:
plt.plot(xData, y, linestyle='--')
plt.savefig("gauss{0}.pdf".format(Nmodel))
plt.close()
| 1,263 | 24.795918 | 63 | py |
Fitter | Fitter-master/gauss/plot_result_discrete.py | import numpy as np
import matplotlib.pyplot as plt
from gaussian_model import GaussFunc
import cPickle as pickle
fp = open("test_model.dict", "r")
model = pickle.load(fp)
fp.close()
ps = np.loadtxt("posterior_sample.txt")
xd = model['x']
yTrue = model['y_true']
yObsr = model['y_obsr']
yErr = model['y_err']
pValue = model['parameters']
cmpList = model['compnents']
nGauss = len(pValue)
for loop in range(nGauss):
print "{0[0]}, {0[1]}, {0[2]}".format(pValue[loop])
#Calculate the optimized parameter values
parRangeList = map( lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(ps, [16, 50, 84], axis=0)) )
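#Each tuple is (median, upper error, lower error), i.e. the 50th percentile and
#the 84%-50% and 50%-16% differences of the posterior sample of one parameter.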
parRangeList = np.array(parRangeList)
#(nmodel, npar, npercent)
parRangeList = parRangeList.reshape((nGauss, -1, 3))
#Sort the 50% and sort others according to it
a50List = parRangeList[:, 0, 0]
sortIndx = np.argsort(a50List)
parList = []
for loop_pc in range(3):
parPcList = []
for loop_par in range(3):
pList = parRangeList[:, loop_par, loop_pc][sortIndx]
parPcList.append(pList)
parList.append(parPcList)
parList = np.array(parList)
par50List= []
par16List= []
par84List= []
print("Fitting results:")
for loop in range(nGauss):
prA = parList[:, 0, loop]
prB = parList[:, 1, loop]
prC = parList[:, 2, loop]
par50List.append( (prA[0], prB[0], prC[0]) )
par84List.append( (prA[1], prB[1], prC[1]) )
par16List.append( (prA[2], prB[2], prC[2]) )
a_true, b_true, c_true = pValue[loop]
print( "a_{0}: {1[0]}+{1[1]}-{1[2]} (True: {2})".format(loop, prA, a_true) )
print( "b_{0}: {1[0]}+{1[1]}-{1[2]} (True: {2})".format(loop, prB, b_true) )
print( "c_{0}: {1[0]}+{1[1]}-{1[2]} (True: {2})".format(loop, prC, c_true) )
print("-----------------")
fig = plt.figure()
plt.errorbar(xd, yObsr, yerr=yErr, fmt=".k")
plt.plot(xd, yTrue, linewidth=1.5, color="k")
for y in cmpList:
plt.plot(xd, y, linestyle='--')
xm = np.linspace(1., 1000., 1000)
ym = np.zeros_like(xm)
for loop in range(nGauss):
a50, b50, c50 = par50List[loop]
y50 = GaussFunc(a50, b50, c50, xm)
ym += y50
a84, b84, c84 = par84List[loop]
y84 = GaussFunc(a50+a84, b50+b84, c50+c84, xm)
a16, b16, c16 = par16List[loop]
y16 = GaussFunc(a50-a16, b50-b16, c50-c16, xm)
plt.plot(xm, y50, color="r")
plt.fill_between(xm, y16, y84, color="r", alpha=0.3)
plt.plot(xm, ym, color="r")
#plt.xlim([0, 100])
#plt.ylim([0, 800])
plt.show()
| 2,434 | 29.4375 | 80 | py |
Fitter | Fitter-master/gauss/gaussian_model.py | import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
from scipy.interpolate import interp1d
import fitter.basicclass as bc
def GaussFunc(a, b, c, x):
return a * np.exp( -0.5 * ( (x - b) / c )**2. )
def MultiGaussian(x, p_range, n_model, f_add=None, QuietMode=True):
"""
Generate a model with the combination of a number of Gaussian models.
a is randomly chosen in the entire range for all the components.
    The range of b is split into consecutive sub-ranges so that each component
    always lies in front of the next one, and the values are randomly chosen in
    log space.
c is randomly chosen in the log space of the range.
Parameters
----------
x : float array
The x of the Gaussian function.
p_range : list
The list of ranges for the three parameters of GaussFunc().
Format: [[ra1, ra2], [rb1, rb2], [rc1, rc2]]
n_model : int
The number of Gaussian models.
f_add : float
The additional uncertainty that is not included in the yerr.
QuietMode : bool
The toggle that does not print if true.
Returns
-------
model_pkg : dict
The package of model.
x: float array
The x of the model.
y_true: float array
The true model without uncertainty.
y_obsr: float array
The observed data with uncertainty.
y_err: float array
The error uncertainty of the data.
parameters: list of tuples.
The parameter list of (a, b, c)
compnents: list of float arrays.
The model y list.
ranges: list of lists.
The list of parameter ranges [p1, p2].
f_add: float
The fraction of y data that cannot explained by the model.
Notes
-----
None.
"""
x_max = np.max(x)
n_data = len(x)
b1, b2 = p_range[1] #The range of the parameters
rangeB = (b2 - b1) * np.random.rand(n_model - 1) + b1
rangeB = np.sort( np.append(rangeB, [b1, b2]) )
rangeList = [p_range[0], rangeB, p_range[2]]
parList = [] #Record the parameters
cmpList = [] #Record the models
y_true = np.zeros_like(x)
if not QuietMode:
print("----------------------")
print("Set model parameters:")
#Set the model parameters in the parList and save the model components
#in the cmpList
for loop in range(n_model):
ra1, ra2 = rangeList[0]
a = (ra2 - ra1) * np.random.rand() + ra1
rb1, rb2 = rangeList[1][loop:(loop+2)]
lb1, lb2 = [np.log(rb1), np.log(rb2)]
b = np.exp( (lb2 - lb1) * np.random.rand() + lb1 )
rc1, rc2 = rangeList[2]
lc1, lc2 = [np.log(rc1), np.log(rc2)]
c = np.exp( (lc2 - lc1) * np.random.rand() + lc1 )
if not QuietMode:
print("a_{0}: {1:.3f} in ({2}, {3})".format(loop, a, ra1, ra2))
print("b_{0}: {1:.3f} in ({2}, {3})".format(loop, b, rb1, rb2))
print("c_{0}: {1:.3f} in ({2}, {3})".format(loop, c, rc1, rc2))
parList.append( (a, b, c) )
y = GaussFunc(a, b, c, x)
cmpList.append(y)
y_true += y
#Add errors
yerr = 0.5 + 1.5 * np.random.rand(n_data)
y_obsr = y_true.copy()
if not f_add is None:
y_obsr += np.abs(f_add * y_obsr) * np.random.randn(n_data)
y_obsr += yerr * np.random.randn(n_data)
model_pkg = {
'x': x,
'y_true': y_true,
'y_obsr': y_obsr,
'y_err': yerr,
'parameters': parList,
'compnents': cmpList,
'ranges': rangeList,
'f_add': f_add,
}
return model_pkg
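#A minimal usage sketch (kept as comments, not executed), with the parameter
#ranges taken from mock_data_gauss.py:
#    x = np.linspace(1.0, 800.0, 50)
#    pkg = MultiGaussian(x, [[5., 20.], [20., 580.], [50., 200.]], n_model=3, f_add=0.1)
#    yObsr, yErr = pkg["y_obsr"], pkg["y_err"]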
def GaussianModelSet(p_value, range_list):
"""
Setup a multi-gaussian model to be used in fitting.
parameters
----------
p_value : list of tuples
The parameter values (a, b, c) forming a tuple stored in the list.
range_list : list
        The ranges of the parameters for the model components. Format: [range_a,
        range_b, range_c]. Each range_* is a list; if it contains more than two
        values, consecutive pairs are used as the ranges of the individual models.
Returns
-------
gaussModel : class ModelCombiner()
The combined model of Gaussian functions.
Notes
-----
None.
"""
n_model = len(p_value)
parNameList = ['a', 'b', 'c']
modelNameRoot = 'G{0}'
modelDict = OrderedDict()
for loop_m in range(n_model):
#For each parameter, add the information.
parFitDict = OrderedDict()
for loop_pn in range(3):
if len(range_list[loop_pn]) > 2:
rp1, rp2 = range_list[loop_pn][loop_m:(loop_m+2)]
else:
rp1, rp2 = range_list[loop_pn]
parFitDict[parNameList[loop_pn]] = {
'value': p_value[loop_m][loop_pn],
'range': [rp1, rp2],
'type': 'c', #The parameter type discrete or continual.
'vary': True,
}
gauss = bc.ModelFunction(GaussFunc, 'x', parFitDict)
modelDict[modelNameRoot.format(loop_m)] = gauss
gaussModel = bc.ModelCombiner(modelDict)
return gaussModel
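#A minimal usage sketch (kept as comments, not executed), reusing the
#"parameters" and "ranges" entries returned by MultiGaussian():
#    gaussModel = GaussianModelSet(pkg["parameters"], pkg["ranges"])
#    yModel = gaussModel.combineResult(x)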
def GaussDiscrete(a, b, c, x, tmplt):
"""
    The Gaussian function with discrete parameters using a template.
"""
fltrA = tmplt["a"] == a
fltrB = tmplt["b"] == b
fltrC = tmplt["c"] == c
fltr = fltrA & fltrB & fltrC
if np.sum(fltr) == 0:
raise ValueError("The parameters are not on the grids!")
xt = tmplt[fltr][0]["x"]
yt = tmplt[fltr][0]["y"]
y = interp1d(xt, yt)(x)
return y
def GaussianModelDiscrete(n_model, range_dict, tmplt):
"""
Setup a multi-gaussian model to be used in fitting.
parameters
----------
range_dict : dict
The dict of the range for each paramter.
tmplt : ndarray
The template of Gaussian functions.
Returns
-------
gaussModel : class ModelCombiner()
The combined model of Gaussian functions.
Notes
-----
None.
"""
parNameList = ['a', 'b', 'c']
parValueList = [10.0, 200.0, 50.0]
modelNameRoot = 'G{0}'
modelDict = OrderedDict()
for loop_m in range(n_model):
#For each parameter, add the information.
parFitDict = OrderedDict()
for loop_pn in range(3):
parName = parNameList[loop_pn]
parFitDict[parName] = {
'value': parValueList[loop_pn],
'range': range_dict[parName],
'type': 'd', #The parameter type discrete or continual.
'vary': True,
}
parAddDict = {"tmplt": tmplt}
#gauss = bc.ModelFunction(GaussDiscrete, 'x', parFitDict, parAddDict)
gauss = bc.ModelFunction(GaussFunc, 'x', parFitDict)
modelDict[modelNameRoot.format(loop_m)] = gauss
gaussModel = bc.ModelCombiner(modelDict)
return gaussModel
if __name__ == "__main__":
import cPickle as pickle
fp = open("gt.dict", "r")
gt = pickle.load(fp)
fp.close()
a = 7.0
b = 260.0
c = 65.0
x = np.linspace(1.0, 1000.0, 500)
yt = GaussDiscrete(a, b, c, x, gt)
ym = GaussFunc(a, b, c, x)
plt.plot(x, yt, "k", linewidth=1.5)
plt.plot(x, ym, ":r", linewidth=1.5)
plt.show()
| 7,247 | 31.213333 | 78 | py |
Fitter | Fitter-master/gauss/plot_result_continual.py | import sys
import numpy as np
import matplotlib.pyplot as plt
from gaussian_model import GaussFunc
import cPickle as pickle
dataName = sys.argv[1]
fp = open("{0}.dict".format(dataName), "r")
model = pickle.load(fp)
fp.close()
ps = np.loadtxt("{0}_c_posterior.txt".format(dataName))
xd = model['x']
yTrue = model['y_true']
yObsr = model['y_obsr']
yErr = model['y_err']
pValue = model['parameters']
cmpList = model['compnents']
nGauss = len(pValue)
for loop in range(nGauss):
print "{0[0]}, {0[1]}, {0[2]}".format(pValue[loop])
#Calculate the optimized parameter values
parRangeList = map( lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(ps, [16, 50, 84], axis=0)) )
#"""
par50List= []
par16List= []
par84List= []
print("Fitting results:")
for loop in range(nGauss):
na = loop * 3 + 0
nb = loop * 3 + 1
nc = loop * 3 + 2
prA = parRangeList[na]
prB = parRangeList[nb]
prC = parRangeList[nc]
par50List.append( (prA[0], prB[0], prC[0]) )
par84List.append( (prA[1], prB[1], prC[1]) )
par16List.append( (prA[2], prB[2], prC[2]) )
a_true, b_true, c_true = pValue[loop]
print( "a_{0}: {1[0]}+{1[1]}-{1[2]} (True: {2})".format(loop, prA, a_true) )
print( "b_{0}: {1[0]}+{1[1]}-{1[2]} (True: {2})".format(loop, prB, b_true) )
print( "c_{0}: {1[0]}+{1[1]}-{1[2]} (True: {2})".format(loop, prC, c_true) )
print("-----------------")
fig = plt.figure()
plt.errorbar(xd, yObsr, yerr=yErr, fmt=".k")
plt.plot(xd, yTrue, linewidth=1.5, color="k")
for y in cmpList:
plt.plot(xd, y, linestyle='--')
xm = np.linspace(1., 800., 1000)
ym = np.zeros_like(xm)
cl = ["r", "y", "b", "g", "m", "c"]
for loop in range(nGauss):
a50, b50, c50 = par50List[loop]
y50 = GaussFunc(a50, b50, c50, xm)
ym += y50
a84, b84, c84 = par84List[loop]
y84 = GaussFunc(a50+a84, b50+b84, c50+c84, xm)
a16, b16, c16 = par16List[loop]
y16 = GaussFunc(a50-a16, b50-b16, c50-c16, xm)
plt.plot(xm, y50, color=cl[loop%len(cl)])
plt.fill_between(xm, y16, y84, color=cl[loop%len(cl)], alpha=0.3)
plt.plot(xm, ym, color="grey", linewidth=1.5)
#plt.xlim([0, 100])
#plt.ylim([0, 800])
plt.savefig("{0}_fit.pdf".format(dataName))
plt.show()
#"""
| 2,219 | 29 | 80 | py |
Fitter | Fitter-master/template/bc03_grid.py | import ezgal
import numpy as np
from sedfit.fitter.template import Template
from sklearn.neighbors import KDTree
from scipy.interpolate import splrep, splev
import matplotlib.pyplot as plt
import cPickle as pickle
ls_mic = 2.99792458e14 #micron/s
ls_aa = 2.99792458e18 #aa/s
Mpc = 3.08567758e24 #cm
Lsun = 3.828e33 #erg/s
mJy = 1e26 #mJy
def Stellar_SED(logMs, age, wave, zs=0.01, band="h", zf_guess=1.0, spsmodel="bc03_ssp_z_0.02_chab.model"):
"""
    This function obtains the galaxy stellar SED given the stellar mass and age.
The default model is Bruzual & Charlot (2003) with solar metallicity and
Chabrier IMF. The stellar synthesis models are organised by the module EzGal
(http://www.baryons.org/ezgal/). The function is consistent with BC03 templates
(tested by SGJY in Oct. 6, 2016).
Parameters
----------
logMs : float
The stellar mass in log10 of solar unit.
age : float
The age of the galaxy, in the unit of Gyr.
wave : array
The sed wavelength corresponding to the sedflux. In units micron.
zs : float, default: 0.01
The redshift of the source. It does not matter which value zs is, since
the template is in the rest frame and the unit is erg/s/Hz.
band : str, default: "h"
The reference band used to calculate the mass-to-light ratio.
zf_guess : float. zf_guess=1.0 by default.
        The initial guess for solving zf such that the age between
        zs and zf matches the required value.
spsmodel : string. spsmodel="bc03_ssp_z_0.02_chab.model" by default.
The stellar population synthesis model that is used.
Returns
-------
flux : array
The sed flux of the stellar emission. In units erg/s/Hz.
Notes
-----
None.
"""
import ezgal #Import the package for stellar synthesis.
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
ls_mic = 2.99792458e14 #micron/s
mJy = 1e26 #mJy
Mpc = 3.08567758e24 #cm
model = ezgal.model(spsmodel) #Choose a stellar population synthesis model.
model.set_cosmology(Om=0.308, Ol=0.692, h=0.678)
func_age = lambda zf, zs, age: age - model.get_age(zf, zs) #To solve the formation redshift given the
#redshift of the source and the stellar age.
func_MF = lambda Msun, Mstar, m2l: Msun - 2.5*np.log10(Mstar/m2l) #Calculate the absolute magnitude of
#the galaxy. Msun is the absolute mag
#of the sun. Mstar is the mass of the
#star. m2l is the mass to light ratio.
func_flux = lambda f0, MF, mu: f0 * 10**(-0.4*(MF + mu)) #Calculate the flux density of the galaxy. f0
#is the zero point. MF is the absolute magnitude
#of the galaxy at certain band. mu is the distance
#module.
Ms = 10**logMs #Calculate the stellar mass.
age_up = model.get_age(1500., zs)
if age > age_up:
raise ValueError("The age is too large!")
zf = fsolve(func_age, zf_guess, args=(zs, age)) #Given the source redshift and the age, calculate the redshift
#for the star formation.
Msun_H = model.get_solar_rest_mags(nzs=1, filters=band, ab=True) #The absolute magnitude of the Sun in given band.
m2l = model.get_rest_ml_ratios(zf, band, zs) #Calculate the mass-to-light ratio.
M_H = func_MF(Msun_H, Ms, m2l) #The absolute magnitude of the galaxy in given band.
#Calculate the flux at given band for comparison.
f0 = 3.631e6 #Zero point of AB magnitude, in unit of mJy.
mu = model.get_distance_moduli(zs) #The distance module
flux_H = func_flux(f0, M_H, mu) #In unit mJy.
wave_H = 1.6448 #Pivot wavelength of given band, in unit of micron.
#Obtain the SED
wave_rst = model.ls / 1e4 #In unit micron.
flux_rst = model.get_sed(age, age_units="gyrs", units="Fv") #The unit is not important
#since it will be normalized.
wave_ext = np.linspace(200, 1000, 30)
flux_ext = np.zeros(30)
wave_extd = np.concatenate([wave_rst, wave_ext])
flux_extd = np.concatenate([flux_rst, flux_ext])
#Normalize the SED at the given band.
#The normalization provided by EzGal is not well understood, so I do not use it.
f_int = interp1d(wave_extd, flux_extd)
f_H = f_int(wave_H)
flux = flux_extd * flux_H/f_H #In unit mJy.
dm = model.get_distance_moduli(zs, nfilters=1)
DL = 10**(1. + dm / 5.) / 1e6 * Mpc #In unit cm
sedflux = f_int(wave) * flux_H/f_H * (4 * np.pi * DL**2) / mJy #In unit: erg/s/Hz
#return sedflux, wave_extd, flux_extd, wave_H, flux_H #For debug
return sedflux
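#A minimal usage sketch (kept as comments, not executed); the parameter values
#are only illustrative:
#    wave = 10**np.linspace(-2, 3, 3000) # wavelength in micron
#    flux = Stellar_SED(logMs=10.5, age=3.0, wave=wave) # flux in erg/s/Hz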
#Generate the stellar emission templates
logMs = 0.0
#ageList = 10**np.linspace(8.5, 10., 40)
ageList = np.array([0.5, 1.0, 3.0, 5.0, 9.0])
nAge = len(ageList)
wave = 10**np.linspace(-2, 3, 3000)
fluxList = []
for loop in range(nAge):
fluxList.append(Stellar_SED(logMs, ageList[loop], wave))
fluxList = np.array(fluxList)
print(fluxList.shape)
#Interpolate with KD tree and spline interpolation
XList = []
tckList = []
for loop in range(nAge):
age = ageList[loop]
flux = fluxList[loop, :]
tck = splrep(wave, flux)
tckList.append(tck)
XList.append([age])
kdt = KDTree(XList)
print("Interpolation finishes!")
modelInfo = {
"age": ageList,
}
parFormat = ["age"]
readMe = '''
The stellar emission templates are generated with EzGal.
'''
templateDict = {
"tckList": tckList,
"kdTree": kdt,
"parList": XList,
"modelInfo": modelInfo,
"parFormat": parFormat,
"readMe": readMe
}
t = Template(tckList=tckList, kdTree=kdt, parList=XList, modelInfo=modelInfo,
parFormat=parFormat, readMe=readMe)
t = Template(**templateDict)
fp = open("bc03_kdt.tmplt", "w")
pickle.dump(templateDict, fp)
fp.close()
#Test
cl = ['r', 'g', 'b', 'y', 'c']
for loop in range(nAge):
pars = [ageList[loop]]
f = t(wave, pars)
flux = fluxList[loop]
plt.plot(wave, f, color=cl[loop], linestyle="--")
plt.plot(wave, flux, color=cl[loop], linestyle=":")
plt.ylim([1e14, 1e20])
plt.xscale("log")
plt.yscale("log")
plt.show()
| 6,572 | 40.339623 | 118 | py |
Fitter | Fitter-master/template/tb_dl07.py | import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
#from sedfit.fitter.template import Template
from sklearn.neighbors import KDTree
from scipy.interpolate import splrep, splev
#'''
class Template(object):
"""
This is the object of a model template.
"""
def __init__(self, tckList, kdTree, parList, modelInfo={}, parFormat=[], readMe=""):
self.__tckList = tckList
self.__kdTree = kdTree
self.__parList = parList
self.__modelInfo = modelInfo
self.__parFormat = parFormat
self._readMe = readMe
def __call__(self, x, pars):
"""
Return the interpolation result of the template nearest the input
parameters.
"""
x = np.array(x)
ind = np.squeeze(self.__kdTree.query(np.atleast_2d(pars), return_distance=False))
tck = self.__tckList[ind]
return splev(x, tck)
def get_nearestParameters(self, pars):
"""
Return the nearest template parameters to the input parameters.
"""
ind = np.squeeze(self.__kdTree.query(np.atleast_2d(pars), return_distance=False))
return self.__parList[ind]
def __getstate__(self):
return self.__dict__.copy()
def __setstate__(self, dict):
self.__dict__ = dict
def get_parList(self):
return self.__parList
def get_modelInfo(self):
return self.__modelInfo
def get_parFormat(self):
return self.__parFormat
def readme(self):
return self._readMe
#'''
f_test = 1
f_compile = 0
if f_compile:
fp = open("/Users/jinyi/Work/PG_QSO/templates/DL07spec/dl07.tmplt", "r")
tmpl_dl07 = pickle.load(fp)
fp.close()
uminList = [0.10, 0.15, 0.20, 0.30, 0.40, 0.50, 0.70, 0.80, 1.00, 1.20,
1.50, 2.00, 2.50, 3.00, 4.00, 5.00, 7.00, 8.00, 10.0, 12.0,
15.0, 20.0, 25.0]
umaxList = [1e3, 1e4, 1e5, 1e6]
qpahList = [0.47, 1.12, 1.77, 2.50, 3.19, 3.90, 4.58, 0.75, 1.49, 2.37, 0.10]
mdust2mh = [0.01, 0.01, 0.0101, 0.0102, 0.0102, 0.0103, 0.0104, 0.00343,
0.00344, 0.00359, 0.00206]
XList = []
tckList = []
counter = 0
for umin in uminList:
umaxList_exp = [umin] + umaxList
for umax in umaxList_exp:
for loop_qpah in range(len(qpahList)):
qpah = qpahList[loop_qpah]
mdmh = mdust2mh[loop_qpah]
fltr_umin = tmpl_dl07["umin"] == umin
fltr_umax = tmpl_dl07["umax"] == umax
fltr_qpah = tmpl_dl07["qpah"] == qpah
fltr = fltr_umin & fltr_umax & fltr_qpah
wave = tmpl_dl07[fltr]["wavesim"][0]
flux = tmpl_dl07[fltr]["fluxsim"][0]
sortIndex = np.argsort(wave)
wave = wave[sortIndex]
flux = flux[sortIndex]
tck = splrep(wave, flux)
tckList.append(tck)
XList.append([umin, umax, qpah])
counter += 1
kdt = KDTree(XList)
print("Interpolation finishes!")
wavelength = tmpl_dl07[0]["wavesim"]
modelInfo = {
"umin": uminList,
"umax": umaxList,
"qpah": qpahList,
"mdmh": mdust2mh,
"wavelength": wavelength,
}
parFormat = ["umin", "umax", "qpah"]
readMe = '''
This template is from: http://www.astro.princeton.edu/~draine/dust/irem.html
The interpolation is tested well!
'''
templateDict = {
"tckList": tckList,
"kdTree": kdt,
"parList": XList,
"modelInfo": modelInfo,
"parFormat": parFormat,
"readMe": readMe
}
print("haha")
t = Template(tckList=tckList, kdTree=kdt, parList=XList, modelInfo=modelInfo,
parFormat=parFormat, readMe=readMe)
print("haha")
t = Template(**templateDict)
print("haha")
fp = open("dl07_kdt.tmplt", "w")
#pickle.dump(t, fp)
pickle.dump(templateDict, fp)
fp.close()
if f_test:
fp = open("dl07_kdt.tmplt", "r")
tpDict = pickle.load(fp)
fp.close()
fp = open("/Users/jinyi/Work/PG_QSO/templates/DL07spec/dl07.tmplt", "r")
tmpl_dl07 = pickle.load(fp)
fp.close()
t = Template(**tpDict)
x = 10**np.linspace(0, 3, 1000)
pars = [0.44, 2e6, 2.3]
for i in range(100): #range(len(tmpl_dl07)):
umin = tmpl_dl07[i]["umin"]
umax = tmpl_dl07[i]["umax"]
qpah = tmpl_dl07[i]["qpah"]
wave = tmpl_dl07[i]["wavesim"]
flux = tmpl_dl07[i]["fluxsim"]
pars = [umin, umax, qpah]
flux_intp = t(wave, pars)
print np.max(abs(flux-flux_intp))
print(t.get_parFormat())
print(t.readme())
print t.get_nearestParameters(pars)
| 4,747 | 29.632258 | 89 | py |
Fitter | Fitter-master/template/tb_clumpy.py | import h5py
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
from sedfit.fitter.template import Template
from sklearn.neighbors import KDTree
from scipy.interpolate import splrep, splev
from collections import Counter
ls_mic = 2.99792458e14 #micron/s
f_test = 1
f_compile = 0
qList = [0., 1., 2., 3.]
N0List = [1., 4., 8., 12., 15.]
tvList = [10., 40., 80., 160., 300.]
sigList = [15., 30., 45., 60., 70.]
YList = [5., 10., 30., 60., 100.]
iList = [0., 20., 40., 70., 90.]
length = len(qList) * len(N0List) * len(tvList) * len(sigList) * len(YList) * len(iList)
h = h5py.File('/Users/jinyi/Work/PG_QSO/templates/clumpy_models_201410_tvavg.hdf5')
h_q = h['q'].value
h_N0 = h['N0'].value
h_tv = h['tv'].value
h_sig = h['sig'].value
h_Y = h['Y'].value
h_i = h['i'].value
wave = h['wave'].value
flux_tor = h["flux_tor"].value
nu = ls_mic / wave
if f_compile:
kq = np.sort(Counter(h_q).keys())
kN0 = np.sort(Counter(h_N0).keys())
ktv = np.sort(Counter(h_tv).keys())
ksig = np.sort(Counter(h_sig).keys())
kY = np.sort(Counter(h_Y).keys())
ki = np.sort(Counter(h_i).keys())
XList = []
tckList = []
counter = 0
for q in qList:
for N0 in N0List:
for tv in tvList:
for sig in sigList:
for Y in YList:
for i in iList:
if (counter+1) % 100 == 0:
print "[{0}%]".format(100. * (counter+1)/length)
counter += 1
f_q = h_q == q
f_N0 = h_N0 == N0
f_tv = h_tv == tv
f_sig = h_sig == sig
f_Y = h_Y == Y
f_i = h_i == i
fltr = f_q & f_N0 & f_tv & f_sig & f_Y & f_i
flux = flux_tor[fltr][0] / nu
norm = np.trapz(flux, nu)
flux_norm = flux/abs(norm)
tck = splrep(wave, flux_norm)
tckList.append(tck)
XList.append([q, N0, tv, sig, Y, i])
kdt = KDTree(XList)
print("Interpolation finishes!")
modelInfo = {
"q": qList,
"N0": N0List,
"tv": tvList,
"sigma": sigList,
"Y": YList,
"i": iList,
"wavelength": wave,
}
parFormat = ["q", "N0", "tv", "sigma", "Y", "i"]
readMe = '''
This template is from: http://www.pa.uky.edu/clumpy/
The interpolation is tested well!
'''
templateDict = {
"tckList": tckList,
"kdTree": kdt,
"parList": XList,
"modelInfo": modelInfo,
"parFormat": parFormat,
"readMe": readMe
}
print("haha")
t = Template(tckList=tckList, kdTree=kdt, parList=XList, modelInfo=modelInfo,
parFormat=parFormat, readMe=readMe)
print("haha")
t = Template(**templateDict)
print("haha")
fp = open("clumpy_kdt.tmplt", "w")
#pickle.dump(t, fp)
pickle.dump(templateDict, fp)
fp.close()
if f_test:
fp = open("clumpy_kdt.tmplt", "r")
tpDict = pickle.load(fp)
fp.close()
t = Template(**tpDict)
counter = 0
for q in qList:
for N0 in N0List:
for tv in tvList:
for sig in sigList:
for Y in YList:
for i in iList:
if (counter+1) % 100 == 0:
print "[{0}%]".format(100. * (counter+1)/length)
counter += 1
f_q = h_q == q
f_N0 = h_N0 == N0
f_tv = h_tv == tv
f_sig = h_sig == sig
f_Y = h_Y == Y
f_i = h_i == i
fltr = f_q & f_N0 & f_tv & f_sig & f_Y & f_i
flux = flux_tor[fltr][0] / nu
norm = np.trapz(flux, nu)
flux_norm = flux/abs(norm)
pars = [q, N0, tv, sig, Y, i]
flux_intp = t(wave, pars)
print np.max(abs(flux_norm-flux_intp)/flux_intp)
if counter > 100:
break
print(t.get_parFormat())
print(t.readme())
print t.get_nearestParameters(pars)
| 4,532 | 32.087591 | 88 | py |
Fitter | Fitter-master/template/tb_dust_xl.py | #This code generates the KDTree template file that can be directly used.
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
from sklearn.neighbors import KDTree
from scipy.interpolate import splrep, splev
from sedfit.fitter.template import Template
modelDir = "/Users/jinyi/Work/mcmc/Fitter/template/grain_models/crosec/"
silNameList = ["DL84", "amo4", "pyr7"]
graNameList = ["grap"]
silTList = np.arange(len(silNameList)) #The types of silicate dust.
graTList = np.arange(len(graNameList)) #The types of graphite dust.
#"""
#Obtain the silicate template interpolated list
tckSilList = []
parSilList = []
for nsil in silTList:
tmplDir = modelDir + "{0}.dat".format(silNameList[nsil])
modelSil = np.loadtxt(tmplDir)
size_all = modelSil[:, 0]
wave_all = modelSil[:, 1]
kappa_all = modelSil[:, 2]
sizeList = np.unique(size_all)
for sz in sizeList:
fltr = size_all == sz
wave = wave_all[fltr]
kappa = kappa_all[fltr]
tck = splrep(wave, kappa)
tckSilList.append(tck)
parSilList.append([nsil, sz])
kdtSil = KDTree(parSilList)
print("Silicate dust interpolation finishes!")
modelInfo = {
"type": silTList, #Silicate dust type
"size": sizeList,
"wavelength": wave,
}
parFormat = ["type", "size"]
readMe = '''
This silicate template is generated by prof. Aigen Li's code.
The interpolation is tested well!
'''
silDict = {
"tckList": tckSilList,
"kdTree": kdtSil,
"parList": parSilList,
"modelInfo": modelInfo,
"parFormat": parFormat,
"readMe": readMe
}
#Obtain the graphite template interpolated list
tckGraList = []
parGraList = []
for ngra in graTList:
tmplDir = modelDir + "{0}.dat".format(graNameList[ngra])
modelGra = np.loadtxt(tmplDir)
size_all = modelGra[:, 0]
wave_all = modelGra[:, 1]
kappa_all = modelGra[:, 2]
sizeList = np.unique(size_all)
for sz in sizeList:
fltr = size_all == sz
wave = wave_all[fltr]
kappa = kappa_all[fltr]
tck = splrep(wave, kappa)
tckGraList.append(tck)
parGraList.append([ngra, sz])
kdtGra = KDTree(parGraList)
print("Graphite dust interpolation finishes!")
modelInfo = {
"type": graTList, #Silicate dust type
"size": sizeList,
"wavelength": wave,
}
parFormat = ["type", "size"]
readMe = '''
This graphite template is generated by prof. Aigen Li's code.
The interpolation is tested well!
'''
graDict = {
"tckList": tckGraList,
"kdTree": kdtGra,
"parList": parGraList,
"modelInfo": modelInfo,
"parFormat": parFormat,
"readMe": readMe
}
#Save the template
templateReadMe = '''
This template file consists two template dicts for silicate and graphite dust
grain. Each of the dicts can be used as input of the Template class in sedfit
package.
'''
dustModel = {
"Silicate": silDict,
"Graphite": graDict,
"readMe": templateReadMe
}
fp = open("/Users/jinyi/Work/mcmc/Fitter/template/dust_xl_kdt.tmplt", "w")
pickle.dump(dustModel, fp)
fp.close()
#"""
##Test the KDTree and the interpolation
fp = open("/Users/jinyi/Work/mcmc/Fitter/template/dust_xl_kdt.tmplt", "r")
dustModel = pickle.load(fp)
fp.close()
#For the astronomical silicate
silDict = dustModel["Silicate"]
t = Template(**silDict)
colorList = ["r", "b", "m", "g", "y", "c"]
for nsil in silTList:
tmplDir = modelDir + "{0}.dat".format(silNameList[nsil])
modelSil = np.loadtxt(tmplDir)
size_all = modelSil[:, 0]
wave_all = modelSil[:, 1]
kappa_all = modelSil[:, 2]
sizeList = np.unique(size_all)
for nsz in range(len(sizeList)): # [0, 79, 99]:
sz = sizeList[nsz]
fltr = size_all == sz
wave = wave_all[fltr]
kappa = kappa_all[fltr]
par = [nsil, sz-0.01]
kappa_t = t(wave, par)
color = colorList[nsz%len(colorList)]
plt.plot(wave, kappa, linewidth=0.1, linestyle="-", color=color, label="{0}".format(sz))
plt.plot(wave, kappa_t, linewidth=0.1, linestyle=":", color=color)
plt.xscale("log")
plt.yscale("log")
#plt.legend(loc="best", fontsize=10)
plt.title("{0}".format("Silicate"), fontsize=24)
#plt.savefig("dust_sil.png")
plt.show()
#"""
#"""
#For the graphite dust
graDict = dustModel["Graphite"]
t = Template(**graDict)
colorList = ["r", "b", "m", "g", "y", "c"]
for ngra in graTList:
tmplDir = modelDir + "{0}.dat".format(graNameList[ngra])
modelGra = np.loadtxt(tmplDir)
size_all = modelGra[:, 0]
wave_all = modelGra[:, 1]
kappa_all = modelGra[:, 2]
sizeList = np.unique(size_all)
for nsz in range(len(sizeList)): #[0, 79, 99]:
sz = sizeList[nsz]
fltr = size_all == sz
wave = wave_all[fltr]
kappa = kappa_all[fltr]
        par = [ngra, sz-0.01]
kappa_t = t(wave, par)
color = colorList[nsz%len(colorList)]
plt.plot(wave, kappa, linewidth=0.1, linestyle="-", color=color, label="{0}".format(sz))
plt.plot(wave, kappa_t, linewidth=0.1, linestyle=":", color=color)
plt.xscale("log")
plt.yscale("log")
#plt.legend(loc="best", fontsize=10)
plt.title("{0}".format("Graphite"), fontsize=24)
#plt.savefig("dust_sil.png")
plt.show()
#"""
| 5,187 | 29.339181 | 96 | py |
Fitter | Fitter-master/template/tb_dl07_MW.py | # This script is to generate the DL07 model template. The model templates only
# include the Milky Way models, because the SMC and LMC templates do not
# change consistently when the qPAH parameter changes.
#
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
from sedfit.fitter.template import Template
from sklearn.neighbors import KDTree
from scipy.interpolate import splrep, splev
f_test = 0
f_compile = 1
if f_compile:
fp = open("/Users/shangguan/Work/Fitter/template/dl07.tmplt", "r")
tmpl_dl07 = pickle.load(fp)
fp.close()
uminList = [0.10, 0.15, 0.20, 0.30, 0.40, 0.50, 0.70, 0.80, 1.00, 1.20,
1.50, 2.00, 2.50, 3.00, 4.00, 5.00, 7.00, 8.00, 10.0, 12.0,
15.0, 20.0, 25.0]
umaxList = [1e3, 1e4, 1e5, 1e6]
qpahList = [0.47, 1.12, 1.77, 2.50, 3.19, 3.90, 4.58] #Only for the Milky Way models.
mdust2mh = [0.01, 0.01, 0.0101, 0.0102, 0.0102, 0.0103, 0.0104] #Matched with the qpahList.
XList = []
tckList = []
counter = 0
for umin in uminList:
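        # Prepend umin so the grid also covers the umax == umin case.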
umaxList_exp = [umin] + umaxList
for umax in umaxList_exp:
for loop_qpah in range(len(qpahList)):
qpah = qpahList[loop_qpah]
mdmh = mdust2mh[loop_qpah]
fltr_umin = tmpl_dl07["umin"] == umin
fltr_umax = tmpl_dl07["umax"] == umax
fltr_qpah = tmpl_dl07["qpah"] == qpah
fltr = fltr_umin & fltr_umax & fltr_qpah
wave = tmpl_dl07[fltr]["wavesim"][0]
flux = tmpl_dl07[fltr]["fluxsim"][0]
sortIndex = np.argsort(wave)
wave = wave[sortIndex]
flux = flux[sortIndex]
tck = splrep(wave, flux)
tckList.append(tck)
XList.append([umin, umax, qpah])
counter += 1
kdt = KDTree(XList)
print("Interpolation finishes!")
wavelength = tmpl_dl07[0]["wavesim"]
modelInfo = {
"umin": uminList,
"umax": umaxList,
"qpah": qpahList,
"mdmh": mdust2mh,
"wavelength": wavelength,
}
parFormat = ["umin", "umax", "qpah"]
readMe = '''
This template is from: http://www.astro.princeton.edu/~draine/dust/irem.html
The templates used are only for the Milky Way model.
The interpolation is tested well!
'''
templateDict = {
"tckList": tckList,
"kdTree": kdt,
"parList": XList,
"modelInfo": modelInfo,
"parFormat": parFormat,
"readMe": readMe
}
print("haha")
t = Template(tckList=tckList, kdTree=kdt, parList=XList, modelInfo=modelInfo,
parFormat=parFormat, readMe=readMe)
print("haha")
t = Template(**templateDict)
print("haha")
fp = open("dl07_kdt_mw.tmplt", "w")
#pickle.dump(t, fp)
pickle.dump(templateDict, fp)
fp.close()
if f_test:
fp = open("dl07_kdt_mw.tmplt", "r")
tpDict = pickle.load(fp)
fp.close()
fp = open("/Users/shangguan/Work/Fitter/template/dl07.tmplt", "r")
tmpl_dl07 = pickle.load(fp)
fp.close()
t = Template(**tpDict)
x = 10**np.linspace(0, 3, 1000)
pars = [0.44, 2e6, 2.3]
for i in range(100): #range(len(tmpl_dl07)):
umin = tmpl_dl07[i]["umin"]
umax = tmpl_dl07[i]["umax"]
qpah = tmpl_dl07[i]["qpah"]
wave = tmpl_dl07[i]["wavesim"]
flux = tmpl_dl07[i]["fluxsim"]
pars = [umin, umax, qpah]
flux_intp = t(wave, pars)
print np.max(abs(flux-flux_intp))
print(t.get_parFormat())
print(t.readme())
print t.get_nearestParameters(pars)
print pars
| 3,652 | 31.90991 | 95 | py |
Fitter | Fitter-master/template/tb_cat3d_H_wind.py | import numpy as np
import cPickle as pickle
import matplotlib.pyplot as plt
from sedfit.fitter.template import Template
from scipy.interpolate import splrep, splev
from sklearn.neighbors import KDTree
from astropy.table import Table
from glob import glob
N0List = [5, 7.5, 10]
awList = [-0.50, -1.00, -1.50, -2.00, -2.50]
fwdList = [0.15, 0.30, 0.45, 0.60, 0.75]
thetawList = [30, 45]
thetasigList = [7.50, 10.00, 15.00]
aList = [-0.50, -1.00, -1.50, -2.00, -2.50, -3.00]
hList = [0.10, 0.20, 0.30, 0.40, 0.50]
tauV = 50
Rout = 500
iList = [0, 15, 30, 45, 60, 75, 90]
length = len(glob('template/CAT3D-WIND_SED_GRID/*.*')) * 7
print(length)
XList = []
tckList = []
counter = 0
for filename in glob('template/CAT3D-WIND_SED_GRID/*.*'):
print(filename)
f = Table.read(filename, format='ascii')
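    # The model parameters are read from fixed character positions in the grid
    # file name; the offsets shift by one when N0 has two digits.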
if filename[36] != '1':
N0 = int(filename[36])
if N0 == 7:
N0 = 7.5
# print('N0 ', N0)
fwd = float(filename[41:45])
# print('fwd ', fwd)
a = float(filename[47:52])
# print('a ', a)
h = float(filename[54:58])
# print('h ', h)
aw = float(filename[61:66])
# print('aw ', aw)
thetaw = float(filename[73:75])
# print('thetaw ', thetaw)
thetasig = float(filename[84:88])
# print('thetasig ', thetasig)
else:
N0 = int(filename[36:38])
# print('N0 ', N0)
fwd = float(filename[42:46])
# print('fwd ', fwd)
a = float(filename[48:53])
# print('a ', a)
h = float(filename[55:59])
# print('h ', h)
aw = float(filename[62:67])
# print('aw ', aw)
thetaw = float(filename[74:76])
# print('thetaw ', thetaw)
thetasig = float(filename[85:89])
# print('thetasig ', thetasig)
for i in iList:
# print(i)
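        # Columns col3-col9 hold the SEDs for inclinations 0-90 deg in 15 deg steps;
        # normalise by col1 and spline over the wavelength grid in col2.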
index = i/15 + 3
flux = f['col{0}'.format(index)]/f['col1']
tck = splrep(f['col2'], flux)
tckList.append(tck)
XList.append([a, h, N0, i, fwd, aw, thetaw, thetasig])
print("[{0}%]".format(100. * (counter + 1) / length))
counter += 1
print(counter)
wave = f['col2']
kdt = KDTree(XList)
print("Interpolation finishes!")
modelInfo = {
"a": aList,
"h": hList,
"N0": N0List,
"i": iList,
'fwd': fwdList,
'aw': awList,
'thetaw': thetawList,
'thetasig': thetasigList,
"wavelength": wave,
}
parFormat = ["a", "h", "N0", "i", 'fwd', 'aw', 'thetaw', 'thetasig']
readMe = '''
This template is from: http://www.sungrazer.org/cat3d.html
The interpolation is tested well!
'''
templateDict = {
"tckList": tckList,
"kdTree": kdt,
"parList": XList,
"modelInfo": modelInfo,
"parFormat": parFormat,
"readMe": readMe
}
print("haha")
t = Template(tckList=tckList, kdTree=kdt, parList=XList, modelInfo=modelInfo,
parFormat=parFormat, readMe=readMe)
print("haha")
t = Template(**templateDict)
print("haha")
fp = open("template/Cat3d_H_wind.tmplt", "w")
# pickle.dump(t, fp)
pickle.dump(templateDict, fp)
fp.close()
# test of template
fp = open("template/Cat3d_H_wind.tmplt", "r")
tpDict = pickle.load(fp)
fp.close()
t = Template(**tpDict)
counter = 0
for filename in glob('template/CAT3D-WIND_SED_GRID/*.*'):
print(filename)
f = Table.read(filename, format='ascii')
if filename[36] != '1':
N0 = int(filename[36])
if N0 == 7:
N0 = 7.5
# print('N0 ', N0)
fwd = float(filename[41:45])
# print('fwd ', fwd)
a = float(filename[47:52])
# print('a ', a)
h = float(filename[54:58])
# print('h ', h)
aw = float(filename[61:66])
# print('aw ', aw)
thetaw = float(filename[73:75])
# print('thetaw ', thetaw)
thetasig = float(filename[84:88])
# print('thetasig ', thetasig)
else:
N0 = int(filename[36:38])
# print('N0 ', N0)
fwd = float(filename[42:46])
# print('fwd ', fwd)
a = float(filename[48:53])
# print('a ', a)
h = float(filename[55:59])
# print('h ', h)
aw = float(filename[62:67])
# print('aw ', aw)
thetaw = float(filename[74:76])
# print('thetaw ', thetaw)
thetasig = float(filename[85:89])
# print('thetasig ', thetasig)
for i in iList:
# print(i)
index = i/15 + 3
flux = f['col{0}'.format(index)]/f['col1']
tck = splrep(f['col2'], flux)
pars = [a, h, N0, i, fwd, aw, thetaw, thetasig]
flux_intp = t(wave, pars)
print(np.max(abs(flux - flux_intp) / flux_intp))
print("[{0}%]".format(100. * (counter + 1) / length))
counter += 1
if counter > 1000:
break
print(t.get_parFormat())
print(t.readme())
print(t.get_nearestParameters(pars)) | 4,283 | 24.2 | 77 | py |
Fitter | Fitter-master/template/bc03_sps_cha.py | import numpy as np
from sedfit.fitter.template import Template
from sklearn.neighbors import KDTree
from scipy.interpolate import splrep, splev
import matplotlib.pyplot as plt
import cPickle as pickle
from sgPhot import extractSED
ls_mic = 2.99792458e14 #micron/s
ls_aa = 2.99792458e18 #aa/s
Mpc = 3.08567758e24 #cm
pc10 = 3.08567758e19 #cm
Lsun = 3.828e33 #erg/s
mJy = 1e26 # From erg/s/cm^2/Hz to mJy
#Generate the templates of stellar emission templates
logMs = 0.0
ageList = np.logspace(np.log10(0.05), 1.0, 100)
waveLim = [5e2, 1e7]
#ageList = np.array([0.5, 1.0, 3.0, 5.0, 9.0])
#model_name = "bc03_ssp_z_0.02_chab.model"
#model_name = "bc03_exp_1.0_z_0.02_chab.model"
modelList = [
"bc03_ssp_z_0.02_chab.model",
"bc03_burst_0.1_z_0.02_chab.model",
"bc03_exp_0.1_z_0.02_chab.model",
"bc03_exp_1.0_z_0.02_chab.model",
"bc03_const_1.0_tV_0.2_z_0.02_chab.model",
"bc03_const_1.0_tV_5.0_z_0.02_chab.model",
]
nAge = len(ageList)
nModel = len(modelList)
sedDict= {}
for model_name in modelList:
sedDict[model_name] = []
for age in ageList:
sedDict[model_name].append(extractSED(age, 10**logMs, model_name=model_name))
#Interpolate with KD tree and spline interpolation
XList = []
tckList = []
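# Build one spline per (age, SFH model) pair and index the parameter pairs with
# a KDTree for nearest-neighbour lookup.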
for loop_m in range(nModel):
model_name = modelList[loop_m]
print "Constructing: {0}".format(model_name)
for loop in range(nAge):
age = ageList[loop]
(wave, flux) = sedDict[model_name][loop]
fltr = (wave > waveLim[0]) & (wave < waveLim[1])
wave = wave[fltr] * 1e-4 # Convert to micron
flux = flux[fltr] * (4.0 * np.pi * pc10**2.) # Convert to erg/s/Hz
tck = splrep(wave, flux)
tckList.append(tck)
XList.append([age, loop_m])
kdt = KDTree(XList)
print("Interpolation finishes!")
modelInfo = {
"age": ageList,
"model_name": modelList
}
parFormat = ["age", "SFH"]
readMe = '''
The stellar emission templates are generated with EzGal.
The units of the wavelength and flux are angstrom and mJy.
This template uses different SFHs coded from 0 to {0}.
'''.format(nModel-1)
templateDict = {
"tckList": tckList,
"kdTree": kdt,
"parList": XList,
"modelInfo": modelInfo,
"parFormat": parFormat,
"readMe": readMe
}
t = Template(tckList=tckList, kdTree=kdt, parList=XList, modelInfo=modelInfo,
parFormat=parFormat, readMe=readMe)
t = Template(**templateDict)
fp = open("bc03_sps_cha_kdt.tmplt", "w")
pickle.dump(templateDict, fp)
fp.close()
#Test
cl = ['r', 'g', 'b', 'y', 'c']
for loop in range(len(cl)):
pars = [ageList[loop], 0]
wave, flux = sedDict["bc03_ssp_z_0.02_chab.model"][loop]
fltr = (wave > waveLim[0]) & (wave < waveLim[1])
wave = wave[fltr] * 1e-4
flux = flux[fltr] * (4.0 * np.pi * pc10**2.)
f = t(wave, pars)
plt.plot(wave, f, color=cl[loop], linestyle="--")
plt.plot(wave, flux, color=cl[loop], linestyle=":")
#plt.ylim([1e14, 1e20])
plt.xscale("log")
plt.yscale("log")
plt.show()
| 2,988 | 30.463158 | 85 | py |
Fitter | Fitter-master/template/tb_cat3d_H.py | import numpy as np
import cPickle as pickle
import matplotlib.pyplot as plt
from sedfit.fitter.template import Template
from scipy.interpolate import splrep, splev
from sklearn.neighbors import KDTree
from astropy.table import Table
from glob import glob
aList = [-0.25, -0.50, -0.75, -1.00, -1.25, -1.50, -1.75, -2.00, -2.25, -2.50]
hList = [0.25, 0.50, 0.75, 1.00, 1.25, 1.50]
tauV = 50
Rout = 500
iList = [0, 15, 30, 45, 60, 75, 90]
length = len(glob('template/CAT3D_SED_GRID/*.*')) * 7
XList = []
tckList = []
counter = 0
for filename in glob('template/CAT3D_SED_GRID/*.*'):
print(filename)
f = Table.read(filename, format='ascii')
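    # The model parameters are read from fixed character positions in the grid
    # file name; the offsets shift by one when N0 has two digits.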
if filename[31] != '1':
N0 = int(filename[31])
print(N0)
a = float(filename[42:47])
print(a)
h = float(filename[49:53])
print(h)
else:
N0 = int(filename[31:33])
print(N0)
a = float(filename[43:48])
print(a)
h = float(filename[50:54])
print(h)
for i in iList:
print(i)
index = i/15 + 3
flux = f['col{0}'.format(index)]/f['col1']
tck = splrep(f['col2'], flux)
tckList.append(tck)
XList.append([a, h, N0, i])
print("[{0}%]".format(100. * (counter + 1) / length))
counter += 1
wave = f['col2']
kdt = KDTree(XList)
print("Interpolation finishes!")
modelInfo = {
"a": aList,
"N0": [5, 7, 10],
"h": hList,
"i": iList,
"wavelength": wave,
}
parFormat = ["a", "h", "N0", "i"]
readMe = '''
This template is from: http://www.sungrazer.org/cat3d.html
The interpolation is tested well!
'''
templateDict = {
"tckList": tckList,
"kdTree": kdt,
"parList": XList,
"modelInfo": modelInfo,
"parFormat": parFormat,
"readMe": readMe
}
print("haha")
t = Template(tckList=tckList, kdTree=kdt, parList=XList, modelInfo=modelInfo,
parFormat=parFormat, readMe=readMe)
print("haha")
t = Template(**templateDict)
print("haha")
fp = open("template/Cat3d_H.tmplt", "w")
# pickle.dump(t, fp)
pickle.dump(templateDict, fp)
fp.close()
# test of template
fp = open("template/Cat3d_H.tmplt", "r")
tpDict = pickle.load(fp)
fp.close()
t = Template(**tpDict)
counter = 0
for filename in glob('template/CAT3D_SED_GRID/*.*'):
f = Table.read(filename, format='ascii')
if filename[31] != '1':
N0 = int(filename[31])
print(N0)
a = float(filename[42:47])
print(a)
h = float(filename[49:53])
print(h)
else:
N0 = int(filename[31:33])
print(N0)
a = float(filename[43:48])
print(a)
h = float(filename[50:54])
print(h)
for i in iList:
index = i/15 + 3
flux = f['col{0}'.format(index)]/f['col1']
pars = [a, h, N0, i]
flux_intp = t(wave, pars)
print(np.max(abs(flux - flux_intp) / flux_intp))
print("[{0}%]".format(100. * (counter + 1) / length))
counter += 1
if counter > 100:
break
print(t.get_parFormat())
print(t.readme())
print(t.get_nearestParameters(pars)) | 2,765 | 22.05 | 78 | py |
Fitter | Fitter-master/template/clumpy_pca.py | #This script uses the PCA method to decompose the CLUMPY templates. It is found
#that the normalised templates are not very well recovered. Therefore, we
#decompose the original ones from "clumpy_models_201410_tvavg.hdf5".
#The aim of this decomposition is to reduce the data file size while keeping the
#accuracy of the templates.
#We first take the log10 of the templates and then subtract the mean of the
#logarithm fluxes from each of the template. The PCA decomposition is applied to
#the results. The steps come from Han&Han ApJ, 749, 123, 2012.
import h5py
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
from sklearn.neighbors import KDTree
from sedfit.fitter.template import PCA_decompose
##Load the data for PCA and KDTree
#h = h5py.File('/Users/jinyi/Work/PG_QSO/templates/clumpy_fnu_norm.hdf5')
h = h5py.File('/Users/jinyi/Work/PG_QSO/templates/clumpy_models_201410_tvavg.hdf5')
h_q = h['q'].value
h_N0 = h['N0'].value
h_tv = h['tv'].value
h_sig = h['sig'].value
h_Y = h['Y'].value
h_i = h['i'].value
wave = h['wave'].value
flux_tor = h["flux_tor"].value
nSamples = len(flux_tor)
nFeatures = len(flux_tor[0])
print nSamples, nFeatures
parList = []
fnuList = []
for counter in range(nSamples):
q = h_q[counter]
N0 = h_N0[counter]
tv = h_tv[counter]
sig = h_sig[counter]
Y = h_Y[counter]
i = h_i[counter]
logfnu = np.log10(flux_tor[counter]) #For better behaviour of PCA decomposition.
parList.append([q, N0, tv, sig, Y, i])
fnuList.append(logfnu)
fnuList = np.array(fnuList)
fnuMean = np.mean(fnuList, axis=0)
ipList = np.zeros_like(fnuList)
for nf in range(nFeatures):
ipList[:, nf] = fnuList[:, nf] - fnuMean[nf] #For better behaviour of PCA decomposition.
##PCA decomposition
nComponents = 16
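# Decompose the mean-subtracted log-flux array; evr is the explained-variance ratio of each component.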
pcaResults = PCA_decompose(ipList, nComponents, svd_solver="full")
X_t = pcaResults["X_t"]
cmp = pcaResults["components"]
evr = pcaResults["evr"]
sEvr = sum(evr)
print "PCA finish! {0} components explain {1} of the variance.".format(nComponents, sEvr)
##Save the PCA decomposed results
#f = h5py.File("template/clumpy_pca.hdf5", "w")
f = h5py.File("template/clumpy_pca.hdf5", "w")
wave_f = f.create_dataset("wave", (nFeatures,), dtype="float")
fnu_mean = f.create_dataset("fnu_mean", (nFeatures,), dtype="float")
encoder = f.create_dataset("encoder", (nSamples, nComponents), dtype="float")
decoder = f.create_dataset("decoder", (nComponents, nFeatures), dtype="float")
wave_f[...] = wave
fnu_mean[...] = fnuMean
encoder[...] = X_t
decoder[...] = cmp
'''
f.attrs["README"] = """
This file saves the PCA decomposed CLUMPY model. The recovered data is
the mean subtracted log10(fnu) of the model. The integrated flux is
normalised to 1.
To recover the fnu, one should first recover the input array with the
PCA encoder and decoder. Add the mean SED and then use it as the
exponent of 10.
There are {0} principle components used.
""".format(nComponents)
'''
f.attrs["README"] = """
This file saves the PCA decomposed CLUMPY model. The recovered data is
the mean subtracted log10(fnu) of the model. The templates are not normalised.
To recover the fnu, one should first recover the input array with the
PCA encoder and decoder. Add the mean SED and then use it as the
exponent of 10.
There are {0} principle components used.
""".format(nComponents)
wave_f.attrs["README"] = "The corresponding wavelength array."
fnu_mean.attrs["README"] = "The mean SED of log10(fnu)."
encoder.attrs["README"] = "The decomposed templates."
encoder.attrs["nSamples"] = nSamples
encoder.attrs["nComponents"] = nComponents
decoder.attrs["README"] = """
The PCA components of the model. The total {0} components explain {1} of
the variance .
""".format(nComponents, sEvr)
decoder.attrs["nComponents"] = nComponents
decoder.attrs["nFeatures"] = nFeatures
f.flush()
f.close()
##Build up the KDTree
kdt = KDTree(parList)
print "The KDTree is built up!"
modelInfo = {
"q": np.unique(h_q),
"N0": np.unique(h_N0),
"tv": np.unique(h_tv),
"sigma": np.unique(h_sig),
"Y": np.unique(h_Y),
"i": np.unique(h_i),
"wavelength": wave,
}
parFormat = ["q", "N0", "tv", "sigma", "Y", "i"]
readMe = '''
This template is from: http://www.pa.uky.edu/clumpy/
The interpolation is tested well!
'''
templateDict = {
"pcaFile": "clumpy_unnorm_pca.hdf5",
"kdTree": kdt,
"parList": parList,
"modelInfo": modelInfo,
"parFormat": parFormat,
"readMe": readMe
}
fp = open("clumpy_kdt.tmplt", "w")
pickle.dump(templateDict, fp)
fp.close()
| 4,611 | 33.162963 | 92 | py |
Fitter | Fitter-master/postprocess/extraction.py | #!/Users/jinyi/anaconda/bin/python
from __future__ import print_function
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
matplotlib_version = eval(matplotlib.__version__.split(".")[0])
if matplotlib_version > 1:
plt.style.use("classic")
plt.rc('font',family='Times New Roman')
import sys
import types
import numpy as np
import cPickle as pickle
import sedfit.SED_Toolkit as sedt
from sedfit.mcmc import mcmc_emcee as mcmc
from PostProcessTools import *
ls_mic = 2.99792458e14 #micron/s
#Parse the commands#
#-------------------#
fitrsFile = sys.argv[1]
fp = open(fitrsFile, "r")
fitrs = pickle.load(fp)
fp.close()
if len(sys.argv) == 3:
try:
nSamples = int(sys.argv[2])
except:
raise ValueError("The second argument ({0}) is not an integer!".format(sys.argv[2]))
else:
nSamples = 100
#--> Dump the model dict
dumpModelDict(fitrs)
#The code starts#
#################
print("#################################")
print("# Galaxy SED Fitter Extraction #")
print("#################################")
readme = """
The structure of the dict is as follows:
{
'dataPck': {...}, # Information of the target and the data.
'waveModel': [...], # The wavelength of the models.
'Best-Fit': {
         'variables': [...], # List of the variable names in LaTeX format.
'Total': [...], # The best-fit total model.
'Components': {...}, # The best-fit model of each components.
'Components_add': {...}, # The best-fit model of each components without the multiplicative component(s).
'Photometry': [...] # The synthetic photometric result of best-fit model.
},
'Variation': {0: {
'Total': [...], # The total model of one set of randomly sampled parameters.
'Components': {...}, # The model components of one set of randomly sampled parameters.
'Components_add': {...}, # The model components without the multiplicative component(s) of one set of randomly sampled parameters.
'Photometry': [...] # The synthetic photometric result of one set of randomly sampled parameters.
},
1: {'Total': [...], 'Components': {...}},
2: {'Total': [...], 'Components': {...}}
... # In total %d randomly sampled models.
},
'readme': '...' # This note.
}
"""%(nSamples)
silent = True
dataPck = fitrs["dataPck"]
targname = dataPck["targname"]
redshift = dataPck["redshift"]
distance = dataPck["distance"]
dataDict = dataPck["dataDict"]
modelPck = fitrs["modelPck"]
print("The target info:")
print("Name: {0}".format(targname))
print("Redshift: {0}".format(redshift))
print("Distance: {0}".format(distance))
print("Extracting {0} models as uncertainty.".format(nSamples))
print(readme)
#-> Load the data
sedData = dataLoader(fitrs, silent)
#-> Load the model
sedModel = modelLoader(fitrs, silent)
cleanTempFile()
parTruth = modelPck["parTruth"] #Whether to provide the truth of the model
modelUnct = False #modelPck["modelUnct"] #Whether to consider the model uncertainty in the fitting
#-> Build the emcee object
em = mcmc.EmceeModel(sedData, sedModel, modelUnct)
#-> Extract the models
waveModel = modelPck["waveModel"]
extractDict = {# The dict of extracted data.
"dataPck": fitrs["dataPck"],
"waveModel": waveModel,
"readme": readme
}
#--> Best-fit model
extractDict["Best-Fit"] = {}
fraction = 0
burnIn = 0
ps = fitrs["posterior_sample"]
pcnt = em.p_median(ps, burnin=burnIn, fraction=fraction)
extractDict["Best-Fit"]["variables"] = sedModel.get_parVaryNames()
extractDict["Best-Fit"]["Total"] = sedModel.combineResult(x=waveModel)
extractDict["Best-Fit"]["Components"] = sedModel.componentResult(x=waveModel)
extractDict["Best-Fit"]["Components_add"] = sedModel.componentAddResult(x=waveModel)
extractDict["Best-Fit"]["Photometry"] = sedData.model_pht(waveModel, extractDict["Best-Fit"]["Total"])
#--> Model variation
extractDict["Variation"] = {}
counter = 0
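# Draw nSamples random parameter sets from the posterior and store the
# corresponding model components and synthetic photometry.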
for pars in ps[np.random.randint(len(ps), size=nSamples)]:
sedModel.updateParList(pars)
ytotal = sedModel.combineResult(x=waveModel)
extractDict["Variation"][counter] = {
"Total": ytotal,
"Components": sedModel.componentResult(x=waveModel),
"Components_add": sedModel.componentAddResult(x=waveModel),
"Photometry": sedData.model_pht(waveModel, ytotal)
}
counter += 1
#-> Save the extracted data
fp = open("{0}.xtr".format(targname), "w")
pickle.dump(extractDict, fp)
fp.close()
| 4,532 | 33.340909 | 151 | py |
Fitter | Fitter-master/postprocess/__init__.py | 0 | 0 | 0 | py |
|
Fitter | Fitter-master/postprocess/PostProcessTools.py | #This script provides some functions for the post-processing of the fitting sampling.
#
import os
import numpy as np
import cPickle as pickle
from sedfit.dir_list import root_path
from scipy.interpolate import interp1d
ls_mic = 2.99792458e14 #unit: micron/s
Mpc = 3.08567758e24 #unit: cm
mJy = 1e26 #conversion factor from erg/s/cm^2/Hz to mJy
__all__ = ["AddDict", "MatchDict", "parStatistics", "Luminosity_Integrate",
"Luminosity_Specific", "L_Total", "randomSampler", "CorrectParameters",
"Flux_Pht_Component", "dumpModelDict", "cleanTempFile", "dataLoader",
"modelLoader", "ChiSq"]
def AddDict(targetDict, quantName, quant, nFillPar=None):
"""
To add a quantity into the target dict. If there is a parameter omitted
before, we need to fill the list with nan before the add item.
"""
#->If the parameter is added before.
if quantName in targetDict.keys():
#print "No.{0}: {1} is there!".format(nFillPar, quantName)
targetDict[quantName].append(quant)
    else:#->If the parameter is not added before.
#print "No.{0}: {1} is not there!".format(nFillPar, quantName)
if (nFillPar is None) or (nFillPar == 0):#->If no parameter possibly omitted.
targetDict[quantName] = [quant]
else:#->If there is parameter omitted.
#print "Add nan to {0} for No.{1} source!".format(quantName, nFillPar)
ngap = nFillPar #->Find how many items are omitted.
            #->Add the items that are omitted.
parList = [np.nan for loop in range(ngap)]
#->Add the current item.
parList.append(quant)
targetDict[quantName] = parList
return None
def MatchDict(targetDict):
"""
To match the lengths of the lists in the dict by filling them with nan.
"""
lmin = np.inf
lmax = -1
for quantName in targetDict.keys():
lq = len(targetDict[quantName])
if lq > lmax:
lmax = lq
if lq < lmin:
lmin = lq
ldiff = lmax-lmin
if ldiff > 1:
raise ValueError("The length difference ({0}) is larger than 1!".format(ldiff))
for quantName in targetDict.keys():
parList = targetDict[quantName]
if len(parList) < lmax:
parList.append(np.nan)
return None
def parStatistics(ppfunc, nSamples, ps, fargs=[], fkwargs={}):
"""
Get the statistics of the model calculated parameters.
Parameters
----------
ppfunc : function
The function to post process the model posterior sampling.
nSamples : int
The number of sampling when it calculate the parameter statistics.
ps : array
The posterior sampling.
Returns
-------
pList : array
The parameter list.
Notes
-----
None.
"""
pList = []
for pars in ps[np.random.randint(len(ps), size=nSamples)]:
pList.append(ppfunc(pars, *fargs, **fkwargs))
pList = np.array(pList)
return pList
def Luminosity_Integrate(flux, wave, DL, z, waveRange=[8.0, 1e3], frame="rest"):
"""
Calculate the integrated luminosity of input SED with given wavelength range.
Parameters
----------
flux : float array
The flux density used to integrate to the luminosity.
wave : float array
The wavelength of the SED.
DL : float
The luminosity distance of the source.
z : float
The redshift of the source.
waveRange : float array
The short and long end of the wavelength to integrate.
frame : string
The flux and wave should be consistently in the rest frame ("rest") or
observing frame ("obs").
Returns
-------
L : float
The luminosity of the SED within the given wavelength range, unit: erg/s.
Notes
-----
None.
"""
nu = ls_mic / wave
fltr = (wave > waveRange[0]) & (wave < waveRange[1])
F = -1.0 * np.trapz(flux[fltr], nu[fltr]) / mJy #unit: erg/s/cm^2
if frame == "rest":
L = F * 4.0*np.pi * (DL * Mpc)**2.0 / (1 + z)**2
elif frame == "obs":
L = F * 4.0*np.pi * (DL * Mpc)**2.0
else:
raise ValueError("Cannot recognise the frame: {0}!".format(frame))
return L
def Luminosity_Specific(flux, wave, wave0, DL, z, frame="rest"):
"""
Calculate the specific luminosity at wave0 for the SED (wave, flux).
Parameters
----------
flux : float array
The flux density of the SED.
wave : float array
The wavelength of the SED.
wave0 : float (array)
The wavelength(s) to be caculated.
DL : float
The luminosity distance.
z : float
The redshift.
Returns
-------
Lnu : float (array)
The specific luminosity (array) at wave0 that is (are) required, unit: erg/s/Hz.
Notes
-----
None.
"""
S0 = interp1d(wave, flux)(wave0)
if frame == "rest":
Lnu = S0 * 4.0*np.pi * (DL * Mpc)**2.0 / (1 + z)**2 / mJy #unit: erg/s/Hz
elif frame == "obs":
Lnu = S0 * 4.0*np.pi * (DL * Mpc)**2.0 / (1 + z) / mJy #unit: erg/s/Hz
return Lnu
def L_Total(pars, sedModel, DL, z):
"""
Calculate the total luminosity.
"""
sedModel.updateParList(pars)
wave = sedModel.get_xList()
flux = sedModel.combineResult()
L = Luminosity_Integrate(flux, wave, DL, z, waveRange=[8.0, 1e3], frame="rest")
return L
def randomSampler(parRange, parType="D"):
"""
This function randomly sample the given parameter space.
"""
if parType == "D": #For discrete parameters
p = np.random.choice(parRange)
elif parType == "C": #For continuous parameters
r1, r2 = parRange
p = (r2 - r1) * np.random.rand() + r1
else:
raise ValueError("The parameter type '{0}' is not recognised!".format(parType))
return p
def CorrectParameters(sedModel, silent=True):
"""
Correct the parameters if there are some discrete parameters interpolated
with nearest neighbor method.
Parameters
----------
sedModel: ModelCombiner object
The ModelCombiner object.
silent : bool; default: True
Print information and return the parameter names for check if silent is
False.
Returns
-------
parList : list
The list of parameters, the sequence of which is important.
Notes
-----
None.
"""
from sedfit import model_functions as sedmf
modelDict = sedModel.get_modelDict()
parModel = sedModel.get_modelParDict()
parList = []
for model in modelDict.keys():
funcName = modelDict[model].get_function_name()
fitDict = parModel[model]
pnList = fitDict.keys()
if funcName in sedmf.discreteFuncList:
if not silent:
print("{0} is discrete".format(model))
inDict = {}
for pn in pnList:
inDict[pn] = fitDict[pn]["value"]
exec "Func_PosPar = sedmf.{0}_PosPar".format(funcName)
outDict = Func_PosPar(**inDict)
for pn in pnList:
if fitDict[pn]["vary"]:
parList.append(outDict[pn])
else:
if not silent:
print("{0} is not discrete".format(model))
for pn in pnList:
if fitDict[pn]["vary"]:
parList.append(fitDict[pn]["value"])
return parList
def Flux_Pht_Component(pars, component, sedModel, sedData):
"""
Calculate the rest-frame flux in the photometric bands of one specific
model component.
Parameters
----------
pars : list
The parameter list.
component : string
The name of the component.
sedModel : ModelCombiner object
The combined model.
sedData : SEDClass object
The data set of SED.
Returns
-------
fluxModelPht : list
The list of photometric flux calculated from the model.
Notes
-----
None.
"""
sedModel.updateParList(pars)
result_cmp = sedModel.componentResult()
waveModel = sedModel.get_xList()
fluxModel = result_cmp[component]
fluxModelPht = sedData.model_pht(waveModel, fluxModel)
return fluxModelPht
def Flux_Pht_Total(pars, sedModel, sedData):
"""
Calculate the rest-frame flux in the photometric bands of
the total model.
Parameters
----------
pars : list
The parameter list.
sedModel : ModelCombiner object
The combined model.
sedData : SEDClass object
The data set of SED.
Returns
-------
fluxModelPht : list
The list of photometric flux calculated from the model.
Notes
-----
None.
"""
sedModel.updateParList(pars)
waveModel = sedModel.get_xList()
fluxModel = sedModel.combineResult()
fluxModelPht = sedData.model_pht(waveModel, fluxModel)
return fluxModelPht
def dumpModelDict(fitrs):
"""
Dump the model dict.
Parameters
----------
fitrs : DictType
The dict of the fitting results.
Returns
-------
None.
Notes
-----
None.
"""
modelPck = fitrs["modelPck"]
modelDict = modelPck["modelDict"]
fp = open("{0}temp_model.dict".format(root_path), "w")
pickle.dump(modelDict, fp)
fp.close()
def cleanTempFile():
"""
Clean the tempt file.
"""
os.remove("{0}temp_model.dict".format(root_path))
def modelLoader(fitrs, QuietMode=True):
"""
Load the model object. Import the module within the function.
Parameters
----------
fitrs : DictType
The dict of the fitting results.
QuietMode : bool
Use verbose mode if False.
Returns
-------
sedModel: ModelCombiner
The model combiner object.
Notes
-----
None.
"""
from sedfit import model_functions as sedmf
reload(sedmf)
from sedfit.sedmodel import SedModel
modelPck = fitrs["modelPck"]
funcLib = sedmf.funcLib
modelDict = modelPck["modelDict"]
waveModel = modelPck["waveModel"]
parAddDict_all = modelPck["parAddDict_all"]
DL = parAddDict_all["DL"]
sedModel = SedModel(modelDict, funcLib, waveModel, parAddDict_all)
return sedModel
def dataLoader(fitrs, QuietMode=True):
"""
Load the data objects. Import the module within the function.
"""
from sedfit import sedclass as sedsc
#-> Setup the data
dataPck = fitrs["dataPck"]
targname = dataPck["targname"]
redshift = dataPck["redshift"]
distance = dataPck["distance"]
dataDict = dataPck["dataDict"]
sedPck = dataPck["sedPck"]
sedData = sedsc.setSedData(targname, redshift, distance, dataDict, sedPck,
QuietMode)
return sedData
def ChiSq(data, model, unct):
"""
This is a simple chi square.
Parameters
----------
data : array
The data values.
model : array
The model values.
unct : array
The uncertainty values.
Returns
-------
csq : float
The chi square.
"""
csq = np.sum( ( (model - data) / unct )**2 )
return csq
if __name__ == "__main__":
import cPickle as pickle
from sedfit import sedclass as sedsc
from sedfit.fitter import basicclass as bc
from sedfit import model_functions as sedmf
fitrsPath = "./"
f = open(fitrsPath+"SDSSJ0041-0952.fitrs", "r")
fitrs = pickle.load(f)
f.close()
dataPck = fitrs["dataPck"]
targname = dataPck["targname"]
redshift = dataPck["redshift"]
distance = dataPck["distance"]
dataDict = dataPck["dataDict"]
sedPck = dataPck["sedPck"]
sedData = sedsc.setSedData(targname, redshift, distance, dataDict, sedPck, True)
#->Setup the model
modelPck = fitrs["modelPck"]
funcLib = sedmf.funcLib
modelDict = modelPck["modelDict"]
waveModel = modelPck["waveModel"]
parAddDict_all = modelPck["parAddDict_all"]
sedModel = bc.Model_Generator(modelDict, funcLib, waveModel, parAddDict_all)
pars = sedModel.get_parList()
print Flux_Pht_Component(pars, "BC03", sedModel, sedData)
| 12,092 | 27.656398 | 88 | py |
Fitter | Fitter-master/postprocess/postprocess.py | #!/Users/jinyi/anaconda/bin/python
from __future__ import print_function
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
matplotlib_version = eval(matplotlib.__version__.split(".")[0])
if matplotlib_version > 1:
plt.style.use("classic")
plt.rc('font',family='Times New Roman')
import sys
import types
import numpy as np
import cPickle as pickle
import sedfit.SED_Toolkit as sedt
from sedfit.mcmc import mcmc_emcee as mcmc
from PostProcessTools import *
from matplotlib.ticker import FuncFormatter, FormatStrFormatter
ls_mic = 2.99792458e14 #micron/s
xlabelDict = {
"cm": r'Rest Wavelength (cm)',
"mm": r'Rest Wavelength (mm)',
"micron": r'Rest Wavelength $\mathrm{(\mu m)}$',
"angstrom": r'Rest Wavelength $\mathrm{(\AA)}$',
"Hz": r'Rest Frequency (Hz)',
"MHz": r'Rest Frequency (MHz)',
"GHz": r'Rest Frequency (GHz)',
}
ylabelDict = {
"fnu": r'$f_\nu \, \mathrm{(mJy)}$',
"nufnu": r'$\nu f_\nu \, \mathrm{(erg\,s^{-1}\,cm^{-2})}$',
}
#Parse the commands#
#-------------------#
fitrsFile = sys.argv[1]
fp = open(fitrsFile, "r")
fitrs = pickle.load(fp)
fp.close()
#--> Dump the model dict
dumpModelDict(fitrs)
#The code starts#
#################
print("#################################")
print("# Galaxy SED Fitter postprocess #")
print("#################################")
silent = True
dataPck = fitrs["dataPck"]
targname = dataPck["targname"]
redshift = dataPck["redshift"]
distance = dataPck["distance"]
dataDict = dataPck["dataDict"]
modelPck = fitrs["modelPck"]
print("The target info:")
print("Name: {0}".format(targname))
print("Redshift: {0}".format(redshift))
print("Distance: {0}".format(distance))
#-> Load the data
sedData = dataLoader(fitrs, silent)
#-> Load the model
sedModel = modelLoader(fitrs, silent)
cleanTempFile()
parTruth = modelPck["parTruth"] #Whether to provide the truth of the model
modelUnct = False #modelPck["modelUnct"] #Whether to consider the model uncertainty in the fitting
#-> Build the emcee object
em = mcmc.EmceeModel(sedData, sedModel, modelUnct)
#posterior process settings#
#--------------------------#
ppDict = fitrs["ppDict"]
psLow = ppDict["low"]
psCenter = ppDict["center"]
psHigh = ppDict["high"]
nuisance = ppDict["nuisance"]
fraction = 0
burnIn = 0
ps = fitrs["posterior_sample"]
#-> Plot the SED data and fit
xUnits = "micron"
yUnits = "nufnu"
sedwave = np.array(sedData.get_List("x"))
sedflux = np.array(sedData.get_List("y"))
if yUnits == "nufnu":
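    # Convert mJy to nu*f_nu in erg/s/cm^2: multiply by nu = c/lambda and 1 mJy = 1e-26 erg/s/cm^2/Hz.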
sedflux *= ls_mic / sedwave * 1.e-26
fig = plt.figure(figsize=(10, 5))
ax = plt.gca()
xmin = np.min(sedwave) * 0.9 #0.7 #
xmax = np.max(sedwave) * 1.1 #600 #
ymin = np.min(sedflux) * 0.5
ymax = 2e-10 #np.max(sedflux) * 5.0
xlim = [xmin, xmax]
ylim = [ymin, ymax]
cList = ["green", "orange", "blue", "yellow", "purple"]
cKwargs = { #The line properties of the model components.
"ls_uc": "--",
"alpha_uc": 0.1,
"lw_uc": 0.5,
"ls_bf": "--",
"alpha_bf": 1.0,
"lw_bf": 1.0,
}
tKwargs = { #The line properties of the model total.
"ls_uc": "-",
"alpha_uc": 0.1,
"lw_uc": 0.5,
"ls_bf": "-",
"alpha_bf": 1.0,
"lw_bf": 3.0,
"color": "red",
}
em.plot_fit(truths=parTruth, FigAx=(fig, ax), xlim=xlim, ylim=ylim, nSamples=100,
burnin=burnIn, fraction=fraction, cList=cList, cLineKwargs=cKwargs,
tLineKwargs=tKwargs, ps=ps, xUnits=xUnits, yUnits=yUnits)
ax.set_xlabel(xlabelDict[xUnits], fontsize=24)
ax.set_ylabel(ylabelDict[yUnits], fontsize=24)
plotName = targname
nameSeg = plotName.split("-")
if (len(nameSeg) > 1):
plotName = "$-$".join(nameSeg)
#plotName = "PG {0}".format(plotName[2:])
ax.text(0.05, 0.95, "{0}".format(plotName),
verticalalignment='top', horizontalalignment='left',
transform=ax.transAxes, fontsize=24,
bbox=dict(facecolor='white', alpha=0.5, edgecolor="none"))
ax.tick_params(axis="both", which="major", length=8, labelsize=18, direction="in")
ax.tick_params(axis="both", which="minor", length=5)
#-->Set the legend
phtName = dataDict["phtName"]
spcName = dataDict["spcName"]
handles, labels = ax.get_legend_handles_labels()
handleUse = []
labelUse = []
nLabels = len(labels)
if "Torus" in labels:
modelLabelList = ["Total", "BC03", "Torus", "DL07"] #
else:
modelLabelList = ["Total", "BC03", "DL07"] #
if "Jet" in labels:
modelLabelList.append("Jet")
dataLabelList = ["Model", "Phot"]
if "IRS" in labels:
dataLabelList.append("IRS")
labelList = modelLabelList + dataLabelList
for lb in labelList:
idx = labels.index(lb)
hd = handles[idx]
if lb == phtName:
hd = hd[0]
labelUse.append(lb)
handleUse.append(hd)
plt.legend(handleUse, labelUse, loc="upper right", fontsize=16, numpoints=1,
handletextpad=0.3, handlelength=(4./3.), bbox_to_anchor=(0.99,0.98),
ncol=3, framealpha=0., edgecolor="white")
plt.savefig("{0}_result.pdf".format(targname), bbox_inches="tight")
plt.close()
print("Best fit plot finished!")
"""
#Plot the corner diagram
em.plot_corner(filename="{0}_triangle.png".format(targname), burnin=burnIn, ps=ps,
nuisance=nuisance, truths=parTruth, fraction=fraction,
quantiles=[psLow/100., psCenter/100., psHigh/100.], show_titles=True,
title_kwargs={"fontsize": 20})
print("Triangle plot finished!")
"""
| 5,339 | 29.689655 | 98 | py |
Fitter | Fitter-master/configs/config_example_rl.py | #This config file is for the radio loud sources.
#
import numpy as np
from collections import OrderedDict
################################################################################
# Data #
################################################################################
targname = "PG0003+158"
redshift = 0.45
distance = None #Luminosity distance
sedFile = "data/{0}_obs.csed".format(targname)
sedName = "Phot"
spcName = "IRS"
dataDict = {
"bandList_use": [],
"bandList_ignore":["WISE_w3", "WISE_w4"],
"frame": "obs",
}
################################################################################
# Model #
################################################################################
waveModel = 10**np.linspace(-0.1, 7.0, 1000)
#parAddDict_all = {}
modelDict = OrderedDict(
(
("BC03", {
"function": "BC03",
"logMs":{
"value": 11.45,
"range": [6., 14.],
"type": "c",
"vary": True,
"latex": r"$\mathrm{log}\,M_\mathrm{s}$",
},
"age":{
"value": 5,
"range": [0.3, 10.],
"type": "c",
"vary": False, #True, #
"latex": r"$Age$",
},
}
),
("Hot_Dust", {
"function": "BlackBody",
"logOmega": {
"value": -17.24,
"range": [-25.0, -10.0], #[0.3, 0.4], #
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,\Omega$",
},
"T": {
"value": 944.47,
"range": [500, 1500], #[846., 847], #
"type": "c",
"vary": True, #False, #
"latex": r"$T$",
}
}
),
("CLUMPY", {
"function": "CLUMPY_intp",
"logL": {
"value": 45.31,
"range": [40.0, 50.0], #[6.3, 6.4],
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,L_\mathrm{Torus}$",
},
"i": {
"value": 80.52,
"range": [0.0, 90.0], #[47.0, 48.0], #
"type": "c",
"vary": True, #False, #
"latex": r"$i$",
},
"tv": {
"value": 267.41,
"range": [10.0, 300.0], #[17, 18], #
"type": "c",
"vary": True, #False, #
"latex": r"$\tau_\nu$",
},
"q": {
"value": 1.99,
"range": [0.0, 3.0], #[0.6, 0.8], #
"type": "c",
"vary": True, #False, #
"latex": r"$q$",
},
"N0": {
"value": 3.55,
"range": [1.0, 15.0], #[6.42, 6.44], #
"type": "c",
"vary": True, #False, #
"latex": r"$N_0$",
},
"sigma": {
"value": 55.81,
"range": [15.0, 70.0], #[58.0, 59.0], #
"type": "c",
"vary": True, #False, #
"latex": r"$\sigma$",
},
"Y": {
"value": 28.94,
"range": [5.0, 100.0], #[29., 31.], #
"type": "c",
"vary": True, #False, #
"latex": r"$Y$",
}
}
),
("DL07", {
"function": "DL07",
"logumin": {
"value": 0.699,
"range": [-1.0, 1.4], #log[0.1, 25]
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,U_\mathrm{min}$",
},
"logumax": {
"value": 6,
"range": [3, 6], #log[1e3, 1e6]
"type": "c",
"vary": False, #True, #
"latex": r"$\mathrm{log}\,U_\mathrm{max}$",
},
"qpah": {
"value": 0.47, #10**0.504,
"range": [0.3, 4.8],#10**[-1.0, 0.661],
"type": "c",
"vary": True, #False, #
"latex": r"$q_\mathrm{PAH}$",
},
"gamma": {
"value": 0.071,
"range": [0.0, 1.0], #[0.01, 0.03],
"type": "c",
"vary": True, #False, #
"latex": r"$\gamma$",
},
"logMd": {
"value": 8.42,
"range": [6.0, 11.0], #[9.0, 10.0],
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,M_\mathrm{d}$",
}
}
),
("Jet", {
"function": "Synchrotron",
"Sn_alpha": {
"value": -1.,
"range": [0., 5.0], #[0.01, 0.03],
"type": "c",
"vary": True, #False, #
"latex": r"$\alpha_\mathrm{S}$",
},
"Sn_logsf": {
"value": 8.42,
"range": [-5.0, 5.0], #[9.0, 10.0],
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,f_\mathrm{S}$",
}
}
),
)
)
parTruth = None #Whether to provide the truth of the model
modelUnct = True #Whether to consider the model uncertainty in the fitting
unctDict = OrderedDict(
(
("lnf" , [-10, 2]),
("lna" , [-10, 2]),
("lntau", [-5, 2.5]),
)
)
################################################################################
# emcee #
################################################################################
#emcee options#
#-------------#
emceeDict = OrderedDict(
(
("sampler" , "EnsembleSampler"),
#("ntemps" , 3),
("nwalkers" , 100),
("iteration" , 3),
("iter-step" , 500),
("ball-r" , 0.1),
("ball-t" , 1.0),
("run-step" , 2000),
("burn-in" , 1000),
("thin" , 1),
("threads" , 4),
("printfrac" , 0.1),
)
)
#Postprocess#
#-----------#
ppDict = {
"low": 16,
"center": 50,
"high": 84,
"nuisance": True, #False, #
"fraction": 10, #The fraction of walkers to be dropped.
}
| 7,226 | 32.151376 | 80 | py |
Fitter | Fitter-master/configs/config_example_dl07.py | #This config file is for the radio quiet sources.
#
import numpy as np
from collections import OrderedDict
################################################################################
# Data #
################################################################################
targname = "PG0050+124"
redshift = 0.061
sedFile = "examples/PG0050+124_dl07_0.msed"
sedName = "2M&W&H"
spcName = None #"IRS"
dataDict = {
"bandList_use": [],
"bandList_ignore":[],
"frame": "rest",
}
################################################################################
# Model #
################################################################################
waveModel = 10**np.linspace(-0.1, 3.0, 1000)
modelDict = OrderedDict(
(
("BC03", {
"function": "BC03",
"logMs":{
"value": 9.,
"range": [6., 14.],
"type": "c",
"vary": True,
"latex": r"$\mathrm{log}\,M_\mathrm{s}$",
},
"age":{
"value": 5,
"range": [0.3, 10.],
"type": "c",
"vary": False, #True, #
"latex": r"$Age$",
},
}
),
("DL07", {
"function": "DL07",
"logumin": {
"value": 1.0,
"range": [-1.0, 1.4], #log[0.1, 25]
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,U_\mathrm{min}$",
},
"logumax": {
"value": 6,
"range": [3, 6], #log[1e3, 1e6]
"type": "c",
"vary": False, #True, #
"latex": r"$\mathrm{log}\,U_\mathrm{max}$",
},
"qpah": {
"value": 3.19, #10**0.504,
"range": [0.1, 4.58],#10**[-1.0, 0.661],
"type": "c",
"vary": True, #False, #
"latex": r"$q_\mathrm{PAH}$",
},
"gamma": {
"value": 0.02,
"range": [0.0, 1.0], #[0.01, 0.03],
"type": "c",
"vary": True, #False, #
"latex": r"$\gamma$",
},
"logMd": {
"value": 9.12,
"range": [6.0, 11.0], #[9.0, 10.0],
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,M_\mathrm{d}$",
}
}
),
)
)
parTruth = None #Whether to provide the truth of the model
modelUnct = True #Whether to consider the model uncertainty in the fitting
unctDict = OrderedDict(
(
("lnf" , [-10, 10]),
("lna" , [-10, 1]),
("lntau", [-5, 2.5]),
)
)
################################################################################
# emcee #
################################################################################
#emcee options#
#-------------#
emceeDict = OrderedDict(
(
("sampler" , "EnsembleSampler"),
#("ntemps" , 3),
("nwalkers" , 100),
("iteration" , 3),
("iter-step" , 500),
("ball-r" , 0.3),
("ball-t" , 1.0),
("run-step" , 2000),
("burn-in" , 1000),
("thin" , 1),
("threads" , 4),
("printfrac" , 0.1),
)
)
#Postprocess#
#-----------#
ppDict = {
"low": 16,
"center": 50,
"high": 84,
"nuisance": True, #False, #
"fraction": 10, #The fraction of walkers to be dropped.
}
| 4,014 | 30.614173 | 80 | py |
Fitter | Fitter-master/configs/config_example_rq.py | #This config file is for the radio quiet sources.
#
import numpy as np
from collections import OrderedDict
################################################################################
# Data #
################################################################################
targname = "PG1351+236" #"PG0050+124" #"PG0043+039" #"PG1259+593" #"PG1211+143" #
redshift = 0.055 #0.061 #0.384 #0.472 #0.085 #
distance = None
sedFile = "data/PG1351+236_obs.csed" #"data/PG0050+124_rest.tsed" #"data/PG0043+039_rest.tsed" #"data/PG1259+593_rest.tsed" #"data/PG1211+143_rest.tsed" #
sedName = "2M&W&H"
spcName = "IRS"
dataDict = {
"bandList_use": [],
"bandList_ignore":["WISE_w3", "WISE_w4"],
"frame": "obs",
}
################################################################################
# Model #
################################################################################
waveModel = 10**np.linspace(-0.1, 3.0, 1000)
#parAddDict_all = {}
modelDict = OrderedDict(
(
("BC03", {
"function": "BC03",
"logMs":{
"value": 11.45,
"range": [6., 14.],
"type": "c",
"vary": True,
"latex": r"$\mathrm{log}\,M_\mathrm{s}$",
},
"age":{
"value": 5,
"range": [0.3, 10.],
"type": "c",
"vary": False, #True, #
"latex": r"$Age$",
},
}
),
("Hot_Dust", {
"function": "BlackBody",
"logOmega": {
"value": -17.24,
"range": [-25.0, -10.0], #[0.3, 0.4], #
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,\Omega$",
},
"T": {
"value": 944.47,
"range": [500, 1500], #[846., 847], #
"type": "c",
"vary": True, #False, #
"latex": r"$T$",
}
}
),
("CLUMPY", {
"function": "CLUMPY_intp",
"logL": {
"value": 45.31,
"range": [40.0, 50.0], #[6.3, 6.4],
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,L_\mathrm{Torus}$",
},
"i": {
"value": 80.52,
"range": [0.0, 90.0], #[47.0, 48.0], #
"type": "c",
"vary": True, #False, #
"latex": r"$i$",
},
"tv": {
"value": 267.41,
"range": [10.0, 300.0], #[17, 18], #
"type": "c",
"vary": True, #False, #
"latex": r"$\tau_\nu$",
},
"q": {
"value": 1.99,
"range": [0.0, 3.0], #[0.6, 0.8], #
"type": "c",
"vary": True, #False, #
"latex": r"$q$",
},
"N0": {
"value": 3.55,
"range": [1.0, 15.0], #[6.42, 6.44], #
"type": "c",
"vary": True, #False, #
"latex": r"$N_0$",
},
"sigma": {
"value": 55.81,
"range": [15.0, 70.0], #[58.0, 59.0], #
"type": "c",
"vary": True, #False, #
"latex": r"$\sigma$",
},
"Y": {
"value": 28.94,
"range": [5.0, 100.0], #[29., 31.], #
"type": "c",
"vary": True, #False, #
"latex": r"$Y$",
}
}
),
("DL07", {
"function": "DL07",
"logumin": {
"value": 0.699,
"range": [-1.0, 1.4], #log[0.1, 25]
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,U_\mathrm{min}$",
},
"logumax": {
"value": 6,
"range": [3, 6], #log[1e3, 1e6]
"type": "c",
"vary": False, #True, #
"latex": r"$\mathrm{log}\,U_\mathrm{max}$",
},
"qpah": {
"value": 0.47, #10**0.504,
"range": [0.3, 4.8],#10**[-1.0, 0.661],
"type": "c",
"vary": True, #False, #
"latex": r"$q_\mathrm{PAH}$",
},
"gamma": {
"value": 0.071,
"range": [0.0, 1.0], #[0.01, 0.03],
"type": "c",
"vary": True, #False, #
"latex": r"$\gamma$",
},
"logMd": {
"value": 8.42,
"range": [6.0, 11.0], #[9.0, 10.0],
"type": "c",
"vary": True, #False, #
"latex": r"$\mathrm{log}\,M_\mathrm{d}$",
}
}
),
)
)
parTruth = None #Whether to provide the truth of the model
modelUnct = True #Whether to consider the model uncertainty in the fitting
unctDict = OrderedDict(
(
("lnf" , [-10, 2]),
("lna" , [-10, 2]),
("lntau", [-5, 2.5]),
)
)
################################################################################
# emcee #
################################################################################
#emcee options#
#-------------#
emceeDict = OrderedDict(
(
("sampler" , "EnsembleSampler"),
#("ntemps" , 3),
("nwalkers" , 100),
("iteration" , 3),
("iter-step" , 500),
("ball-r" , 0.1),
("ball-t" , 1.0),
("run-step" , 2000),
("burn-in" , 1000),
("thin" , 1),
("threads" , 4),
("printfrac" , 0.1),
)
)
#Postprocess#
#-----------#
ppDict = {
"low": 16,
"center": 50,
"high": 84,
"nuisance": True, #False, #
"fraction": 10, #The fraction of walkers to be dropped.
}
| 6,773 | 32.87 | 155 | py |
MICO | MICO-main/setup.py | from setuptools import setup
if __name__ == "__main__":
setup()
| 69 | 13 | 28 | py |